Merge ock repo's next branch into tk5 repo's master branch
commit 724ee3c454
@@ -37,9 +37,20 @@ Example usage of perf::

   hisi_pcie0_core0/rx_mwr_cnt/ [kernel PMU event]
   ------------------------------------------

-  $# perf stat -e hisi_pcie0_core0/rx_mwr_latency/
-  $# perf stat -e hisi_pcie0_core0/rx_mwr_cnt/
-  $# perf stat -g -e hisi_pcie0_core0/rx_mwr_latency/ -e hisi_pcie0_core0/rx_mwr_cnt/
+  $# perf stat -e hisi_pcie0_core0/rx_mwr_latency,port=0xffff/
+  $# perf stat -e hisi_pcie0_core0/rx_mwr_cnt,port=0xffff/
+
+Related events are usually used to calculate bandwidth, latency or the like.
+They need to start and end counting at the same time, therefore related events
+are best used in the same event group to get the expected value. There are two
+ways to know if they are related events:
+
+a) By event name, such as the latency events "xxx_latency, xxx_cnt" or
+   bandwidth events "xxx_flux, xxx_time".
+b) By event type, such as "event=0xXXXX, event=0x1XXXX".
+
+Example usage of perf group::
+
+  $# perf stat -e "{hisi_pcie0_core0/rx_mwr_latency,port=0xffff/,hisi_pcie0_core0/rx_mwr_cnt,port=0xffff/}"

 The current driver does not support sampling, so "perf record" is unsupported.
 Attaching to a task is also unsupported for the PCIe PMU.
@@ -51,8 +62,12 @@ Filter options

 The PMU can only monitor the performance of traffic downstream of the target
 Root Ports or the target Endpoint. The PCIe PMU driver supports "port" and
-"bdf" interfaces for users, and these two interfaces aren't supported at the
-same time.
+"bdf" interfaces for users.
+Please notice that one of these two interfaces must be set, and that they are
+not supported at the same time. If both are set, only the "port" filter is
+valid.
+If the "port" filter is not set or is set explicitly to zero (default), the
+"bdf" filter takes effect, because "bdf=0" means 0000:000:00.0.

 - port

@@ -95,7 +110,7 @@ Filter options

 Example usage of perf::

-  $# perf stat -e hisi_pcie0_core0/rx_mrd_flux,trig_len=0x4,trig_mode=1/ sleep 5
+  $# perf stat -e hisi_pcie0_core0/rx_mrd_flux,port=0xffff,trig_len=0x4,trig_mode=1/ sleep 5

 3. Threshold filter

@@ -109,7 +124,7 @@ Filter options

 Example usage of perf::

-  $# perf stat -e hisi_pcie0_core0/rx_mrd_flux,thr_len=0x4,thr_mode=1/ sleep 5
+  $# perf stat -e hisi_pcie0_core0/rx_mrd_flux,port=0xffff,thr_len=0x4,thr_mode=1/ sleep 5

 4. TLP Length filter

@@ -127,4 +142,4 @@ Filter options

 Example usage of perf::

-  $# perf stat -e hisi_pcie0_core0/rx_mrd_flux,len_mode=0x1/ sleep 5
+  $# perf stat -e hisi_pcie0_core0/rx_mrd_flux,port=0xffff,len_mode=0x1/ sleep 5

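Grouping matters because a quotient of two counters is only meaningful when both cover exactly the same interval. As a post-processing sketch (plain C; the function name, sample values and units are illustrative assumptions, not part of the driver or of perf):

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Illustrative only: derive an average latency per TLP from the two
	 * grouped counters shown above (values as reported by "perf stat").
	 */
	static double avg_latency(uint64_t total_latency, uint64_t tlp_count)
	{
		/* An idle port produces no TLPs; avoid dividing by zero. */
		if (tlp_count == 0)
			return 0.0;

		return (double)total_latency / (double)tlp_count;
	}

	int main(void)
	{
		/* Made-up sample readings. */
		printf("avg rx_mwr latency: %.2f\n", avg_latency(1234567, 890));
		return 0;
	}
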
@@ -200,6 +200,13 @@ this documentation.
 when the in-band link state changes - otherwise the link will never
 come up.

+The :c:func:`mac_get_caps` method is optional, and if provided should
+return the phylink MAC capabilities that are supported for the passed
+``interface`` mode. In general, there is no need to implement this method.
+Phylink will use these capabilities in combination with permissible
+capabilities for ``interface`` to determine the allowable ethtool link
+modes.
+
 The :c:func:`validate` method should mask the supplied supported mask,
 and ``state->advertising`` with the supported ethtool link modes.
 These are the new ethtool link modes, so bitmask operations must be

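For the rare MACs that do need the hook, an implementation is typically just a switch on the interface mode. A minimal sketch of a hypothetical driver (the foo_* names are placeholders; the MAC_* capability bits and the ops member come from the phylink header changes further down in this diff):

	#include <linux/phylink.h>

	/* Hypothetical MAC that cannot do half-duplex on RGMII variants. */
	static unsigned long foo_mac_get_caps(struct phylink_config *config,
					      phy_interface_t interface)
	{
		unsigned long caps = MAC_SYM_PAUSE | MAC_ASYM_PAUSE;

		if (phy_interface_mode_is_rgmii(interface))
			caps |= MAC_10FD | MAC_100FD | MAC_1000FD;
		else
			caps |= MAC_10 | MAC_100 | MAC_1000;

		return caps;
	}

	static const struct phylink_mac_ops foo_phylink_mac_ops = {
		.mac_get_caps	= foo_mac_get_caps,
		/* .mac_config, .mac_link_up, ... as usual */
	};
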
@@ -29,6 +29,7 @@
 /* Synopsys Core versions */
 #define	DWMAC_CORE_3_40		0x34
 #define	DWMAC_CORE_3_50		0x35
+#define	DWMAC_CORE_3_70		0x37
 #define	DWMAC_CORE_4_00		0x40
 #define DWMAC_CORE_4_10		0x41
 #define DWMAC_CORE_5_00		0x50

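These IDs mirror the GMAC_VERSION register's SNPSVER field, and the stmmac core branches on them to pick register layouts; the new 0x37 entry is what the Loongson code below maps its custom 0x10 core back to. A trivial sketch of the comparison pattern (the helper name is invented):

	/* Simplified sketch: pick a register layout from the Synopsys core ID. */
	static bool foo_core_is_gmac4_or_newer(u32 synopsys_id)
	{
		/* 0x34..0x37 are dwmac1000-era cores, 0x40+ are dwmac4-era. */
		return synopsys_id >= DWMAC_CORE_4_00;
	}
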
@@ -8,15 +8,88 @@
 #include <linux/device.h>
 #include <linux/of_irq.h>
 #include "stmmac.h"
+#include "dwmac_dma.h"
+#include "dwmac1000.h"

-static int loongson_default_data(struct plat_stmmacenet_data *plat)
+/* Normal Loongson Tx Summary */
+#define DMA_INTR_ENA_NIE_TX_LOONGSON	0x00040000
+/* Normal Loongson Rx Summary */
+#define DMA_INTR_ENA_NIE_RX_LOONGSON	0x00020000
+
+#define DMA_INTR_NORMAL_LOONGSON	(DMA_INTR_ENA_NIE_TX_LOONGSON | \
+					 DMA_INTR_ENA_NIE_RX_LOONGSON | \
+					 DMA_INTR_ENA_RIE | DMA_INTR_ENA_TIE)
+
+/* Abnormal Loongson Tx Summary */
+#define DMA_INTR_ENA_AIE_TX_LOONGSON	0x00010000
+/* Abnormal Loongson Rx Summary */
+#define DMA_INTR_ENA_AIE_RX_LOONGSON	0x00008000
+
+#define DMA_INTR_ABNORMAL_LOONGSON	(DMA_INTR_ENA_AIE_TX_LOONGSON | \
+					 DMA_INTR_ENA_AIE_RX_LOONGSON | \
+					 DMA_INTR_ENA_FBE | DMA_INTR_ENA_UNE)
+
+#define DMA_INTR_DEFAULT_MASK_LOONGSON	(DMA_INTR_NORMAL_LOONGSON | \
+					 DMA_INTR_ABNORMAL_LOONGSON)
+
+/* Normal Loongson Tx Interrupt Summary */
+#define DMA_STATUS_NIS_TX_LOONGSON	0x00040000
+/* Normal Loongson Rx Interrupt Summary */
+#define DMA_STATUS_NIS_RX_LOONGSON	0x00020000
+
+/* Abnormal Loongson Tx Interrupt Summary */
+#define DMA_STATUS_AIS_TX_LOONGSON	0x00010000
+/* Abnormal Loongson Rx Interrupt Summary */
+#define DMA_STATUS_AIS_RX_LOONGSON	0x00008000
+
+/* Fatal Loongson Tx Bus Error Interrupt */
+#define DMA_STATUS_FBI_TX_LOONGSON	0x00002000
+/* Fatal Loongson Rx Bus Error Interrupt */
+#define DMA_STATUS_FBI_RX_LOONGSON	0x00001000
+
+#define DMA_STATUS_MSK_COMMON_LOONGSON	(DMA_STATUS_NIS_TX_LOONGSON | \
+					 DMA_STATUS_NIS_RX_LOONGSON | \
+					 DMA_STATUS_AIS_TX_LOONGSON | \
+					 DMA_STATUS_AIS_RX_LOONGSON | \
+					 DMA_STATUS_FBI_TX_LOONGSON | \
+					 DMA_STATUS_FBI_RX_LOONGSON)
+
+#define DMA_STATUS_MSK_RX_LOONGSON	(DMA_STATUS_ERI | DMA_STATUS_RWT | \
+					 DMA_STATUS_RPS | DMA_STATUS_RU | \
+					 DMA_STATUS_RI | DMA_STATUS_OVF | \
+					 DMA_STATUS_MSK_COMMON_LOONGSON)
+
+#define DMA_STATUS_MSK_TX_LOONGSON	(DMA_STATUS_ETI | DMA_STATUS_UNF | \
+					 DMA_STATUS_TJT | DMA_STATUS_TU | \
+					 DMA_STATUS_TPS | DMA_STATUS_TI | \
+					 DMA_STATUS_MSK_COMMON_LOONGSON)
+
+#define PCI_DEVICE_ID_LOONGSON_GMAC	0x7a03
+#define PCI_DEVICE_ID_LOONGSON_GNET	0x7a13
+#define LOONGSON_DWMAC_CORE_1_00	0x10	/* Loongson custom IP */
+#define CHANNEL_NUM			8
+
+struct loongson_data {
+	u32 gmac_verion;
+	struct device *dev;
+};
+
+struct stmmac_pci_info {
+	int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat);
+};
+
+static void loongson_default_data(struct pci_dev *pdev,
+				  struct plat_stmmacenet_data *plat)
 {
+	/* Get bus_id, this can be overloaded later */
+	plat->bus_id = (pci_domain_nr(pdev->bus) << 16) |
+		       PCI_DEVID(pdev->bus->number, pdev->devfn);
+
 	plat->clk_csr = 2;	/* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
 	plat->has_gmac = 1;
 	plat->force_sf_dma_mode = 1;

 	/* Set default value for multicast hash bins */
 	plat->multicast_filter_bins = HASH_TABLE_SIZE;
+	plat->mac_interface = PHY_INTERFACE_MODE_GMII;

 	/* Set default value for unicast filter entries */
 	plat->unicast_filter_entries = 1;
@@ -24,10 +97,6 @@ static int loongson_default_data(struct plat_stmmacenet_data *plat)
 	/* Set the maxmtu to a default of JUMBO_LEN */
 	plat->maxmtu = JUMBO_LEN;

-	/* Set default number of RX and TX queues to use */
-	plat->tx_queues_to_use = 1;
-	plat->rx_queues_to_use = 1;
-
 	/* Disable Priority config by default */
 	plat->tx_queues_cfg[0].use_prio = false;
 	plat->rx_queues_cfg[0].use_prio = false;
@@ -35,6 +104,9 @@ static int loongson_default_data(struct plat_stmmacenet_data *plat)
 	/* Disable RX queues routing by default */
 	plat->rx_queues_cfg[0].pkt_route = 0x0;

+	plat->clk_ref_rate = 125000000;
+	plat->clk_ptp_rate = 125000000;
+
 	/* Default to phy auto-detection */
 	plat->phy_addr = -1;

@@ -42,23 +114,346 @@ static int loongson_default_data(struct plat_stmmacenet_data *plat)
 	plat->dma_cfg->pblx8 = true;

 	plat->multicast_filter_bins = 256;
 }

+static int loongson_gmac_data(struct pci_dev *pdev,
+			      struct plat_stmmacenet_data *plat)
+{
+	loongson_default_data(pdev, plat);
+
+	plat->mdio_bus_data->phy_mask = 0;
+	plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
+
+	return 0;
+}
+
+static struct stmmac_pci_info loongson_gmac_pci_info = {
+	.setup = loongson_gmac_data,
+};
+
+static void loongson_gnet_dma_init_channel(struct stmmac_priv *priv,
+					   void __iomem *ioaddr,
+					   struct stmmac_dma_cfg *dma_cfg,
+					   u32 chan)
+{
+	int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
+	int rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
+	u32 value;
+
+	value = readl(ioaddr + DMA_CHAN_BUS_MODE(chan));
+
+	if (dma_cfg->pblx8)
+		value |= DMA_BUS_MODE_MAXPBL;
+
+	value |= DMA_BUS_MODE_USP;
+	value &= ~(DMA_BUS_MODE_PBL_MASK | DMA_BUS_MODE_RPBL_MASK);
+	value |= (txpbl << DMA_BUS_MODE_PBL_SHIFT);
+	value |= (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
+
+	/* Set the Fixed burst mode */
+	if (dma_cfg->fixed_burst)
+		value |= DMA_BUS_MODE_FB;
+
+	/* Mixed Burst has no effect when fb is set */
+	if (dma_cfg->mixed_burst)
+		value |= DMA_BUS_MODE_MB;
+
+	if (dma_cfg->atds)
+		value |= DMA_BUS_MODE_ATDS;
+
+	if (dma_cfg->aal)
+		value |= DMA_BUS_MODE_AAL;
+
+	writel(value, ioaddr + DMA_CHAN_BUS_MODE(chan));
+
+	/* Mask interrupts by writing to CSR7 */
+	writel(DMA_INTR_DEFAULT_MASK_LOONGSON, ioaddr +
+	       DMA_CHAN_INTR_ENA(chan));
+}
+
+static int loongson_gnet_dma_interrupt(struct stmmac_priv *priv,
+				       void __iomem *ioaddr,
+				       struct stmmac_extra_stats *x,
+				       u32 chan, u32 dir)
+{
+	struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats);
+	u32 abnor_intr_status;
+	u32 nor_intr_status;
+	u32 fb_intr_status;
+	u32 intr_status;
+	int ret = 0;
+
+	/* read the status register (CSR5) */
+	intr_status = readl(ioaddr + DMA_CHAN_STATUS(chan));
+
+	if (dir == DMA_DIR_RX)
+		intr_status &= DMA_STATUS_MSK_RX_LOONGSON;
+	else if (dir == DMA_DIR_TX)
+		intr_status &= DMA_STATUS_MSK_TX_LOONGSON;
+
+	nor_intr_status = intr_status & (DMA_STATUS_NIS_TX_LOONGSON |
+					 DMA_STATUS_NIS_RX_LOONGSON);
+	abnor_intr_status = intr_status & (DMA_STATUS_AIS_TX_LOONGSON |
+					   DMA_STATUS_AIS_RX_LOONGSON);
+	fb_intr_status = intr_status & (DMA_STATUS_FBI_TX_LOONGSON |
+					DMA_STATUS_FBI_RX_LOONGSON);
+
+	/* ABNORMAL interrupts */
+	if (unlikely(abnor_intr_status)) {
+		if (unlikely(intr_status & DMA_STATUS_UNF)) {
+			ret = tx_hard_error_bump_tc;
+			x->tx_undeflow_irq++;
+		}
+		if (unlikely(intr_status & DMA_STATUS_TJT))
+			x->tx_jabber_irq++;
+		if (unlikely(intr_status & DMA_STATUS_OVF))
+			x->rx_overflow_irq++;
+		if (unlikely(intr_status & DMA_STATUS_RU))
+			x->rx_buf_unav_irq++;
+		if (unlikely(intr_status & DMA_STATUS_RPS))
+			x->rx_process_stopped_irq++;
+		if (unlikely(intr_status & DMA_STATUS_RWT))
+			x->rx_watchdog_irq++;
+		if (unlikely(intr_status & DMA_STATUS_ETI))
+			x->tx_early_irq++;
+		if (unlikely(intr_status & DMA_STATUS_TPS)) {
+			x->tx_process_stopped_irq++;
+			ret = tx_hard_error;
+		}
+		if (unlikely(fb_intr_status)) {
+			x->fatal_bus_error_irq++;
+			ret = tx_hard_error;
+		}
+	}
+	/* TX/RX NORMAL interrupts */
+	if (likely(nor_intr_status)) {
+		if (likely(intr_status & DMA_STATUS_RI)) {
+			u32 value = readl(ioaddr + DMA_INTR_ENA);
+			/* to schedule NAPI on real RIE event. */
+			if (likely(value & DMA_INTR_ENA_RIE)) {
+				u64_stats_update_begin(&stats->syncp);
+				u64_stats_inc(&stats->rx_normal_irq_n[chan]);
+				u64_stats_update_end(&stats->syncp);
+				ret |= handle_rx;
+			}
+		}
+		if (likely(intr_status & DMA_STATUS_TI)) {
+			u64_stats_update_begin(&stats->syncp);
+			u64_stats_inc(&stats->tx_normal_irq_n[chan]);
+			u64_stats_update_end(&stats->syncp);
+			ret |= handle_tx;
+		}
+		if (unlikely(intr_status & DMA_STATUS_ERI))
+			x->rx_early_irq++;
+	}
+	/* Optional hardware blocks, interrupts should be disabled */
+	if (unlikely(intr_status &
+		     (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI)))
+		pr_warn("%s: unexpected status %08x\n", __func__, intr_status);
+
+	/* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */
+	writel((intr_status & 0x7ffff), ioaddr + DMA_CHAN_STATUS(chan));
+
+	return ret;
+}
+
+static void loongson_gnet_fix_speed(void *priv, unsigned int speed,
+				    unsigned int mode)
+{
+	struct loongson_data *ld = (struct loongson_data *)priv;
+	struct net_device *ndev = dev_get_drvdata(ld->dev);
+	struct stmmac_priv *ptr = netdev_priv(ndev);
+
+	/* The controller and PHY don't work well together.
+	 * We need to use the PS bit to check if the controller's status
+	 * is correct and reset PHY if necessary.
+	 * MAC_CTRL_REG.15 is defined by the GMAC_CONTROL_PS macro.
+	 */
+	if (speed == SPEED_1000) {
+		if (readl(ptr->ioaddr + MAC_CTRL_REG) &
+		    GMAC_CONTROL_PS)
+			/* Work around hardware bug, restart autoneg */
+			phy_restart_aneg(ndev->phydev);
+	}
+}
+
+static int loongson_gnet_data(struct pci_dev *pdev,
+			      struct plat_stmmacenet_data *plat)
+{
+	loongson_default_data(pdev, plat);
+
+	plat->phy_interface = PHY_INTERFACE_MODE_GMII;
+	plat->mdio_bus_data->phy_mask = ~(u32)BIT(2);
+	plat->fix_mac_speed = loongson_gnet_fix_speed;
+
+	/* GNET devices with dev revision 0x00 do not support manually
+	 * setting the speed to 1000.
+	 */
+	if (pdev->revision == 0x00)
+		plat->flags |= STMMAC_FLAG_DISABLE_FORCE_1000;
+
+	return 0;
+}
+
+static struct stmmac_pci_info loongson_gnet_pci_info = {
+	.setup = loongson_gnet_data,
+};
+
+static int loongson_dwmac_config_legacy(struct pci_dev *pdev,
+					struct plat_stmmacenet_data *plat,
+					struct stmmac_resources *res,
+					struct device_node *np)
+{
+	if (np) {
+		res->irq = of_irq_get_byname(np, "macirq");
+		if (res->irq < 0) {
+			dev_err(&pdev->dev, "IRQ macirq not found\n");
+			return -ENODEV;
+		}
+
+		res->wol_irq = of_irq_get_byname(np, "eth_wake_irq");
+		if (res->wol_irq < 0) {
+			dev_info(&pdev->dev,
+				 "IRQ eth_wake_irq not found, using macirq\n");
+			res->wol_irq = res->irq;
+		}
+
+		res->lpi_irq = of_irq_get_byname(np, "eth_lpi");
+		if (res->lpi_irq < 0) {
+			dev_err(&pdev->dev, "IRQ eth_lpi not found\n");
+			return -ENODEV;
+		}
+	} else {
+		res->irq = pdev->irq;
+		res->wol_irq = res->irq;
+	}
+
+	return 0;
+}
+
+static int loongson_dwmac_config_msi(struct pci_dev *pdev,
+				     struct plat_stmmacenet_data *plat,
+				     struct stmmac_resources *res,
+				     struct device_node *np)
+{
+	int i, ret, vecs;
+
+	vecs = roundup_pow_of_two(CHANNEL_NUM * 2 + 1);
+	ret = pci_alloc_irq_vectors(pdev, vecs, vecs, PCI_IRQ_MSI);
+	if (ret < 0) {
+		dev_info(&pdev->dev,
+			 "MSI enable failed, Fallback to legacy interrupt\n");
+		return loongson_dwmac_config_legacy(pdev, plat, res, np);
+	}
+
+	res->irq = pci_irq_vector(pdev, 0);
+	res->wol_irq = 0;
+
+	/* INT NAME | MAC | CH7 rx | CH7 tx | ... | CH0 rx | CH0 tx |
+	 * --------- ----- -------- -------- ... -------- --------
+	 * IRQ NUM  |  0  |   1    |   2    | ... |   15   |   16   |
+	 */
+	for (i = 0; i < CHANNEL_NUM; i++) {
+		res->rx_irq[CHANNEL_NUM - 1 - i] =
+			pci_irq_vector(pdev, 1 + i * 2);
+		res->tx_irq[CHANNEL_NUM - 1 - i] =
+			pci_irq_vector(pdev, 2 + i * 2);
+	}
+
+	plat->flags |= STMMAC_FLAG_MULTI_MSI_EN;
+
+	return 0;
+}
+
+static struct mac_device_info *loongson_dwmac_setup(void *apriv)
+{
+	struct stmmac_priv *priv = apriv;
+	struct mac_device_info *mac;
+	struct stmmac_dma_ops *dma;
+	struct loongson_data *ld;
+	struct pci_dev *pdev;
+
+	ld = priv->plat->bsp_priv;
+	pdev = to_pci_dev(priv->device);
+
+	mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL);
+	if (!mac)
+		return NULL;
+
+	dma = devm_kzalloc(priv->device, sizeof(*dma), GFP_KERNEL);
+	if (!dma)
+		return NULL;
+
+	/* The original IP-core version is 0x37 in all Loongson GNET
+	 * (ls2k2000 and ls7a2000), but the GNET HW designers have changed the
+	 * GMAC_VERSION.SNPSVER field to the custom 0x10 value on the Loongson
+	 * ls2k2000 MAC to emphasize the differences: multiple DMA-channels,
+	 * AV feature and GMAC_INT_STATUS CSR flags layout. Get back the
+	 * original value so the correct HW-interface would be selected.
+	 */
+	if (ld->gmac_verion == LOONGSON_DWMAC_CORE_1_00) {
+		priv->synopsys_id = DWMAC_CORE_3_70;
+		*dma = dwmac1000_dma_ops;
+		dma->init_chan = loongson_gnet_dma_init_channel;
+		dma->dma_interrupt = loongson_gnet_dma_interrupt;
+		mac->dma = dma;
+	}
+
+	priv->dev->priv_flags |= IFF_UNICAST_FLT;
+
+	/* Pre-initialize the respective "mac" fields as it's done in
+	 * dwmac1000_setup()
+	 */
+	mac->pcsr = priv->ioaddr;
+	mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
+	mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
+	mac->mcast_bits_log2 = 0;
+
+	if (mac->multicast_filter_bins)
+		mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
+
+	/* The GMAC devices with PCI ID 0x7a03 do not support any pause mode.
+	 * The GNET devices without CORE ID 0x10 do not support half-duplex.
+	 */
+	if (pdev->device == PCI_DEVICE_ID_LOONGSON_GMAC) {
+		mac->link.caps = MAC_10 | MAC_100 | MAC_1000;
+	} else {
+		if (ld->gmac_verion == LOONGSON_DWMAC_CORE_1_00)
+			mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+					 MAC_10 | MAC_100 | MAC_1000;
+		else
+			mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+					 MAC_10FD | MAC_100FD | MAC_1000FD;
+	}
+
+	mac->link.duplex = GMAC_CONTROL_DM;
+	mac->link.speed10 = GMAC_CONTROL_PS;
+	mac->link.speed100 = GMAC_CONTROL_PS | GMAC_CONTROL_FES;
+	mac->link.speed1000 = 0;
+	mac->link.speed_mask = GMAC_CONTROL_PS | GMAC_CONTROL_FES;
+	mac->mii.addr = GMAC_MII_ADDR;
+	mac->mii.data = GMAC_MII_DATA;
+	mac->mii.addr_shift = 11;
+	mac->mii.addr_mask = 0x0000F800;
+	mac->mii.reg_shift = 6;
+	mac->mii.reg_mask = 0x000007C0;
+	mac->mii.clk_csr_shift = 2;
+	mac->mii.clk_csr_mask = GENMASK(5, 2);
+
+	return mac;
+}
+
 static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	struct plat_stmmacenet_data *plat;
+	int ret, i, bus_id, phy_mode;
+	struct stmmac_pci_info *info;
 	struct stmmac_resources res;
+	struct loongson_data *ld;
 	struct device_node *np;
-	int ret, i, phy_mode;

 	np = dev_of_node(&pdev->dev);

-	if (!np) {
-		pr_info("dwmac_loongson_pci: No OF node\n");
-		return -ENODEV;
-	}
-
 	plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
 	if (!plat)
 		return -ENOMEM;
@@ -69,17 +464,13 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
 	if (!plat->mdio_bus_data)
 		return -ENOMEM;

-	plat->mdio_node = of_get_child_by_name(np, "mdio");
-	if (plat->mdio_node) {
-		dev_info(&pdev->dev, "Found MDIO subnode\n");
-		plat->mdio_bus_data->needs_reset = true;
-	}
-
 	plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg), GFP_KERNEL);
-	if (!plat->dma_cfg) {
-		ret = -ENOMEM;
-		goto err_put_node;
-	}
+	if (!plat->dma_cfg)
+		return -ENOMEM;
+
+	ld = devm_kzalloc(&pdev->dev, sizeof(*ld), GFP_KERNEL);
+	if (!ld)
+		return -ENOMEM;

 	/* Enable pci device */
 	ret = pci_enable_device(pdev);
@@ -98,55 +489,68 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
 		break;
 	}

-	plat->bus_id = of_alias_get_id(np, "ethernet");
-	if (plat->bus_id < 0)
-		plat->bus_id = pci_dev_id(pdev);
-
-	phy_mode = device_get_phy_mode(&pdev->dev);
-	if (phy_mode < 0) {
-		dev_err(&pdev->dev, "phy_mode not found\n");
-		ret = phy_mode;
-		goto err_disable_device;
-	}
-
-	plat->phy_interface = phy_mode;
-	plat->mac_interface = PHY_INTERFACE_MODE_GMII;
-
 	pci_set_master(pdev);

-	loongson_default_data(plat);
-	pci_enable_msi(pdev);
+	info = (struct stmmac_pci_info *)id->driver_data;
+	ret = info->setup(pdev, plat);
+	if (ret)
+		goto err_disable_device;
+
+	if (np) {
+		plat->mdio_node = of_get_child_by_name(np, "mdio");
+		if (plat->mdio_node) {
+			dev_info(&pdev->dev, "Found MDIO subnode\n");
+			plat->mdio_bus_data->needs_reset = true;
+		}
+
+		bus_id = of_alias_get_id(np, "ethernet");
+		if (bus_id >= 0)
+			plat->bus_id = bus_id;
+
+		phy_mode = device_get_phy_mode(&pdev->dev);
+		if (phy_mode < 0) {
+			dev_err(&pdev->dev, "phy_mode not found\n");
+			ret = phy_mode;
+			goto err_disable_device;
+		}
+		plat->phy_interface = phy_mode;
+	}

 	plat->bsp_priv = ld;
 	plat->setup = loongson_dwmac_setup;
 	ld->dev = &pdev->dev;

 	memset(&res, 0, sizeof(res));
 	res.addr = pcim_iomap_table(pdev)[0];
+	ld->gmac_verion = readl(res.addr + GMAC_VERSION) & 0xff;

-	res.irq = of_irq_get_byname(np, "macirq");
-	if (res.irq < 0) {
-		dev_err(&pdev->dev, "IRQ macirq not found\n");
-		ret = -ENODEV;
-		goto err_disable_msi;
-	}
+	switch (ld->gmac_verion) {
+	case LOONGSON_DWMAC_CORE_1_00:
+		plat->rx_queues_to_use = CHANNEL_NUM;
+		plat->tx_queues_to_use = CHANNEL_NUM;

-	res.wol_irq = of_irq_get_byname(np, "eth_wake_irq");
-	if (res.wol_irq < 0) {
-		dev_info(&pdev->dev, "IRQ eth_wake_irq not found, using macirq\n");
-		res.wol_irq = res.irq;
-	}
+		/* Only channel 0 supports checksum,
+		 * so turn off checksum to enable multiple channels.
+		 */
+		for (i = 1; i < CHANNEL_NUM; i++)
+			plat->tx_queues_cfg[i].coe_unsupported = 1;

-	res.lpi_irq = of_irq_get_byname(np, "eth_lpi");
-	if (res.lpi_irq < 0) {
-		dev_err(&pdev->dev, "IRQ eth_lpi not found\n");
-		ret = -ENODEV;
-		goto err_disable_msi;
+		ret = loongson_dwmac_config_msi(pdev, plat, &res, np);
+		break;
+	default:	/* 0x35 device and 0x37 device. */
+		plat->tx_queues_to_use = 1;
+		plat->rx_queues_to_use = 1;
+
+		ret = loongson_dwmac_config_legacy(pdev, plat, &res, np);
+		break;
 	}

 	ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
 	if (ret)
-		goto err_disable_msi;
+		goto err_disable_device;

 	return ret;

-err_disable_msi:
-	pci_disable_msi(pdev);
 err_disable_device:
 	pci_disable_device(pdev);
-err_put_node:
@@ -213,7 +617,8 @@ static SIMPLE_DEV_PM_OPS(loongson_dwmac_pm_ops, loongson_dwmac_suspend,
 			 loongson_dwmac_resume);

 static const struct pci_device_id loongson_dwmac_id_table[] = {
-	{ PCI_VDEVICE(LOONGSON, 0x7a03) },
+	{ PCI_DEVICE_DATA(LOONGSON, GMAC, &loongson_gmac_pci_info) },
+	{ PCI_DEVICE_DATA(LOONGSON, GNET, &loongson_gnet_pci_info) },
 	{}
 };
 MODULE_DEVICE_TABLE(pci, loongson_dwmac_id_table);
@@ -232,4 +637,5 @@ module_pci_driver(loongson_dwmac_driver);

 MODULE_DESCRIPTION("Loongson DWMAC PCI driver");
 MODULE_AUTHOR("Qing Zhang <zhangqing@loongson.cn>");
+MODULE_AUTHOR("Yanteng Si <siyanteng@loongson.cn>");
 MODULE_LICENSE("GPL v2");

@@ -299,7 +299,7 @@ static int sun8i_dwmac_dma_reset(void __iomem *ioaddr)
 * Called from stmmac via stmmac_dma_ops->init
 */
 static void sun8i_dwmac_dma_init(void __iomem *ioaddr,
-				 struct stmmac_dma_cfg *dma_cfg, int atds)
+				 struct stmmac_dma_cfg *dma_cfg)
 {
 	writel(EMAC_RX_INT | EMAC_TX_INT, ioaddr + EMAC_INT_EN);
 	writel(0x1FFFFFF, ioaddr + EMAC_INT_STA);
@@ -395,7 +395,7 @@ static void sun8i_dwmac_dma_start_tx(struct stmmac_priv *priv,
 	writel(v, ioaddr + EMAC_TX_CTL1);
 }

-static void sun8i_dwmac_enable_dma_transmission(void __iomem *ioaddr)
+static void sun8i_dwmac_enable_dma_transmission(void __iomem *ioaddr, u32 chan)
 {
 	u32 v;

@@ -70,15 +70,17 @@ static void dwmac1000_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
 	writel(value, ioaddr + DMA_AXI_BUS_MODE);
 }

-static void dwmac1000_dma_init(void __iomem *ioaddr,
-			       struct stmmac_dma_cfg *dma_cfg, int atds)
+static void dwmac1000_dma_init_channel(struct stmmac_priv *priv,
+				       void __iomem *ioaddr,
+				       struct stmmac_dma_cfg *dma_cfg, u32 chan)
 {
-	u32 value = readl(ioaddr + DMA_BUS_MODE);
 	int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
 	int rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
+	u32 value;

-	/*
-	 * Set the DMA PBL (Programmable Burst Length) mode.
+	value = readl(ioaddr + DMA_CHAN_BUS_MODE(chan));
+
+	/* Set the DMA PBL (Programmable Burst Length) mode.
 	 *
 	 * Note: before stmmac core 3.50 this mode bit was 4xPBL, and
 	 * post 3.5 mode bit acts as 8*PBL.
@@ -98,16 +100,16 @@ static void dwmac1000_dma_init(void __iomem *ioaddr,
 	if (dma_cfg->mixed_burst)
 		value |= DMA_BUS_MODE_MB;

-	if (atds)
+	if (dma_cfg->atds)
 		value |= DMA_BUS_MODE_ATDS;

 	if (dma_cfg->aal)
 		value |= DMA_BUS_MODE_AAL;

-	writel(value, ioaddr + DMA_BUS_MODE);
+	writel(value, ioaddr + DMA_CHAN_BUS_MODE(chan));

 	/* Mask interrupts by writing to CSR7 */
-	writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
+	writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_CHAN_INTR_ENA(chan));
 }

 static void dwmac1000_dma_init_rx(struct stmmac_priv *priv,
@@ -116,7 +118,7 @@ static void dwmac1000_dma_init_rx(struct stmmac_priv *priv,
 			dma_addr_t dma_rx_phy, u32 chan)
 {
 	/* RX descriptor base address list must be written into DMA CSR3 */
-	writel(lower_32_bits(dma_rx_phy), ioaddr + DMA_RCV_BASE_ADDR);
+	writel(lower_32_bits(dma_rx_phy), ioaddr + DMA_CHAN_RCV_BASE_ADDR(chan));
 }

 static void dwmac1000_dma_init_tx(struct stmmac_priv *priv,
@@ -125,7 +127,7 @@ static void dwmac1000_dma_init_tx(struct stmmac_priv *priv,
 			dma_addr_t dma_tx_phy, u32 chan)
 {
 	/* TX descriptor base address list must be written into DMA CSR4 */
-	writel(lower_32_bits(dma_tx_phy), ioaddr + DMA_TX_BASE_ADDR);
+	writel(lower_32_bits(dma_tx_phy), ioaddr + DMA_CHAN_TX_BASE_ADDR(chan));
 }

 static u32 dwmac1000_configure_fc(u32 csr6, int rxfifosz)
@@ -153,7 +155,7 @@ static void dwmac1000_dma_operation_mode_rx(struct stmmac_priv *priv,
 					    void __iomem *ioaddr, int mode,
 					    u32 channel, int fifosz, u8 qmode)
 {
-	u32 csr6 = readl(ioaddr + DMA_CONTROL);
+	u32 csr6 = readl(ioaddr + DMA_CHAN_CONTROL(channel));

 	if (mode == SF_DMA_MODE) {
 		pr_debug("GMAC: enable RX store and forward mode\n");
@@ -175,14 +177,14 @@ static void dwmac1000_dma_operation_mode_rx(struct stmmac_priv *priv,
 	/* Configure flow control based on rx fifo size */
 	csr6 = dwmac1000_configure_fc(csr6, fifosz);

-	writel(csr6, ioaddr + DMA_CONTROL);
+	writel(csr6, ioaddr + DMA_CHAN_CONTROL(channel));
 }

 static void dwmac1000_dma_operation_mode_tx(struct stmmac_priv *priv,
 					    void __iomem *ioaddr, int mode,
 					    u32 channel, int fifosz, u8 qmode)
 {
-	u32 csr6 = readl(ioaddr + DMA_CONTROL);
+	u32 csr6 = readl(ioaddr + DMA_CHAN_CONTROL(channel));

 	if (mode == SF_DMA_MODE) {
 		pr_debug("GMAC: enable TX store and forward mode\n");
@@ -209,7 +211,7 @@ static void dwmac1000_dma_operation_mode_tx(struct stmmac_priv *priv,
 		csr6 |= DMA_CONTROL_TTC_256;
 	}

-	writel(csr6, ioaddr + DMA_CONTROL);
+	writel(csr6, ioaddr + DMA_CHAN_CONTROL(channel));
 }

 static void dwmac1000_dump_dma_regs(struct stmmac_priv *priv,
@@ -271,12 +273,12 @@ static int dwmac1000_get_hw_feature(void __iomem *ioaddr,
 static void dwmac1000_rx_watchdog(struct stmmac_priv *priv,
 				  void __iomem *ioaddr, u32 riwt, u32 queue)
 {
-	writel(riwt, ioaddr + DMA_RX_WATCHDOG);
+	writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(queue));
 }

 const struct stmmac_dma_ops dwmac1000_dma_ops = {
 	.reset = dwmac_dma_reset,
-	.init = dwmac1000_dma_init,
+	.init_chan = dwmac1000_dma_init_channel,
 	.init_rx_chan = dwmac1000_dma_init_rx,
 	.init_tx_chan = dwmac1000_dma_init_tx,
 	.axi = dwmac1000_dma_axi,
@@ -294,3 +296,4 @@ const struct stmmac_dma_ops dwmac1000_dma_ops = {
 	.get_hw_feature = dwmac1000_get_hw_feature,
 	.rx_watchdog = dwmac1000_rx_watchdog,
 };
+EXPORT_SYMBOL_GPL(dwmac1000_dma_ops);

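Dropping dwmac1000's whole-controller .init hook in favour of .init_chan means the core has to program every channel's CSRs individually. A condensed sketch of the calling pattern (the real loop lives in stmmac_init_dma_engine(); stmmac_init_chan() is the existing hwif wrapper, the function name here is made up):

	/* Simplified sketch: initialise each DMA channel through .init_chan. */
	static void foo_init_all_channels(struct stmmac_priv *priv)
	{
		u32 chan;

		for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
			stmmac_init_chan(priv, priv->ioaddr,
					 priv->plat->dma_cfg, chan);
	}
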
@@ -19,7 +19,7 @@
 #include "dwmac_dma.h"

 static void dwmac100_dma_init(void __iomem *ioaddr,
-			      struct stmmac_dma_cfg *dma_cfg, int atds)
+			      struct stmmac_dma_cfg *dma_cfg)
 {
 	/* Enable Application Access by writing to DMA CSR0 */
 	writel(DMA_BUS_MODE_DEFAULT | (dma_cfg->pbl << DMA_BUS_MODE_PBL_SHIFT),

@@ -68,7 +68,7 @@ static void dwmac4_core_init(struct mac_device_info *hw,
 	init_waitqueue_head(&priv->tstamp_busy_wait);
 }

-static void dwmac4_phylink_get_caps(struct stmmac_priv *priv)
+static void dwmac4_update_caps(struct stmmac_priv *priv)
 {
 	if (priv->plat->tx_queues_to_use > 1)
 		priv->hw->link.caps &= ~(MAC_10HD | MAC_100HD | MAC_1000HD);
@@ -1161,7 +1161,7 @@ static int dwmac4_config_l4_filter(struct mac_device_info *hw, u32 filter_no,

 const struct stmmac_ops dwmac4_ops = {
 	.core_init = dwmac4_core_init,
-	.phylink_get_caps = dwmac4_phylink_get_caps,
+	.update_caps = dwmac4_update_caps,
 	.set_mac = stmmac_set_mac,
 	.rx_ipc = dwmac4_rx_ipc_enable,
 	.rx_queue_enable = dwmac4_rx_queue_enable,
@@ -1204,7 +1204,7 @@ const struct stmmac_ops dwmac4_ops = {

 const struct stmmac_ops dwmac410_ops = {
 	.core_init = dwmac4_core_init,
-	.phylink_get_caps = dwmac4_phylink_get_caps,
+	.update_caps = dwmac4_update_caps,
 	.set_mac = stmmac_dwmac4_set_mac,
 	.rx_ipc = dwmac4_rx_ipc_enable,
 	.rx_queue_enable = dwmac4_rx_queue_enable,
@@ -1253,7 +1253,7 @@ const struct stmmac_ops dwmac410_ops = {

 const struct stmmac_ops dwmac510_ops = {
 	.core_init = dwmac4_core_init,
-	.phylink_get_caps = dwmac4_phylink_get_caps,
+	.update_caps = dwmac4_update_caps,
 	.set_mac = stmmac_dwmac4_set_mac,
 	.rx_ipc = dwmac4_rx_ipc_enable,
 	.rx_queue_enable = dwmac4_rx_queue_enable,

@@ -153,7 +153,7 @@ static void dwmac410_dma_init_channel(struct stmmac_priv *priv,
 }

 static void dwmac4_dma_init(void __iomem *ioaddr,
-			    struct stmmac_dma_cfg *dma_cfg, int atds)
+			    struct stmmac_dma_cfg *dma_cfg)
 {
 	u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);

@@ -22,6 +22,23 @@
 #define DMA_INTR_ENA		0x0000101c	/* Interrupt Enable */
 #define DMA_MISSED_FRAME_CTR	0x00001020	/* Missed Frame Counter */

+/* The following DMA defines are channel-oriented */
+#define DMA_CHAN_BASE_OFFSET	0x100
+
+static inline u32 dma_chan_base_addr(u32 base, u32 chan)
+{
+	return base + chan * DMA_CHAN_BASE_OFFSET;
+}
+
+#define DMA_CHAN_XMT_POLL_DEMAND(chan)	dma_chan_base_addr(DMA_XMT_POLL_DEMAND, chan)
+#define DMA_CHAN_INTR_ENA(chan)		dma_chan_base_addr(DMA_INTR_ENA, chan)
+#define DMA_CHAN_CONTROL(chan)		dma_chan_base_addr(DMA_CONTROL, chan)
+#define DMA_CHAN_STATUS(chan)		dma_chan_base_addr(DMA_STATUS, chan)
+#define DMA_CHAN_BUS_MODE(chan)		dma_chan_base_addr(DMA_BUS_MODE, chan)
+#define DMA_CHAN_RCV_BASE_ADDR(chan)	dma_chan_base_addr(DMA_RCV_BASE_ADDR, chan)
+#define DMA_CHAN_TX_BASE_ADDR(chan)	dma_chan_base_addr(DMA_TX_BASE_ADDR, chan)
+#define DMA_CHAN_RX_WATCHDOG(chan)	dma_chan_base_addr(DMA_RX_WATCHDOG, chan)
+
 /* SW Reset */
 #define DMA_BUS_MODE_SFT_RESET	0x00000001	/* Software Reset */

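For chan == 0, every DMA_CHAN_* macro above collapses to the legacy single-channel CSR address, which is why the existing non-Loongson dwmac1000 users see no behavioural change. A tiny standalone sanity check (userspace C; the two literal offsets are copied from the defines above):

	#include <assert.h>
	#include <stdint.h>

	#define DMA_INTR_ENA		0x0000101c
	#define DMA_CHAN_BASE_OFFSET	0x100

	static inline uint32_t dma_chan_base_addr(uint32_t base, uint32_t chan)
	{
		return base + chan * DMA_CHAN_BASE_OFFSET;
	}

	int main(void)
	{
		/* Channel 0 aliases the legacy register... */
		assert(dma_chan_base_addr(DMA_INTR_ENA, 0) == 0x101c);
		/* ...and each further channel's CSR block is 0x100 bytes apart. */
		assert(dma_chan_base_addr(DMA_INTR_ENA, 7) == 0x171c);
		return 0;
	}
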
@@ -152,7 +169,7 @@
 #define NUM_DWMAC1000_DMA_REGS	23
 #define NUM_DWMAC4_DMA_REGS	27

-void dwmac_enable_dma_transmission(void __iomem *ioaddr);
+void dwmac_enable_dma_transmission(void __iomem *ioaddr, u32 chan);
 void dwmac_enable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
 			  u32 chan, bool rx, bool tx);
 void dwmac_disable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
@@ -168,5 +185,4 @@ void dwmac_dma_stop_rx(struct stmmac_priv *priv, void __iomem *ioaddr,
 int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
 			struct stmmac_extra_stats *x, u32 chan, u32 dir);
 int dwmac_dma_reset(void __iomem *ioaddr);
-
 #endif /* __DWMAC_DMA_H__ */

@@ -28,65 +28,65 @@ int dwmac_dma_reset(void __iomem *ioaddr)
 }

 /* CSR1 enables the transmit DMA to check for new descriptor */
-void dwmac_enable_dma_transmission(void __iomem *ioaddr)
+void dwmac_enable_dma_transmission(void __iomem *ioaddr, u32 chan)
 {
-	writel(1, ioaddr + DMA_XMT_POLL_DEMAND);
+	writel(1, ioaddr + DMA_CHAN_XMT_POLL_DEMAND(chan));
 }

 void dwmac_enable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
 			  u32 chan, bool rx, bool tx)
 {
-	u32 value = readl(ioaddr + DMA_INTR_ENA);
+	u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));

 	if (rx)
 		value |= DMA_INTR_DEFAULT_RX;
 	if (tx)
 		value |= DMA_INTR_DEFAULT_TX;

-	writel(value, ioaddr + DMA_INTR_ENA);
+	writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan));
 }

 void dwmac_disable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
 			   u32 chan, bool rx, bool tx)
 {
-	u32 value = readl(ioaddr + DMA_INTR_ENA);
+	u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));

 	if (rx)
 		value &= ~DMA_INTR_DEFAULT_RX;
 	if (tx)
 		value &= ~DMA_INTR_DEFAULT_TX;

-	writel(value, ioaddr + DMA_INTR_ENA);
+	writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan));
 }

 void dwmac_dma_start_tx(struct stmmac_priv *priv, void __iomem *ioaddr,
 			u32 chan)
 {
-	u32 value = readl(ioaddr + DMA_CONTROL);
+	u32 value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
 	value |= DMA_CONTROL_ST;
-	writel(value, ioaddr + DMA_CONTROL);
+	writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
 }

 void dwmac_dma_stop_tx(struct stmmac_priv *priv, void __iomem *ioaddr, u32 chan)
 {
-	u32 value = readl(ioaddr + DMA_CONTROL);
+	u32 value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
 	value &= ~DMA_CONTROL_ST;
-	writel(value, ioaddr + DMA_CONTROL);
+	writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
 }

 void dwmac_dma_start_rx(struct stmmac_priv *priv, void __iomem *ioaddr,
 			u32 chan)
 {
-	u32 value = readl(ioaddr + DMA_CONTROL);
+	u32 value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
 	value |= DMA_CONTROL_SR;
-	writel(value, ioaddr + DMA_CONTROL);
+	writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
 }

 void dwmac_dma_stop_rx(struct stmmac_priv *priv, void __iomem *ioaddr, u32 chan)
 {
-	u32 value = readl(ioaddr + DMA_CONTROL);
+	u32 value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
 	value &= ~DMA_CONTROL_SR;
-	writel(value, ioaddr + DMA_CONTROL);
+	writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
 }

 #ifdef DWMAC_DMA_DEBUG
@@ -165,7 +165,7 @@ int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
 	struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats);
 	int ret = 0;
 	/* read the status register (CSR5) */
-	u32 intr_status = readl(ioaddr + DMA_STATUS);
+	u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(chan));

 #ifdef DWMAC_DMA_DEBUG
 	/* Enable it to monitor DMA rx/tx status in case of critical problems */

@@ -20,7 +20,7 @@ static int dwxgmac2_dma_reset(void __iomem *ioaddr)
 }

 static void dwxgmac2_dma_init(void __iomem *ioaddr,
-			      struct stmmac_dma_cfg *dma_cfg, int atds)
+			      struct stmmac_dma_cfg *dma_cfg)
 {
 	u32 value = readl(ioaddr + XGMAC_DMA_SYSBUS_MODE);

@@ -167,8 +167,7 @@ struct dma_features;
 struct stmmac_dma_ops {
 	/* DMA core initialization */
 	int (*reset)(void __iomem *ioaddr);
-	void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg,
-		     int atds);
+	void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg);
 	void (*init_chan)(struct stmmac_priv *priv, void __iomem *ioaddr,
 			  struct stmmac_dma_cfg *dma_cfg, u32 chan);
 	void (*init_rx_chan)(struct stmmac_priv *priv, void __iomem *ioaddr,
@@ -190,7 +189,7 @@ struct stmmac_dma_ops {
 	/* To track extra statistic (if supported) */
 	void (*dma_diagnostic_fr)(struct stmmac_extra_stats *x,
 				  void __iomem *ioaddr);
-	void (*enable_dma_transmission) (void __iomem *ioaddr);
+	void (*enable_dma_transmission)(void __iomem *ioaddr, u32 chan);
 	void (*enable_dma_irq)(struct stmmac_priv *priv, void __iomem *ioaddr,
 			       u32 chan, bool rx, bool tx);
 	void (*disable_dma_irq)(struct stmmac_priv *priv, void __iomem *ioaddr,
@@ -300,8 +299,8 @@ struct stmmac_est;
 struct stmmac_ops {
 	/* MAC core initialization */
 	void (*core_init)(struct mac_device_info *hw, struct net_device *dev);
-	/* Get phylink capabilities */
-	void (*phylink_get_caps)(struct stmmac_priv *priv);
+	/* Update MAC capabilities */
+	void (*update_caps)(struct stmmac_priv *priv);
 	/* Enable the MAC RX/TX */
 	void (*set_mac)(void __iomem *ioaddr, bool enable);
 	/* Enable and verify that the IPC module is supported */
@@ -423,8 +422,8 @@ struct stmmac_ops {

 #define stmmac_core_init(__priv, __args...) \
 	stmmac_do_void_callback(__priv, mac, core_init, __args)
-#define stmmac_mac_phylink_get_caps(__priv) \
-	stmmac_do_void_callback(__priv, mac, phylink_get_caps, __priv)
+#define stmmac_mac_update_caps(__priv) \
+	stmmac_do_void_callback(__priv, mac, update_caps, __priv)
 #define stmmac_mac_set(__priv, __args...) \
 	stmmac_do_void_callback(__priv, mac, set_mac, __args)
 #define stmmac_rx_ipc(__priv, __args...) \

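The renamed stmmac_mac_update_caps() wrapper follows the driver's usual hwif indirection: a statement-expression macro that NULL-checks the ops pointer before calling through it. A stripped-down sketch of that pattern (not the literal hwif.h macro; names are placeholders):

	/* Stripped-down sketch of the hwif.h dispatch pattern. */
	#define foo_do_void_callback(__priv, __op, __args...)	\
	({							\
		int __result = -EINVAL;			\
		if ((__priv)->ops && (__priv)->ops->__op) {	\
			(__priv)->ops->__op(__args);		\
			__result = 0;				\
		}						\
		__result;					\
	})
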
@@ -412,6 +412,12 @@ stmmac_ethtool_set_link_ksettings(struct net_device *dev,
 		return 0;
 	}

+	if (priv->plat->flags & STMMAC_FLAG_DISABLE_FORCE_1000) {
+		if (cmd->base.speed == SPEED_1000 &&
+		    cmd->base.autoneg != AUTONEG_ENABLE)
+			return -EOPNOTSUPP;
+	}
+
 	return phylink_ethtool_ksettings_set(priv->phylink, cmd);
 }

@@ -936,6 +936,22 @@ static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
 			priv->pause, tx_cnt);
 }

+static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
+					 phy_interface_t interface)
+{
+	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+
+	/* Refresh the MAC-specific capabilities */
+	stmmac_mac_update_caps(priv);
+
+	config->mac_capabilities = priv->hw->link.caps;
+
+	if (priv->plat->max_speed)
+		phylink_limit_mac_speed(config, priv->plat->max_speed);
+
+	return config->mac_capabilities;
+}
+
 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
 						 phy_interface_t interface)
 {
@@ -1105,6 +1121,7 @@ static void stmmac_mac_link_up(struct phylink_config *config,
 }

 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
+	.mac_get_caps = stmmac_mac_get_caps,
 	.mac_select_pcs = stmmac_mac_select_pcs,
 	.mac_config = stmmac_mac_config,
 	.mac_link_down = stmmac_mac_link_down,
@@ -1204,7 +1221,6 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
 	int mode = priv->plat->phy_interface;
 	struct fwnode_handle *fwnode;
 	struct phylink *phylink;
-	int max_speed;

 	priv->phylink_config.dev = &priv->dev->dev;
 	priv->phylink_config.type = PHYLINK_NETDEV;
@@ -1225,15 +1241,6 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
 		xpcs_get_interfaces(priv->hw->xpcs,
 				    priv->phylink_config.supported_interfaces);

-	/* Get the MAC specific capabilities */
-	stmmac_mac_phylink_get_caps(priv);
-
-	priv->phylink_config.mac_capabilities = priv->hw->link.caps;
-
-	max_speed = priv->plat->max_speed;
-	if (max_speed)
-		phylink_limit_mac_speed(&priv->phylink_config, max_speed);
-
 	fwnode = priv->plat->port_node;
 	if (!fwnode)
 		fwnode = dev_fwnode(priv->device);
@@ -2505,7 +2512,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
 			       true, priv->mode, true, true,
 			       xdp_desc.len);

-		stmmac_enable_dma_transmission(priv, priv->ioaddr);
+		stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);

 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
 		entry = tx_q->cur_tx;
@@ -2937,7 +2944,6 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
 	struct stmmac_rx_queue *rx_q;
 	struct stmmac_tx_queue *tx_q;
 	u32 chan = 0;
-	int atds = 0;
 	int ret = 0;

 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
@@ -2946,7 +2952,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
 	}

 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
-		atds = 1;
+		priv->plat->dma_cfg->atds = 1;

 	ret = stmmac_reset(priv, priv->ioaddr);
 	if (ret) {
@@ -2955,7 +2961,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
 	}

 	/* DMA Configuration */
-	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
+	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg);

 	if (priv->plat->axi)
 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
@@ -4624,7 +4630,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)

 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);

-	stmmac_enable_dma_transmission(priv, priv->ioaddr);
+	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);

 	stmmac_flush_tx_descriptors(priv, queue);
 	stmmac_tx_timer_arm(priv, queue);
@@ -4843,7 +4849,7 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
 		u64_stats_update_end(&txq_stats->q_syncp);
 	}

-	stmmac_enable_dma_transmission(priv, priv->ioaddr);
+	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);

 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
 	tx_q->cur_tx = entry;
@@ -7180,7 +7186,6 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
 {
 	struct stmmac_priv *priv = netdev_priv(dev);
 	int ret = 0, i;
-	int max_speed;

 	if (netif_running(dev))
 		stmmac_release(dev);
@@ -7194,14 +7199,6 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
 		priv->rss.table[i] = ethtool_rxfh_indir_default(i,
 								rx_cnt);

-	stmmac_mac_phylink_get_caps(priv);
-
-	priv->phylink_config.mac_capabilities = priv->hw->link.caps;
-
-	max_speed = priv->plat->max_speed;
-	if (max_speed)
-		phylink_limit_mac_speed(&priv->phylink_config, max_speed);
-
 	stmmac_napi_add(dev);

 	if (netif_running(dev))

@@ -657,6 +657,7 @@ static int phylink_validate_mac_and_pcs(struct phylink *pl,
 					unsigned long *supported,
 					struct phylink_link_state *state)
 {
+	unsigned long capabilities;
 	struct phylink_pcs *pcs;
 	int ret;

@@ -696,10 +697,17 @@ static int phylink_validate_mac_and_pcs(struct phylink *pl,
 	}

 	/* Then validate the link parameters with the MAC */
-	if (pl->mac_ops->validate)
+	if (pl->mac_ops->validate) {
 		pl->mac_ops->validate(pl->config, supported, state);
-	else
-		phylink_generic_validate(pl->config, supported, state);
+	} else {
+		if (pl->mac_ops->mac_get_caps)
+			capabilities = pl->mac_ops->mac_get_caps(pl->config,
+							state->interface);
+		else
+			capabilities = pl->config->mac_capabilities;
+
+		phylink_validate_mask_caps(supported, state, capabilities);
+	}

 	return phylink_is_empty_linkmode(supported) ? -EINVAL : 0;
 }

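Condensed to its decision logic, the new else-branch gives a three-level fallback: a legacy ->validate() wins if the driver still provides one; otherwise ->mac_get_caps() supplies per-interface capabilities; otherwise the static config->mac_capabilities mask is used. A minimal paraphrase of the hunk above (not an additional API; pl->mac_ops and pl->config are phylink-internal):

	/* Sketch: resolve the MAC capability mask for one interface mode. */
	static unsigned long foo_resolve_caps(struct phylink *pl,
					      struct phylink_link_state *state)
	{
		if (pl->mac_ops->mac_get_caps)
			return pl->mac_ops->mac_get_caps(pl->config,
							 state->interface);

		return pl->config->mac_capabilities;
	}
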
@@ -216,10 +216,8 @@ static void hisi_pcie_pmu_writeq(struct hisi_pcie_pmu *pcie_pmu, u32 reg_offset,
 	writeq_relaxed(val, pcie_pmu->base + offset);
 }

-static void hisi_pcie_pmu_config_filter(struct perf_event *event)
+static u64 hisi_pcie_pmu_get_event_ctrl_val(struct perf_event *event)
 {
-	struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
-	struct hw_perf_event *hwc = &event->hw;
 	u64 port, trig_len, thr_len, len_mode;
 	u64 reg = HISI_PCIE_INIT_SET;

@@ -256,10 +254,19 @@ static void hisi_pcie_pmu_config_filter(struct perf_event *event)
 	else
 		reg |= FIELD_PREP(HISI_PCIE_LEN_M, HISI_PCIE_LEN_M_DEFAULT);

+	return reg;
+}
+
+static void hisi_pcie_pmu_config_event_ctrl(struct perf_event *event)
+{
+	struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+	u64 reg = hisi_pcie_pmu_get_event_ctrl_val(event);
+
 	hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EVENT_CTRL, hwc->idx, reg);
 }

-static void hisi_pcie_pmu_clear_filter(struct perf_event *event)
+static void hisi_pcie_pmu_clear_event_ctrl(struct perf_event *event)
 {
 	struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
 	struct hw_perf_event *hwc = &event->hw;
@@ -299,18 +306,24 @@ static bool hisi_pcie_pmu_valid_filter(struct perf_event *event,
 	if (hisi_pcie_get_trig_len(event) > HISI_PCIE_TRIG_MAX_VAL)
 		return false;

-	if (requester_id) {
-		if (!hisi_pcie_pmu_valid_requester_id(pcie_pmu, requester_id))
-			return false;
-	}
+	/* Need to explicitly set filter of "port" or "bdf" */
+	if (!hisi_pcie_get_port(event) &&
+	    !hisi_pcie_pmu_valid_requester_id(pcie_pmu, requester_id))
+		return false;

 	return true;
 }

+/*
+ * Check whether two events share the same config. The same config means not
+ * only the event code, but also the filter settings of the two events are
+ * the same.
+ */
 static bool hisi_pcie_pmu_cmp_event(struct perf_event *target,
 				    struct perf_event *event)
 {
-	return hisi_pcie_get_real_event(target) == hisi_pcie_get_real_event(event);
+	return hisi_pcie_pmu_get_event_ctrl_val(target) ==
+	       hisi_pcie_pmu_get_event_ctrl_val(event);
 }

 static bool hisi_pcie_pmu_validate_event_group(struct perf_event *event)
@@ -385,40 +398,32 @@ static u64 hisi_pcie_pmu_read_counter(struct perf_event *event)
 	return hisi_pcie_pmu_readq(pcie_pmu, event->hw.event_base, idx);
 }

-static int hisi_pcie_pmu_find_related_event(struct hisi_pcie_pmu *pcie_pmu,
-					    struct perf_event *event)
+/*
+ * Check all work events, if a relevant event is found then we return it
+ * first, otherwise return the first idle counter (need to reset).
+ */
+static int hisi_pcie_pmu_get_event_idx(struct hisi_pcie_pmu *pcie_pmu,
+				       struct perf_event *event)
 {
+	int first_idle = -EAGAIN;
 	struct perf_event *sibling;
 	int idx;

 	for (idx = 0; idx < HISI_PCIE_MAX_COUNTERS; idx++) {
 		sibling = pcie_pmu->hw_events[idx];
-		if (!sibling)
-			continue;
-
-		if (!hisi_pcie_pmu_cmp_event(sibling, event))
+		if (!sibling) {
+			if (first_idle == -EAGAIN)
+				first_idle = idx;
 			continue;
+		}

 		/* Related events must be used in group */
-		if (sibling->group_leader == event->group_leader)
-			return idx;
-		else
-			return -EINVAL;
-	}
-
-	return idx;
-}
-
-static int hisi_pcie_pmu_get_event_idx(struct hisi_pcie_pmu *pcie_pmu)
-{
-	int idx;
-
-	for (idx = 0; idx < HISI_PCIE_MAX_COUNTERS; idx++) {
-		if (!pcie_pmu->hw_events[idx])
+		if (hisi_pcie_pmu_cmp_event(sibling, event) &&
+		    sibling->group_leader == event->group_leader)
 			return idx;
 	}

-	return -EINVAL;
+	return first_idle;
 }

 static void hisi_pcie_pmu_event_update(struct perf_event *event)
@@ -505,7 +510,7 @@ static void hisi_pcie_pmu_start(struct perf_event *event, int flags)
 	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
 	hwc->state = 0;

-	hisi_pcie_pmu_config_filter(event);
+	hisi_pcie_pmu_config_event_ctrl(event);
 	hisi_pcie_pmu_enable_counter(pcie_pmu, hwc);
 	hisi_pcie_pmu_enable_int(pcie_pmu, hwc);
 	hisi_pcie_pmu_set_period(event);
@@ -526,7 +531,7 @@ static void hisi_pcie_pmu_stop(struct perf_event *event, int flags)
 	hisi_pcie_pmu_event_update(event);
 	hisi_pcie_pmu_disable_int(pcie_pmu, hwc);
 	hisi_pcie_pmu_disable_counter(pcie_pmu, hwc);
-	hisi_pcie_pmu_clear_filter(event);
+	hisi_pcie_pmu_clear_event_ctrl(event);
 	WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
 	hwc->state |= PERF_HES_STOPPED;

@@ -544,27 +549,18 @@ static int hisi_pcie_pmu_add(struct perf_event *event, int flags)

 	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;

-	/* Check all working events to find a related event. */
-	idx = hisi_pcie_pmu_find_related_event(pcie_pmu, event);
-	if (idx < 0)
-		return idx;
-
-	/* Current event shares an enabled counter with the related event */
-	if (idx < HISI_PCIE_MAX_COUNTERS) {
-		hwc->idx = idx;
-		goto start_count;
-	}
-
-	idx = hisi_pcie_pmu_get_event_idx(pcie_pmu);
+	idx = hisi_pcie_pmu_get_event_idx(pcie_pmu, event);
 	if (idx < 0)
 		return idx;

 	hwc->idx = idx;
-	pcie_pmu->hw_events[idx] = event;
-	/* Reset Counter to avoid previous statistic interference. */
-	hisi_pcie_pmu_reset_counter(pcie_pmu, idx);

-start_count:
+	/* No enabled counter found with related event, reset it */
+	if (!pcie_pmu->hw_events[idx]) {
+		hisi_pcie_pmu_reset_counter(pcie_pmu, idx);
+		pcie_pmu->hw_events[idx] = event;
+	}

 	if (flags & PERF_EF_START)
 		hisi_pcie_pmu_start(event, PERF_EF_RELOAD);
@@ -714,10 +710,18 @@ static struct attribute *hisi_pcie_pmu_events_attr[] = {
 	HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_cnt, 0x10210),
 	HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_latency, 0x0011),
 	HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_cnt, 0x10011),
+	HISI_PCIE_PMU_EVENT_ATTR(rx_mwr_flux, 0x0104),
+	HISI_PCIE_PMU_EVENT_ATTR(rx_mwr_time, 0x10104),
 	HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_flux, 0x0804),
 	HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_time, 0x10804),
+	HISI_PCIE_PMU_EVENT_ATTR(rx_cpl_flux, 0x2004),
+	HISI_PCIE_PMU_EVENT_ATTR(rx_cpl_time, 0x12004),
 	HISI_PCIE_PMU_EVENT_ATTR(tx_mwr_flux, 0x0105),
 	HISI_PCIE_PMU_EVENT_ATTR(tx_mwr_time, 0x10105),
+	HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_flux, 0x0405),
+	HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_time, 0x10405),
+	HISI_PCIE_PMU_EVENT_ATTR(tx_cpl_flux, 0x1005),
+	HISI_PCIE_PMU_EVENT_ATTR(tx_cpl_time, 0x11005),
 	NULL
 };

@@ -1085,15 +1085,27 @@ static bool hns3_pmu_validate_event_group(struct perf_event *event)
 			return false;

 		for (num = 0; num < counters; num++) {
+			/*
+			 * If we find a related event, then it's a valid group
+			 * since we don't need to allocate a new counter for it.
+			 */
 			if (hns3_pmu_cmp_event(event_group[num], sibling))
 				break;
 		}

+		/*
+		 * Otherwise it's a new event but if there's no available counter,
+		 * fail the check since we cannot schedule all the events in
+		 * the group simultaneously.
+		 */
+		if (num == HNS3_PMU_MAX_HW_EVENTS)
+			return false;
+
 		if (num == counters)
 			event_group[counters++] = sibling;
 	}

-	return counters <= HNS3_PMU_MAX_HW_EVENTS;
+	return true;
 }
@@ -1515,7 +1527,7 @@ static int hns3_pmu_irq_register(struct pci_dev *pdev,
 		return ret;
 	}

-	ret = devm_add_action(&pdev->dev, hns3_pmu_free_irq, pdev);
+	ret = devm_add_action_or_reset(&pdev->dev, hns3_pmu_free_irq, pdev);
 	if (ret) {
 		pci_err(pdev, "failed to add free irq action, ret = %d.\n", ret);
 		return ret;

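The one-word change in hns3_pmu_irq_register() above matters because devm_add_action() can itself fail (e.g. -ENOMEM), in which case the teardown callback would simply never run. The _or_reset variant invokes the callback immediately on failure, so a bare error return is already clean. A hedged sketch of the idiom with placeholder foo_* names:

	static void foo_free_irq(void *data)
	{
		struct pci_dev *pdev = data;

		pci_free_irq_vectors(pdev);	/* example teardown */
	}

	static int foo_setup_irq(struct pci_dev *pdev)
	{
		int ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);

		if (ret < 0)
			return ret;

		/*
		 * Plain devm_add_action() could fail and leak the vectors;
		 * the _or_reset variant calls foo_free_irq() right away on
		 * failure, so the error path needs no manual cleanup.
		 */
		return devm_add_action_or_reset(&pdev->dev, foo_free_irq, pdev);
	}
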
@@ -228,6 +228,7 @@ void phylink_limit_mac_speed(struct phylink_config *config, u32 max_speed);
 /**
  * struct phylink_mac_ops - MAC operations structure.
  * @validate: Validate and update the link configuration.
+ * @mac_get_caps: Get MAC capabilities for interface mode.
  * @mac_select_pcs: Select a PCS for the interface mode.
  * @mac_prepare: prepare for a major reconfiguration of the interface.
  * @mac_config: configure the MAC for the selected mode and state.
@@ -241,6 +242,8 @@ struct phylink_mac_ops {
 	void (*validate)(struct phylink_config *config,
 			 unsigned long *supported,
 			 struct phylink_link_state *state);
+	unsigned long (*mac_get_caps)(struct phylink_config *config,
+				      phy_interface_t interface);
 	struct phylink_pcs *(*mac_select_pcs)(struct phylink_config *config,
 					      phy_interface_t interface);
 	int (*mac_prepare)(struct phylink_config *config, unsigned int mode,
@@ -292,6 +295,18 @@ struct phylink_mac_ops {
 */
 void validate(struct phylink_config *config, unsigned long *supported,
 	      struct phylink_link_state *state);
+/**
+ * mac_get_caps: Get MAC capabilities for interface mode.
+ * @config: a pointer to a &struct phylink_config.
+ * @interface: PHY interface mode.
+ *
+ * Optional method. When not provided, config->mac_capabilities will be used.
+ * When implemented, this returns the MAC capabilities for the specified
+ * interface mode where there is some special handling required by the MAC
+ * driver (e.g. not supporting half-duplex in certain interface modes.)
+ */
+unsigned long mac_get_caps(struct phylink_config *config,
+			   phy_interface_t interface);
 /**
  * mac_select_pcs: Select a PCS for the interface mode.
  * @config: a pointer to a &struct phylink_config.

@@ -100,6 +100,7 @@ struct stmmac_dma_cfg {
 	bool eame;
 	bool multi_msi_en;
 	bool dche;
+	bool atds;
 };

 #define AXI_BLEN	7
@@ -221,6 +222,7 @@ struct dwmac4_addrs {
 #define STMMAC_FLAG_RX_CLK_RUNS_IN_LPI		BIT(10)
 #define STMMAC_FLAG_EN_TX_LPI_CLOCKGATING	BIT(11)
 #define STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY	BIT(12)
+#define STMMAC_FLAG_DISABLE_FORCE_1000		BIT(13)

 struct plat_stmmacenet_data {
 	int bus_id;