Merge branch 'xgene-next'
Iyappan Subramanian says:

====================
drivers: net: xgene: Add ethernet with ring manager v2 support

Adding XFI based 10GbE and SGMII based 1GbE with ring manager v2
support for the APM X-Gene ethernet driver. The ring manager v2 is
used by the 2nd generation SoC.

v1:
 * Initial version
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 79c9cc3cd0
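The change that threads through the whole diff below is an ops-table indirection for the ring manager: every direct ring access (iowrite32() to ring->cmd, xgene_enet_ring_len(), xgene_enet_setup_ring(), xgene_enet_clear_ring()) becomes a call through pdata->ring_ops, and xgene_enet_setup_ops() selects either xgene_ring1_ops or xgene_ring2_ops from pdata->enet_id, which the probe path takes from the OF match data. What follows is a minimal standalone sketch of that dispatch pattern, not driver code: the demo_* names are made up, and only the ring-id shifts (6 for v1, 13 for v2) are taken from the diff.

/*
 * Standalone sketch of the ops-table dispatch this series introduces.
 * Everything prefixed demo_ is illustrative; only the shift values
 * (6 for ring manager v1, 13 for v2) come from the diff below.
 */
#include <stdio.h>

enum demo_enet_id { DEMO_ENET1 = 1, DEMO_ENET2 };

struct demo_ring_ops {
	unsigned char num_ring_id_shift;     /* spacing of per-ring command regions */
	void (*wr_cmd)(unsigned int ring_num, int count);
};

static void demo_ring1_wr_cmd(unsigned int ring_num, int count)
{
	printf("ring1: ring %u inc/dec %d\n", ring_num, count);
}

static void demo_ring2_wr_cmd(unsigned int ring_num, int count)
{
	/* the real v2 wr_cmd also encodes an interrupt-line clear for CPU rings */
	printf("ring2: ring %u inc/dec %d\n", ring_num, count);
}

static const struct demo_ring_ops demo_ring1_ops = { 6, demo_ring1_wr_cmd };
static const struct demo_ring_ops demo_ring2_ops = { 13, demo_ring2_wr_cmd };

int main(void)
{
	enum demo_enet_id id = DEMO_ENET2;   /* would come from the match data */
	const struct demo_ring_ops *ops =
		(id == DEMO_ENET1) ? &demo_ring1_ops : &demo_ring2_ops;

	ops->wr_cmd(0, 1);                   /* callers never test the SoC generation */
	return 0;
}

This indirection is what lets xgene_enet_main.c stay generation-agnostic while xgene_enet_hw.c and the new xgene_enet_ring2.c each keep their own register layouts.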

@@ -3,5 +3,5 @@
#
xgene-enet-objs := xgene_enet_hw.o xgene_enet_sgmac.o xgene_enet_xgmac.o \
xgene_enet_main.o xgene_enet_ethtool.o
xgene_enet_main.o xgene_enet_ring2.o xgene_enet_ethtool.o
obj-$(CONFIG_NET_XGENE) += xgene-enet.o

@@ -87,10 +87,11 @@ static void xgene_enet_ring_rd32(struct xgene_enet_desc_ring *ring,
static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring)
{
struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
int i;
xgene_enet_ring_wr32(ring, CSR_RING_CONFIG, ring->num);
for (i = 0; i < NUM_RING_CONFIG; i++) {
for (i = 0; i < pdata->ring_ops->num_ring_config; i++) {
xgene_enet_ring_wr32(ring, CSR_RING_WR_BASE + (i * 4),
ring->state[i]);
}
@@ -98,7 +99,7 @@ static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring)
static void xgene_enet_clr_ring_state(struct xgene_enet_desc_ring *ring)
{
memset(ring->state, 0, sizeof(u32) * NUM_RING_CONFIG);
memset(ring->state, 0, sizeof(ring->state));
xgene_enet_write_ring_state(ring);
}
@@ -141,8 +142,8 @@ static void xgene_enet_clr_desc_ring_id(struct xgene_enet_desc_ring *ring)
xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, 0);
}
struct xgene_enet_desc_ring *xgene_enet_setup_ring(
struct xgene_enet_desc_ring *ring)
static struct xgene_enet_desc_ring *xgene_enet_setup_ring(
struct xgene_enet_desc_ring *ring)
{
u32 size = ring->size;
u32 i, data;
@@ -168,7 +169,7 @@ struct xgene_enet_desc_ring *xgene_enet_setup_ring(
return ring;
}
void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring)
static void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring)
{
u32 data;
bool is_bufpool;
@@ -186,6 +187,22 @@ out:
xgene_enet_clr_ring_state(ring);
}
static void xgene_enet_wr_cmd(struct xgene_enet_desc_ring *ring, int count)
{
iowrite32(count, ring->cmd);
}
static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
{
u32 __iomem *cmd_base = ring->cmd_base;
u32 ring_state, num_msgs;
ring_state = ioread32(&cmd_base[1]);
num_msgs = GET_VAL(NUMMSGSINQ, ring_state);
return num_msgs;
}
void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
struct xgene_enet_pdata *pdata,
enum xgene_enet_err_code status)
@@ -803,3 +820,12 @@ struct xgene_port_ops xgene_gport_ops = {
.cle_bypass = xgene_enet_cle_bypass,
.shutdown = xgene_gport_shutdown,
};
struct xgene_ring_ops xgene_ring1_ops = {
.num_ring_config = NUM_RING_CONFIG,
.num_ring_id_shift = 6,
.setup = xgene_enet_setup_ring,
.clear = xgene_enet_clear_ring,
.wr_cmd = xgene_enet_wr_cmd,
.len = xgene_enet_ring_len,
};

@@ -26,6 +26,7 @@
struct xgene_enet_pdata;
struct xgene_enet_stats;
struct xgene_enet_desc_ring;
/* clears and then set bits */
static inline void xgene_set_bits(u32 *dst, u32 val, u32 start, u32 len)
@@ -101,8 +102,8 @@ enum xgene_enet_rm {
#define BLOCK_ETH_CSR_OFFSET 0x2000
#define BLOCK_ETH_RING_IF_OFFSET 0x9000
#define BLOCK_ETH_CLKRST_CSR_OFFSET 0xc000
#define BLOCK_ETH_DIAG_CSR_OFFSET 0xD000
#define BLOCK_ETH_MAC_OFFSET 0x0000
#define BLOCK_ETH_MAC_CSR_OFFSET 0x2800
@@ -261,6 +262,7 @@ enum xgene_enet_ring_type {
enum xgene_ring_owner {
RING_OWNER_ETH0,
RING_OWNER_ETH1,
RING_OWNER_CPU = 15,
RING_OWNER_INVALID
};
@@ -314,9 +316,6 @@ static inline u16 xgene_enet_get_numslots(u16 id, u32 size)
size / WORK_DESC_SIZE;
}
struct xgene_enet_desc_ring *xgene_enet_setup_ring(
struct xgene_enet_desc_ring *ring);
void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring);
void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
struct xgene_enet_pdata *pdata,
enum xgene_enet_err_code status);
@@ -327,5 +326,6 @@ bool xgene_ring_mgr_init(struct xgene_enet_pdata *p);
extern struct xgene_mac_ops xgene_gmac_ops;
extern struct xgene_port_ops xgene_gport_ops;
extern struct xgene_ring_ops xgene_ring1_ops;
#endif /* __XGENE_ENET_HW_H__ */

@@ -28,6 +28,8 @@
#define RES_RING_CSR 1
#define RES_RING_CMD 2
static const struct of_device_id xgene_enet_of_match[];
static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
struct xgene_enet_raw_desc16 *raw_desc;
@@ -48,6 +50,7 @@ static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
{
struct sk_buff *skb;
struct xgene_enet_raw_desc16 *raw_desc;
struct xgene_enet_pdata *pdata;
struct net_device *ndev;
struct device *dev;
dma_addr_t dma_addr;
@@ -58,6 +61,7 @@ static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
ndev = buf_pool->ndev;
dev = ndev_to_dev(buf_pool->ndev);
pdata = netdev_priv(ndev);
bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
len = XGENE_ENET_MAX_MTU;
@@ -82,7 +86,7 @@ static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
tail = (tail + 1) & slots;
}
iowrite32(nbuf, buf_pool->cmd);
pdata->ring_ops->wr_cmd(buf_pool, nbuf);
buf_pool->tail = tail;
return 0;
@@ -102,26 +106,16 @@ static u8 xgene_enet_hdr_len(const void *data)
return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;
}
static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
{
u32 __iomem *cmd_base = ring->cmd_base;
u32 ring_state, num_msgs;
ring_state = ioread32(&cmd_base[1]);
num_msgs = ring_state & CREATE_MASK(NUMMSGSINQ_POS, NUMMSGSINQ_LEN);
return num_msgs >> NUMMSGSINQ_POS;
}
static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
struct xgene_enet_pdata *pdata = netdev_priv(buf_pool->ndev);
struct xgene_enet_raw_desc16 *raw_desc;
u32 slots = buf_pool->slots - 1;
u32 tail = buf_pool->tail;
u32 userinfo;
int i, len;
len = xgene_enet_ring_len(buf_pool);
len = pdata->ring_ops->len(buf_pool);
for (i = 0; i < len; i++) {
tail = (tail - 1) & slots;
raw_desc = &buf_pool->raw_desc16[tail];
@@ -131,7 +125,7 @@ static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
dev_kfree_skb_any(buf_pool->rx_skb[userinfo]);
}
iowrite32(-len, buf_pool->cmd);
pdata->ring_ops->wr_cmd(buf_pool, -len);
buf_pool->tail = tail;
}
@@ -263,8 +257,8 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
struct xgene_enet_desc_ring *cp_ring = tx_ring->cp_ring;
u32 tx_level, cq_level;
tx_level = xgene_enet_ring_len(tx_ring);
cq_level = xgene_enet_ring_len(cp_ring);
tx_level = pdata->ring_ops->len(tx_ring);
cq_level = pdata->ring_ops->len(cp_ring);
if (unlikely(tx_level > pdata->tx_qcnt_hi ||
cq_level > pdata->cp_qcnt_hi)) {
netif_stop_queue(ndev);
@@ -276,7 +270,7 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
iowrite32(1, tx_ring->cmd);
pdata->ring_ops->wr_cmd(tx_ring, 1);
skb_tx_timestamp(skb);
tx_ring->tail = (tx_ring->tail + 1) & (tx_ring->slots - 1);
@@ -389,11 +383,11 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
} while (--budget);
if (likely(count)) {
iowrite32(-count, ring->cmd);
pdata->ring_ops->wr_cmd(ring, -count);
ring->head = head;
if (netif_queue_stopped(ring->ndev)) {
if (xgene_enet_ring_len(ring) < pdata->cp_qcnt_low)
if (pdata->ring_ops->len(ring) < pdata->cp_qcnt_low)
netif_wake_queue(ring->ndev);
}
}
@@ -510,6 +504,7 @@ static int xgene_enet_open(struct net_device *ndev)
else
schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);
netif_carrier_off(ndev);
netif_start_queue(ndev);
return ret;
@@ -545,7 +540,7 @@ static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
pdata = netdev_priv(ring->ndev);
dev = ndev_to_dev(ring->ndev);
xgene_enet_clear_ring(ring);
pdata->ring_ops->clear(ring);
dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
}
@@ -598,15 +593,17 @@ static int xgene_enet_get_ring_size(struct device *dev,
static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
{
struct xgene_enet_pdata *pdata;
struct device *dev;
if (!ring)
return;
dev = ndev_to_dev(ring->ndev);
pdata = netdev_priv(ring->ndev);
if (ring->desc_addr) {
xgene_enet_clear_ring(ring);
pdata->ring_ops->clear(ring);
dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
}
devm_kfree(dev, ring);
@@ -637,6 +634,25 @@ static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
}
}
static bool is_irq_mbox_required(struct xgene_enet_pdata *pdata,
struct xgene_enet_desc_ring *ring)
{
if ((pdata->enet_id == XGENE_ENET2) &&
(xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)) {
return true;
}
return false;
}
static void __iomem *xgene_enet_ring_cmd_base(struct xgene_enet_pdata *pdata,
struct xgene_enet_desc_ring *ring)
{
u8 num_ring_id_shift = pdata->ring_ops->num_ring_id_shift;
return pdata->ring_cmd_addr + (ring->num << num_ring_id_shift);
}
static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
struct net_device *ndev, u32 ring_num,
enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
@@ -668,9 +684,20 @@ static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
}
ring->size = size;
ring->cmd_base = pdata->ring_cmd_addr + (ring->num << 6);
if (is_irq_mbox_required(pdata, ring)) {
ring->irq_mbox_addr = dma_zalloc_coherent(dev, INTR_MBOX_SIZE,
&ring->irq_mbox_dma, GFP_KERNEL);
if (!ring->irq_mbox_addr) {
dma_free_coherent(dev, size, ring->desc_addr,
ring->dma);
devm_kfree(dev, ring);
return NULL;
}
}
ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring);
ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
ring = xgene_enet_setup_ring(ring);
ring = pdata->ring_ops->setup(ring);
netdev_dbg(ndev, "ring info: num=%d size=%d id=%d slots=%d\n",
ring->num, ring->size, ring->id, ring->slots);
@@ -682,12 +709,34 @@ static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum)
return (owner << 6) | (bufnum & GENMASK(5, 0));
}
static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p)
{
enum xgene_ring_owner owner;
if (p->enet_id == XGENE_ENET1) {
switch (p->phy_mode) {
case PHY_INTERFACE_MODE_SGMII:
owner = RING_OWNER_ETH0;
break;
default:
owner = (!p->port_id) ? RING_OWNER_ETH0 :
RING_OWNER_ETH1;
break;
}
} else {
owner = (!p->port_id) ? RING_OWNER_ETH0 : RING_OWNER_ETH1;
}
return owner;
}
static int xgene_enet_create_desc_rings(struct net_device *ndev)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
struct device *dev = ndev_to_dev(ndev);
struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
struct xgene_enet_desc_ring *buf_pool = NULL;
enum xgene_ring_owner owner;
u8 cpu_bufnum = pdata->cpu_bufnum;
u8 eth_bufnum = pdata->eth_bufnum;
u8 bp_bufnum = pdata->bp_bufnum;
@@ -696,6 +745,7 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
int ret;
/* allocate rx descriptor ring */
owner = xgene_derive_ring_owner(pdata);
ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
RING_CFGSIZE_16KB, ring_id);
@@ -705,7 +755,8 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
}
/* allocate buffer pool for receiving packets */
ring_id = xgene_enet_get_ring_id(RING_OWNER_ETH0, bp_bufnum++);
owner = xgene_derive_ring_owner(pdata);
ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
RING_CFGSIZE_2KB, ring_id);
if (!buf_pool) {
@@ -734,7 +785,8 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
pdata->rx_ring = rx_ring;
/* allocate tx descriptor ring */
ring_id = xgene_enet_get_ring_id(RING_OWNER_ETH0, eth_bufnum++);
owner = xgene_derive_ring_owner(pdata);
ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++);
tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
RING_CFGSIZE_16KB, ring_id);
if (!tx_ring) {
@@ -824,14 +876,21 @@ static int xgene_get_port_id(struct device *dev, struct xgene_enet_pdata *pdata)
int ret;
ret = device_property_read_u32(dev, "port-id", &id);
if (!ret && id > 1) {
dev_err(dev, "Incorrect port-id specified\n");
return -ENODEV;
switch (ret) {
case -EINVAL:
pdata->port_id = 0;
ret = 0;
break;
case 0:
pdata->port_id = id & BIT(0);
break;
default:
dev_err(dev, "Incorrect port-id specified: errno: %d\n", ret);
break;
}
pdata->port_id = id;
return 0;
return ret;
}
static int xgene_get_mac_address(struct device *dev,
@@ -876,6 +935,7 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
struct device *dev;
struct resource *res;
void __iomem *base_addr;
u32 offset;
int ret;
pdev = pdata->pdev;
@@ -962,14 +1022,20 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
pdata->clk = NULL;
}
base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
else
base_addr = pdata->base_addr;
pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET;
pdata->mcx_mac_csr_addr = base_addr + BLOCK_ETH_MAC_CSR_OFFSET;
offset = (pdata->enet_id == XGENE_ENET1) ?
BLOCK_ETH_MAC_CSR_OFFSET :
X2_BLOCK_ETH_MAC_CSR_OFFSET;
pdata->mcx_mac_csr_addr = base_addr + offset;
} else {
pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
@@ -1034,23 +1100,44 @@ static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
break;
}
switch (pdata->port_id) {
case 0:
pdata->cpu_bufnum = START_CPU_BUFNUM_0;
pdata->eth_bufnum = START_ETH_BUFNUM_0;
pdata->bp_bufnum = START_BP_BUFNUM_0;
pdata->ring_num = START_RING_NUM_0;
break;
case 1:
pdata->cpu_bufnum = START_CPU_BUFNUM_1;
pdata->eth_bufnum = START_ETH_BUFNUM_1;
pdata->bp_bufnum = START_BP_BUFNUM_1;
pdata->ring_num = START_RING_NUM_1;
break;
default:
break;
if (pdata->enet_id == XGENE_ENET1) {
switch (pdata->port_id) {
case 0:
pdata->cpu_bufnum = START_CPU_BUFNUM_0;
pdata->eth_bufnum = START_ETH_BUFNUM_0;
pdata->bp_bufnum = START_BP_BUFNUM_0;
pdata->ring_num = START_RING_NUM_0;
break;
case 1:
pdata->cpu_bufnum = START_CPU_BUFNUM_1;
pdata->eth_bufnum = START_ETH_BUFNUM_1;
pdata->bp_bufnum = START_BP_BUFNUM_1;
pdata->ring_num = START_RING_NUM_1;
break;
default:
break;
}
pdata->ring_ops = &xgene_ring1_ops;
} else {
switch (pdata->port_id) {
case 0:
pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
pdata->ring_num = X2_START_RING_NUM_0;
break;
case 1:
pdata->cpu_bufnum = X2_START_CPU_BUFNUM_1;
pdata->eth_bufnum = X2_START_ETH_BUFNUM_1;
pdata->bp_bufnum = X2_START_BP_BUFNUM_1;
pdata->ring_num = X2_START_RING_NUM_1;
break;
default:
break;
}
pdata->rm = RM0;
pdata->ring_ops = &xgene_ring2_ops;
}
}
static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
@@ -1086,6 +1173,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
struct xgene_enet_pdata *pdata;
struct device *dev = &pdev->dev;
struct xgene_mac_ops *mac_ops;
const struct of_device_id *of_id;
int ret;
ndev = alloc_etherdev(sizeof(struct xgene_enet_pdata));
@@ -1104,6 +1192,17 @@ static int xgene_enet_probe(struct platform_device *pdev)
NETIF_F_GSO |
NETIF_F_GRO;
#ifdef CONFIG_OF
of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
if (of_id) {
pdata->enet_id = (enum xgene_enet_id)of_id->data;
if (!pdata->enet_id) {
free_netdev(ndev);
return -ENODEV;
}
}
#endif
ret = xgene_enet_get_resources(pdata);
if (ret)
goto err;
@@ -1175,9 +1274,11 @@ MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
#ifdef CONFIG_OF
static const struct of_device_id xgene_enet_of_match[] = {
{.compatible = "apm,xgene-enet",},
{.compatible = "apm,xgene1-sgenet",},
{.compatible = "apm,xgene1-xgenet",},
{.compatible = "apm,xgene-enet", .data = (void *)XGENE_ENET1},
{.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
{.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
{.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
{.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
{},
};

@@ -35,6 +35,7 @@
#include <linux/if_vlan.h>
#include <linux/phy.h>
#include "xgene_enet_hw.h"
#include "xgene_enet_ring2.h"
#define XGENE_DRV_VERSION "v1.0"
#define XGENE_ENET_MAX_MTU 1536
@@ -51,12 +52,26 @@
#define START_BP_BUFNUM_1 0x2A
#define START_RING_NUM_1 264
#define X2_START_CPU_BUFNUM_0 0
#define X2_START_ETH_BUFNUM_0 0
#define X2_START_BP_BUFNUM_0 0x20
#define X2_START_RING_NUM_0 0
#define X2_START_CPU_BUFNUM_1 0xc
#define X2_START_ETH_BUFNUM_1 0
#define X2_START_BP_BUFNUM_1 0x20
#define X2_START_RING_NUM_1 256
#define IRQ_ID_SIZE 16
#define XGENE_MAX_TXC_RINGS 1
#define PHY_POLL_LINK_ON (10 * HZ)
#define PHY_POLL_LINK_OFF (PHY_POLL_LINK_ON / 5)
enum xgene_enet_id {
XGENE_ENET1 = 1,
XGENE_ENET2
};
/* software context of a descriptor ring */
struct xgene_enet_desc_ring {
struct net_device *ndev;
@@ -72,6 +87,8 @@ struct xgene_enet_desc_ring {
void __iomem *cmd_base;
void __iomem *cmd;
dma_addr_t dma;
dma_addr_t irq_mbox_dma;
void *irq_mbox_addr;
u16 dst_ring_num;
u8 nbufpool;
struct sk_buff *(*rx_skb);
@@ -105,6 +122,15 @@ struct xgene_port_ops {
void (*shutdown)(struct xgene_enet_pdata *pdata);
};
struct xgene_ring_ops {
u8 num_ring_config;
u8 num_ring_id_shift;
struct xgene_enet_desc_ring * (*setup)(struct xgene_enet_desc_ring *);
void (*clear)(struct xgene_enet_desc_ring *);
void (*wr_cmd)(struct xgene_enet_desc_ring *, int);
u32 (*len)(struct xgene_enet_desc_ring *);
};
/* ethernet private data */
struct xgene_enet_pdata {
struct net_device *ndev;
@@ -113,6 +139,7 @@ struct xgene_enet_pdata {
int phy_speed;
struct clk *clk;
struct platform_device *pdev;
enum xgene_enet_id enet_id;
struct xgene_enet_desc_ring *tx_ring;
struct xgene_enet_desc_ring *rx_ring;
char *dev_name;
@@ -136,6 +163,7 @@ struct xgene_enet_pdata {
struct rtnl_link_stats64 stats;
struct xgene_mac_ops *mac_ops;
struct xgene_port_ops *port_ops;
struct xgene_ring_ops *ring_ops;
struct delayed_work link_work;
u32 port_id;
u8 cpu_bufnum;

@@ -0,0 +1,200 @@
/* Applied Micro X-Gene SoC Ethernet Driver
*
* Copyright (c) 2015, Applied Micro Circuits Corporation
* Author: Iyappan Subramanian <isubramanian@apm.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_ring2.h"
static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring)
{
u32 *ring_cfg = ring->state;
u64 addr = ring->dma;
if (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU) {
ring_cfg[0] |= SET_VAL(X2_INTLINE, ring->id & RING_BUFNUM_MASK);
ring_cfg[3] |= SET_BIT(X2_DEQINTEN);
}
ring_cfg[0] |= SET_VAL(X2_CFGCRID, 1);
addr >>= 8;
ring_cfg[2] |= QCOHERENT | SET_VAL(RINGADDRL, addr);
addr >>= 27;
ring_cfg[3] |= SET_VAL(RINGSIZE, ring->cfgsize)
| ACCEPTLERR
| SET_VAL(RINGADDRH, addr);
ring_cfg[4] |= SET_VAL(X2_SELTHRSH, 1);
ring_cfg[5] |= SET_BIT(X2_QBASE_AM) | SET_BIT(X2_MSG_AM);
}
static void xgene_enet_ring_set_type(struct xgene_enet_desc_ring *ring)
{
u32 *ring_cfg = ring->state;
bool is_bufpool;
u32 val;
is_bufpool = xgene_enet_is_bufpool(ring->id);
val = (is_bufpool) ? RING_BUFPOOL : RING_REGULAR;
ring_cfg[4] |= SET_VAL(X2_RINGTYPE, val);
if (is_bufpool)
ring_cfg[3] |= SET_VAL(RINGMODE, BUFPOOL_MODE);
}
static void xgene_enet_ring_set_recombbuf(struct xgene_enet_desc_ring *ring)
{
u32 *ring_cfg = ring->state;
ring_cfg[3] |= RECOMBBUF;
ring_cfg[4] |= SET_VAL(X2_RECOMTIMEOUT, 0x7);
}
static void xgene_enet_ring_wr32(struct xgene_enet_desc_ring *ring,
u32 offset, u32 data)
{
struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
iowrite32(data, pdata->ring_csr_addr + offset);
}
static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring)
{
struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
int i;
xgene_enet_ring_wr32(ring, CSR_RING_CONFIG, ring->num);
for (i = 0; i < pdata->ring_ops->num_ring_config; i++) {
xgene_enet_ring_wr32(ring, CSR_RING_WR_BASE + (i * 4),
ring->state[i]);
}
}
static void xgene_enet_clr_ring_state(struct xgene_enet_desc_ring *ring)
{
memset(ring->state, 0, sizeof(ring->state));
xgene_enet_write_ring_state(ring);
}
static void xgene_enet_set_ring_state(struct xgene_enet_desc_ring *ring)
{
enum xgene_ring_owner owner;
xgene_enet_ring_set_type(ring);
owner = xgene_enet_ring_owner(ring->id);
if (owner == RING_OWNER_ETH0 || owner == RING_OWNER_ETH1)
xgene_enet_ring_set_recombbuf(ring);
xgene_enet_ring_init(ring);
xgene_enet_write_ring_state(ring);
}
static void xgene_enet_set_ring_id(struct xgene_enet_desc_ring *ring)
{
u32 ring_id_val, ring_id_buf;
bool is_bufpool;
if (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)
return;
is_bufpool = xgene_enet_is_bufpool(ring->id);
ring_id_val = ring->id & GENMASK(9, 0);
ring_id_val |= OVERWRITE;
ring_id_buf = (ring->num << 9) & GENMASK(18, 9);
ring_id_buf |= PREFETCH_BUF_EN;
if (is_bufpool)
ring_id_buf |= IS_BUFFER_POOL;
xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id_val);
xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, ring_id_buf);
}
static void xgene_enet_clr_desc_ring_id(struct xgene_enet_desc_ring *ring)
{
u32 ring_id;
ring_id = ring->id | OVERWRITE;
xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id);
xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, 0);
}
static struct xgene_enet_desc_ring *xgene_enet_setup_ring(
struct xgene_enet_desc_ring *ring)
{
bool is_bufpool;
u32 addr, i;
xgene_enet_clr_ring_state(ring);
xgene_enet_set_ring_state(ring);
xgene_enet_set_ring_id(ring);
ring->slots = xgene_enet_get_numslots(ring->id, ring->size);
is_bufpool = xgene_enet_is_bufpool(ring->id);
if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
return ring;
addr = CSR_VMID0_INTR_MBOX + (4 * (ring->id & RING_BUFNUM_MASK));
xgene_enet_ring_wr32(ring, addr, ring->irq_mbox_dma >> 10);
for (i = 0; i < ring->slots; i++)
xgene_enet_mark_desc_slot_empty(&ring->raw_desc[i]);
return ring;
}
static void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring)
{
xgene_enet_clr_desc_ring_id(ring);
xgene_enet_clr_ring_state(ring);
}
static void xgene_enet_wr_cmd(struct xgene_enet_desc_ring *ring, int count)
{
u32 data = 0;
if (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU) {
data = SET_VAL(X2_INTLINE, ring->id & RING_BUFNUM_MASK) |
INTR_CLEAR;
}
data |= (count & GENMASK(16, 0));
iowrite32(data, ring->cmd);
}
static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
{
u32 __iomem *cmd_base = ring->cmd_base;
u32 ring_state, num_msgs;
ring_state = ioread32(&cmd_base[1]);
num_msgs = GET_VAL(X2_NUMMSGSINQ, ring_state);
return num_msgs;
}
struct xgene_ring_ops xgene_ring2_ops = {
.num_ring_config = X2_NUM_RING_CONFIG,
.num_ring_id_shift = 13,
.setup = xgene_enet_setup_ring,
.clear = xgene_enet_clear_ring,
.wr_cmd = xgene_enet_wr_cmd,
.len = xgene_enet_ring_len,
};
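
Compared with xgene_ring1_ops earlier in the diff, the v2 table above programs six ring-state words (X2_NUM_RING_CONFIG), spaces the per-ring command regions with a 13-bit shift instead of a 6-bit one, and folds an interrupt-line clear into wr_cmd for CPU-owned rings. Below is a rough standalone illustration of what the larger shift means for the cmd_base math in xgene_enet_ring_cmd_base(); the base address is made up, only the shifts come from the diff.

/*
 * cmd_base = ring_cmd_addr + (ring_num << num_ring_id_shift)
 * The 0x10000 base is hypothetical; the shifts (6 vs 13) are from the diff.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uintptr_t ring_cmd_addr = 0x10000;   /* hypothetical mapping */
	unsigned int ring_num = 3;

	uintptr_t v1_cmd_base = ring_cmd_addr + ((uintptr_t)ring_num << 6);   /* 64 B apart  */
	uintptr_t v2_cmd_base = ring_cmd_addr + ((uintptr_t)ring_num << 13);  /* 8 KiB apart */

	printf("v1 cmd_base %#lx, v2 cmd_base %#lx\n",
	       (unsigned long)v1_cmd_base, (unsigned long)v2_cmd_base);
	return 0;
}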

@@ -0,0 +1,49 @@
/* Applied Micro X-Gene SoC Ethernet Driver
*
* Copyright (c) 2015, Applied Micro Circuits Corporation
* Author: Iyappan Subramanian <isubramanian@apm.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __XGENE_ENET_RING2_H__
#define __XGENE_ENET_RING2_H__
#include "xgene_enet_main.h"
#define X2_NUM_RING_CONFIG 6
#define INTR_MBOX_SIZE 1024
#define CSR_VMID0_INTR_MBOX 0x0270
#define INTR_CLEAR BIT(23)
#define X2_MSG_AM_POS 10
#define X2_QBASE_AM_POS 11
#define X2_INTLINE_POS 24
#define X2_INTLINE_LEN 5
#define X2_CFGCRID_POS 29
#define X2_CFGCRID_LEN 3
#define X2_SELTHRSH_POS 7
#define X2_SELTHRSH_LEN 3
#define X2_RINGTYPE_POS 23
#define X2_RINGTYPE_LEN 2
#define X2_DEQINTEN_POS 29
#define X2_RECOMTIMEOUT_POS 0
#define X2_RECOMTIMEOUT_LEN 7
#define X2_NUMMSGSINQ_POS 0
#define X2_NUMMSGSINQ_LEN 17
extern struct xgene_ring_ops xgene_ring2_ops;
#endif /* __XGENE_ENET_RING2_H__ */

@@ -21,6 +21,7 @@
#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_sgmac.h"
#include "xgene_enet_xgmac.h"
static void xgene_enet_wr_csr(struct xgene_enet_pdata *p, u32 offset, u32 val)
{
@@ -39,6 +40,14 @@ static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *p,
iowrite32(val, p->eth_diag_csr_addr + offset);
}
static void xgene_enet_wr_mcx_csr(struct xgene_enet_pdata *pdata,
u32 offset, u32 val)
{
void __iomem *addr = pdata->mcx_mac_csr_addr + offset;
iowrite32(val, addr);
}
static bool xgene_enet_wr_indirect(struct xgene_indirect_ctl *ctl,
u32 wr_addr, u32 wr_data)
{
@@ -140,8 +149,9 @@ static int xgene_enet_ecc_init(struct xgene_enet_pdata *p)
static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *p)
{
u32 val = 0xffffffff;
u32 val;
val = (p->enet_id == XGENE_ENET1) ? 0xffffffff : 0;
xgene_enet_wr_ring_if(p, ENET_CFGSSQMIWQASSOC_ADDR, val);
xgene_enet_wr_ring_if(p, ENET_CFGSSQMIFPQASSOC_ADDR, val);
}
@@ -227,6 +237,8 @@ static void xgene_sgmac_init(struct xgene_enet_pdata *p)
{
u32 data, loop = 10;
u32 offset = p->port_id * 4;
u32 enet_spare_cfg_reg, rsif_config_reg;
u32 cfg_bypass_reg, rx_dv_gate_reg;
xgene_sgmac_reset(p);
@@ -239,7 +251,7 @@ static void xgene_sgmac_init(struct xgene_enet_pdata *p)
SGMII_STATUS_ADDR >> 2);
if ((data & AUTO_NEG_COMPLETE) && (data & LINK_STATUS))
break;
usleep_range(10, 20);
usleep_range(1000, 2000);
}
if (!(data & AUTO_NEG_COMPLETE) || !(data & LINK_STATUS))
netdev_err(p->ndev, "Auto-negotiation failed\n");
@@ -249,33 +261,38 @@ static void xgene_sgmac_init(struct xgene_enet_pdata *p)
xgene_enet_wr_mac(p, MAC_CONFIG_2_ADDR, data | FULL_DUPLEX2);
xgene_enet_wr_mac(p, INTERFACE_CONTROL_ADDR, ENET_GHD_MODE);
data = xgene_enet_rd_csr(p, ENET_SPARE_CFG_REG_ADDR);
if (p->enet_id == XGENE_ENET1) {
enet_spare_cfg_reg = ENET_SPARE_CFG_REG_ADDR;
rsif_config_reg = RSIF_CONFIG_REG_ADDR;
cfg_bypass_reg = CFG_BYPASS_ADDR;
rx_dv_gate_reg = SG_RX_DV_GATE_REG_0_ADDR;
} else {
enet_spare_cfg_reg = XG_ENET_SPARE_CFG_REG_ADDR;
rsif_config_reg = XG_RSIF_CONFIG_REG_ADDR;
cfg_bypass_reg = XG_CFG_BYPASS_ADDR;
rx_dv_gate_reg = XG_MCX_RX_DV_GATE_REG_0_ADDR;
}
data = xgene_enet_rd_csr(p, enet_spare_cfg_reg);
data |= MPA_IDLE_WITH_QMI_EMPTY;
xgene_enet_wr_csr(p, ENET_SPARE_CFG_REG_ADDR, data);
xgene_enet_wr_csr(p, enet_spare_cfg_reg, data);
xgene_sgmac_set_mac_addr(p);
data = xgene_enet_rd_csr(p, DEBUG_REG_ADDR);
data |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX;
xgene_enet_wr_csr(p, DEBUG_REG_ADDR, data);
/* Adjust MDC clock frequency */
data = xgene_enet_rd_mac(p, MII_MGMT_CONFIG_ADDR);
MGMT_CLOCK_SEL_SET(&data, 7);
xgene_enet_wr_mac(p, MII_MGMT_CONFIG_ADDR, data);
/* Enable drop if bufpool not available */
data = xgene_enet_rd_csr(p, RSIF_CONFIG_REG_ADDR);
data = xgene_enet_rd_csr(p, rsif_config_reg);
data |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
xgene_enet_wr_csr(p, RSIF_CONFIG_REG_ADDR, data);
/* Rtype should be copied from FP */
xgene_enet_wr_csr(p, RSIF_RAM_DBG_REG0_ADDR, 0);
xgene_enet_wr_csr(p, rsif_config_reg, data);
/* Bypass traffic gating */
xgene_enet_wr_csr(p, CFG_LINK_AGGR_RESUME_0_ADDR + offset, TX_PORT0);
xgene_enet_wr_csr(p, CFG_BYPASS_ADDR, RESUME_TX);
xgene_enet_wr_csr(p, SG_RX_DV_GATE_REG_0_ADDR + offset, RESUME_RX0);
xgene_enet_wr_csr(p, XG_ENET_SPARE_CFG_REG_1_ADDR, 0x84);
xgene_enet_wr_csr(p, cfg_bypass_reg, RESUME_TX);
xgene_enet_wr_mcx_csr(p, rx_dv_gate_reg + offset, RESUME_RX0);
}
static void xgene_sgmac_rxtx(struct xgene_enet_pdata *p, u32 bits, bool set)
@@ -331,14 +348,23 @@ static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p,
u32 dst_ring_num, u16 bufpool_id)
{
u32 data, fpsel;
u32 cle_bypass_reg0, cle_bypass_reg1;
u32 offset = p->port_id * MAC_OFFSET;
if (p->enet_id == XGENE_ENET1) {
cle_bypass_reg0 = CLE_BYPASS_REG0_0_ADDR;
cle_bypass_reg1 = CLE_BYPASS_REG1_0_ADDR;
} else {
cle_bypass_reg0 = XCLE_BYPASS_REG0_ADDR;
cle_bypass_reg1 = XCLE_BYPASS_REG1_ADDR;
}
data = CFG_CLE_BYPASS_EN0;
xgene_enet_wr_csr(p, CLE_BYPASS_REG0_0_ADDR + offset, data);
xgene_enet_wr_csr(p, cle_bypass_reg0 + offset, data);
fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20;
data = CFG_CLE_DSTQID0(dst_ring_num) | CFG_CLE_FPSEL0(fpsel);
xgene_enet_wr_csr(p, CLE_BYPASS_REG1_0_ADDR + offset, data);
xgene_enet_wr_csr(p, cle_bypass_reg1 + offset, data);
}
static void xgene_enet_shutdown(struct xgene_enet_pdata *p)

@@ -122,7 +122,6 @@ static bool xgene_enet_rd_indirect(void __iomem *addr, void __iomem *rd,
return true;
}
static void xgene_enet_rd_mac(struct xgene_enet_pdata *pdata,
u32 rd_addr, u32 *rd_data)
{

@@ -21,9 +21,28 @@
#ifndef __XGENE_ENET_XGMAC_H__
#define __XGENE_ENET_XGMAC_H__
#define X2_BLOCK_ETH_MAC_CSR_OFFSET 0x3000
#define BLOCK_AXG_MAC_OFFSET 0x0800
#define BLOCK_AXG_MAC_CSR_OFFSET 0x2000
#define XGENET_CONFIG_REG_ADDR 0x20
#define XGENET_SRST_ADDR 0x00
#define XGENET_CLKEN_ADDR 0x08
#define CSR_CLK BIT(0)
#define XGENET_CLK BIT(1)
#define PCS_CLK BIT(3)
#define AN_REF_CLK BIT(4)
#define AN_CLK BIT(5)
#define AD_CLK BIT(6)
#define CSR_RST BIT(0)
#define XGENET_RST BIT(1)
#define PCS_RST BIT(3)
#define AN_REF_RST BIT(4)
#define AN_RST BIT(5)
#define AD_RST BIT(6)
#define AXGMAC_CONFIG_0 0x0000
#define AXGMAC_CONFIG_1 0x0004
#define HSTMACRST BIT(31)
@@ -38,6 +57,7 @@
#define HSTMACADR_MSW_ADDR 0x0014
#define HSTMAXFRAME_LENGTH_ADDR 0x0020
#define XG_MCX_RX_DV_GATE_REG_0_ADDR 0x0004
#define XG_RSIF_CONFIG_REG_ADDR 0x00a0
#define XCLE_BYPASS_REG0_ADDR 0x0160
#define XCLE_BYPASS_REG1_ADDR 0x0164