Merge branch 'xgbe-next'
Tom Lendacky says:

====================
amd-xgbe: AMD 10Gb Ethernet driver updates

The following series fixes some bugs and provides new/changed support
in the driver.

- Make all the defines in the xgbe.h file unique by prefixing them with
  XGBE_ if they are not currently using the prefix.

- VLAN CTAGs are supplied in context descriptors. Tell the hardware to
  look in the Tx context descriptor, and not a register, for the VLAN
  CTAG to be inserted in the packet.

- The hardware will indicate a VLAN packet has been received even if
  VLAN CTAG stripping is currently disabled. Only indicate that a VLAN
  CTAG has been stripped for the current packet if stripping is enabled.

- Add support for VLAN filtering

- Modify destination address filtering to use the hardware hash tables

- Eliminate a checkpatch warning by replacing sscanf with kstrtouint

This patch series is based on net-next.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit f5b265039f
drivers/net/ethernet/amd/Kconfig

@@ -182,6 +182,8 @@ config AMD_XGBE
 	depends on OF_NET
 	select PHYLIB
 	select AMD_XGBE_PHY
+	select BITREVERSE
+	select CRC32
 	---help---
 	  This driver supports the AMD 10GbE Ethernet device found on an
 	  AMD SoC.
drivers/net/ethernet/amd/xgbe/xgbe-common.h

@@ -276,13 +276,6 @@
 #define MAC_PFR				0x0008
 #define MAC_WTR				0x000c
 #define MAC_HTR0			0x0010
-#define MAC_HTR1			0x0014
-#define MAC_HTR2			0x0018
-#define MAC_HTR3			0x001c
-#define MAC_HTR4			0x0020
-#define MAC_HTR5			0x0024
-#define MAC_HTR6			0x0028
-#define MAC_HTR7			0x002c
 #define MAC_VLANTR			0x0050
 #define MAC_VLANHTR			0x0058
 #define MAC_VLANIR			0x0060
@@ -315,6 +308,7 @@
 
 #define MAC_QTFCR_INC			4
 #define MAC_MACA_INC			4
+#define MAC_HTR_INC			4
 
 /* MAC register entry bit positions and sizes */
 #define MAC_HWF0R_ADDMACADRSEL_INDEX	18
@@ -387,12 +381,16 @@
 #define MAC_MACA1HR_AE_WIDTH		1
 #define MAC_PFR_HMC_INDEX		2
 #define MAC_PFR_HMC_WIDTH		1
+#define MAC_PFR_HPF_INDEX		10
+#define MAC_PFR_HPF_WIDTH		1
 #define MAC_PFR_HUC_INDEX		1
 #define MAC_PFR_HUC_WIDTH		1
 #define MAC_PFR_PM_INDEX		4
 #define MAC_PFR_PM_WIDTH		1
 #define MAC_PFR_PR_INDEX		0
 #define MAC_PFR_PR_WIDTH		1
+#define MAC_PFR_VTFE_INDEX		16
+#define MAC_PFR_VTFE_WIDTH		1
 #define MAC_PMTCSR_MGKPKTEN_INDEX	1
 #define MAC_PMTCSR_MGKPKTEN_WIDTH	1
 #define MAC_PMTCSR_PWRDWN_INDEX		0
@@ -427,16 +425,30 @@
 #define MAC_TCR_SS_WIDTH		2
 #define MAC_TCR_TE_INDEX		0
 #define MAC_TCR_TE_WIDTH		1
+#define MAC_VLANHTR_VLHT_INDEX		0
+#define MAC_VLANHTR_VLHT_WIDTH		16
+#define MAC_VLANIR_VLTI_INDEX		20
+#define MAC_VLANIR_VLTI_WIDTH		1
+#define MAC_VLANIR_CSVL_INDEX		19
+#define MAC_VLANIR_CSVL_WIDTH		1
 #define MAC_VLANTR_DOVLTC_INDEX		20
 #define MAC_VLANTR_DOVLTC_WIDTH		1
 #define MAC_VLANTR_ERSVLM_INDEX		19
 #define MAC_VLANTR_ERSVLM_WIDTH		1
 #define MAC_VLANTR_ESVL_INDEX		18
 #define MAC_VLANTR_ESVL_WIDTH		1
+#define MAC_VLANTR_ETV_INDEX		16
+#define MAC_VLANTR_ETV_WIDTH		1
 #define MAC_VLANTR_EVLS_INDEX		21
 #define MAC_VLANTR_EVLS_WIDTH		2
 #define MAC_VLANTR_EVLRXS_INDEX		24
 #define MAC_VLANTR_EVLRXS_WIDTH		1
+#define MAC_VLANTR_VL_INDEX		0
+#define MAC_VLANTR_VL_WIDTH		16
+#define MAC_VLANTR_VTHM_INDEX		25
+#define MAC_VLANTR_VTHM_WIDTH		1
+#define MAC_VLANTR_VTIM_INDEX		17
+#define MAC_VLANTR_VTIM_WIDTH		1
 #define MAC_VR_DEVID_INDEX		8
 #define MAC_VR_DEVID_WIDTH		8
 #define MAC_VR_SNPSVER_INDEX		0
drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c

@@ -151,7 +151,7 @@ static ssize_t xgbe_common_write(const char __user *buffer, size_t count,
 {
 	char workarea[32];
 	ssize_t len;
-	unsigned int scan_value;
+	int ret;
 
 	if (*ppos != 0)
 		return 0;
@@ -165,10 +165,9 @@ static ssize_t xgbe_common_write(const char __user *buffer, size_t count,
 		return len;
 
 	workarea[len] = '\0';
-	if (sscanf(workarea, "%x", &scan_value) == 1)
-		*value = scan_value;
-	else
-		return -EIO;
+	ret = kstrtouint(workarea, 0, value);
+	if (ret)
+		return ret;
 
 	return len;
 }
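A note on the parsing change above, with a minimal sketch of the contract it relies on (the helper name is hypothetical, not part of the driver): kstrtouint() with base 0 infers the base from the prefix, so "0x1c" parses as hex and "28" as decimal; unlike the old sscanf("%x") path, bare hex digits without a prefix are rejected, and failures now surface as a specific error code instead of a blanket -EIO.

/* Sketch only -- assumed helper, not the actual debugfs handler */
static int parse_debugfs_value(const char *workarea, unsigned int *value)
{
	int ret;

	/* base 0: "0x" prefix selects hex, leading 0 octal, else decimal */
	ret = kstrtouint(workarea, 0, value);
	if (ret)
		return ret;	/* -EINVAL on malformed input, -ERANGE on overflow */

	return 0;
}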
drivers/net/ethernet/amd/xgbe/xgbe-desc.c

@@ -131,7 +131,7 @@ static void xgbe_free_ring(struct xgbe_prv_data *pdata,
 
 	if (ring->rdata) {
 		for (i = 0; i < ring->rdesc_count; i++) {
-			rdata = GET_DESC_DATA(ring, i);
+			rdata = XGBE_GET_DESC_DATA(ring, i);
 			xgbe_unmap_skb(pdata, rdata);
 		}
 
@@ -256,7 +256,7 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
 		rdesc_dma = ring->rdesc_dma;
 
 		for (j = 0; j < ring->rdesc_count; j++) {
-			rdata = GET_DESC_DATA(ring, j);
+			rdata = XGBE_GET_DESC_DATA(ring, j);
 
 			rdata->rdesc = rdesc;
 			rdata->rdesc_dma = rdesc_dma;
@@ -298,7 +298,7 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
 		rdesc_dma = ring->rdesc_dma;
 
 		for (j = 0; j < ring->rdesc_count; j++) {
-			rdata = GET_DESC_DATA(ring, j);
+			rdata = XGBE_GET_DESC_DATA(ring, j);
 
 			rdata->rdesc = rdesc;
 			rdata->rdesc_dma = rdesc_dma;
@@ -392,7 +392,7 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
 	if ((tso && (packet->mss != ring->tx.cur_mss)) ||
 	    (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag)))
 		cur_index++;
-	rdata = GET_DESC_DATA(ring, cur_index);
+	rdata = XGBE_GET_DESC_DATA(ring, cur_index);
 
 	if (tso) {
 		DBGPR("  TSO packet\n");
@@ -413,12 +413,12 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
 		packet->length += packet->header_len;
 
 		cur_index++;
-		rdata = GET_DESC_DATA(ring, cur_index);
+		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
 	}
 
 	/* Map the (remainder of the) packet */
 	for (datalen = skb_headlen(skb) - offset; datalen; ) {
-		len = min_t(unsigned int, datalen, TX_MAX_BUF_SIZE);
+		len = min_t(unsigned int, datalen, XGBE_TX_MAX_BUF_SIZE);
 
 		skb_dma = dma_map_single(pdata->dev, skb->data + offset, len,
 					 DMA_TO_DEVICE);
@@ -437,7 +437,7 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
 		packet->length += len;
 
 		cur_index++;
-		rdata = GET_DESC_DATA(ring, cur_index);
+		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
 	}
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
@@ -447,7 +447,8 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
 		offset = 0;
 
 		for (datalen = skb_frag_size(frag); datalen; ) {
-			len = min_t(unsigned int, datalen, TX_MAX_BUF_SIZE);
+			len = min_t(unsigned int, datalen,
+				    XGBE_TX_MAX_BUF_SIZE);
 
 			skb_dma = skb_frag_dma_map(pdata->dev, frag, offset,
 						   len, DMA_TO_DEVICE);
@@ -468,7 +469,7 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
 			packet->length += len;
 
 			cur_index++;
-			rdata = GET_DESC_DATA(ring, cur_index);
+			rdata = XGBE_GET_DESC_DATA(ring, cur_index);
 		}
 	}
 
@@ -484,7 +485,7 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
 
 err_out:
 	while (start_index < cur_index) {
-		rdata = GET_DESC_DATA(ring, start_index++);
+		rdata = XGBE_GET_DESC_DATA(ring, start_index++);
 		xgbe_unmap_skb(pdata, rdata);
 	}
 
@@ -507,7 +508,7 @@ static void xgbe_realloc_skb(struct xgbe_channel *channel)
 	      ring->rx.realloc_index);
 
 	for (i = 0; i < ring->dirty; i++) {
-		rdata = GET_DESC_DATA(ring, ring->rx.realloc_index);
+		rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index);
 
 		/* Reset rdata values */
 		xgbe_unmap_skb(pdata, rdata);
drivers/net/ethernet/amd/xgbe/xgbe-dev.c

@@ -116,6 +116,8 @@
 
 #include <linux/phy.h>
 #include <linux/clk.h>
+#include <linux/bitrev.h>
+#include <linux/crc32.h>
 
 #include "xgbe.h"
 #include "xgbe-common.h"
@@ -547,24 +549,16 @@ static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata,
 	return 0;
 }
 
-static int xgbe_set_addn_mac_addrs(struct xgbe_prv_data *pdata,
-				   unsigned int am_mode)
+static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata,
+			     struct netdev_hw_addr *ha, unsigned int *mac_reg)
 {
-	struct netdev_hw_addr *ha;
-	unsigned int mac_reg;
 	unsigned int mac_addr_hi, mac_addr_lo;
 	u8 *mac_addr;
-	unsigned int i;
 
-	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 0);
-	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 0);
+	mac_addr_lo = 0;
+	mac_addr_hi = 0;
 
-	i = 0;
-	mac_reg = MAC_MACA1HR;
-
-	netdev_for_each_uc_addr(ha, pdata->netdev) {
-		mac_addr_lo = 0;
-		mac_addr_hi = 0;
+	if (ha) {
 		mac_addr = (u8 *)&mac_addr_lo;
 		mac_addr[0] = ha->addr[0];
 		mac_addr[1] = ha->addr[1];
@@ -574,54 +568,93 @@ static int xgbe_set_addn_mac_addrs(struct xgbe_prv_data *pdata,
 		mac_addr[0] = ha->addr[4];
 		mac_addr[1] = ha->addr[5];
 
-		DBGPR("  adding unicast address %pM at 0x%04x\n",
-		      ha->addr, mac_reg);
+		DBGPR("  adding mac address %pM at 0x%04x\n", ha->addr,
+		      *mac_reg);
 
 		XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
+	}
+
+	XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_hi);
+	*mac_reg += MAC_MACA_INC;
+	XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_lo);
+	*mac_reg += MAC_MACA_INC;
+}
 
-		XGMAC_IOWRITE(pdata, mac_reg, mac_addr_hi);
-		mac_reg += MAC_MACA_INC;
-		XGMAC_IOWRITE(pdata, mac_reg, mac_addr_lo);
-		mac_reg += MAC_MACA_INC;
-
-		i++;
-	}
-
-	if (!am_mode) {
-		netdev_for_each_mc_addr(ha, pdata->netdev) {
-			mac_addr_lo = 0;
-			mac_addr_hi = 0;
-			mac_addr = (u8 *)&mac_addr_lo;
-			mac_addr[0] = ha->addr[0];
-			mac_addr[1] = ha->addr[1];
-			mac_addr[2] = ha->addr[2];
-			mac_addr[3] = ha->addr[3];
-			mac_addr = (u8 *)&mac_addr_hi;
-			mac_addr[0] = ha->addr[4];
-			mac_addr[1] = ha->addr[5];
-
-			DBGPR("  adding multicast address %pM at 0x%04x\n",
-			      ha->addr, mac_reg);
-
-			XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
-
-			XGMAC_IOWRITE(pdata, mac_reg, mac_addr_hi);
-			mac_reg += MAC_MACA_INC;
-			XGMAC_IOWRITE(pdata, mac_reg, mac_addr_lo);
-			mac_reg += MAC_MACA_INC;
-
-			i++;
-		}
+static void xgbe_set_mac_addn_addrs(struct xgbe_prv_data *pdata)
+{
+	struct net_device *netdev = pdata->netdev;
+	struct netdev_hw_addr *ha;
+	unsigned int mac_reg;
+	unsigned int addn_macs;
+
+	mac_reg = MAC_MACA1HR;
+	addn_macs = pdata->hw_feat.addn_mac;
+
+	if (netdev_uc_count(netdev) > addn_macs) {
+		xgbe_set_promiscuous_mode(pdata, 1);
+	} else {
+		netdev_for_each_uc_addr(ha, netdev) {
+			xgbe_set_mac_reg(pdata, ha, &mac_reg);
+			addn_macs--;
+		}
+
+		if (netdev_mc_count(netdev) > addn_macs) {
+			xgbe_set_all_multicast_mode(pdata, 1);
+		} else {
+			netdev_for_each_mc_addr(ha, netdev) {
+				xgbe_set_mac_reg(pdata, ha, &mac_reg);
+				addn_macs--;
+			}
+		}
 	}
 
 	/* Clear remaining additional MAC address entries */
-	for (; i < pdata->hw_feat.addn_mac; i++) {
-		XGMAC_IOWRITE(pdata, mac_reg, 0);
-		mac_reg += MAC_MACA_INC;
-		XGMAC_IOWRITE(pdata, mac_reg, 0);
-		mac_reg += MAC_MACA_INC;
-	}
+	while (addn_macs--)
+		xgbe_set_mac_reg(pdata, NULL, &mac_reg);
+}
+
+static void xgbe_set_mac_hash_table(struct xgbe_prv_data *pdata)
+{
+	struct net_device *netdev = pdata->netdev;
+	struct netdev_hw_addr *ha;
+	unsigned int hash_reg;
+	unsigned int hash_table_shift, hash_table_count;
+	u32 hash_table[XGBE_MAC_HASH_TABLE_SIZE];
+	u32 crc;
+	unsigned int i;
+
+	hash_table_shift = 26 - (pdata->hw_feat.hash_table_size >> 7);
+	hash_table_count = pdata->hw_feat.hash_table_size / 32;
+	memset(hash_table, 0, sizeof(hash_table));
+
+	/* Build the MAC Hash Table register values */
+	netdev_for_each_uc_addr(ha, netdev) {
+		crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
+		crc >>= hash_table_shift;
+		hash_table[crc >> 5] |= (1 << (crc & 0x1f));
+	}
+
+	netdev_for_each_mc_addr(ha, netdev) {
+		crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
+		crc >>= hash_table_shift;
+		hash_table[crc >> 5] |= (1 << (crc & 0x1f));
+	}
+
+	/* Set the MAC Hash Table registers */
+	hash_reg = MAC_HTR0;
+	for (i = 0; i < hash_table_count; i++) {
+		XGMAC_IOWRITE(pdata, hash_reg, hash_table[i]);
+		hash_reg += MAC_HTR_INC;
+	}
+}
+
+static int xgbe_add_mac_addresses(struct xgbe_prv_data *pdata)
+{
+	if (pdata->hw_feat.hash_table_size)
+		xgbe_set_mac_hash_table(pdata);
+	else
+		xgbe_set_mac_addn_addrs(pdata);
 
 	return 0;
 }
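To illustrate the indexing in xgbe_set_mac_hash_table() above, here is a self-contained userspace sketch (the example address and the 256-bit table size are assumptions, and the helpers are local reimplementations, not driver code). The address is hashed with reflected CRC-32; the shift 26 - (hash_table_size >> 7) keeps the top 6, 7 or 8 bits for 64-, 128- or 256-bit tables, and the result splits into a MAC_HTRn register index and a bit position.

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Bit-by-bit reflected CRC-32 (poly 0xedb88320), no final inversion,
 * matching the kernel's crc32_le() semantics. */
static uint32_t crc32_le(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	return crc;
}

static uint32_t bitrev32(uint32_t x)
{
	uint32_t r = 0;

	for (int i = 0; i < 32; i++, x >>= 1)
		r = (r << 1) | (x & 1);
	return r;
}

int main(void)
{
	const uint8_t addr[6] = { 0x02, 0x1a, 0x2b, 0x3c, 0x4d, 0x5e }; /* assumed */
	unsigned int hash_table_size = 256;			/* bits, from hw_feat */
	unsigned int shift = 26 - (hash_table_size >> 7);	/* 24 for 256 */
	uint32_t crc = bitrev32(~crc32_le(~0u, addr, sizeof(addr))) >> shift;

	/* crc >> 5 picks one of the 32-bit MAC_HTRn registers,
	 * crc & 0x1f picks the bit within it */
	printf("MAC_HTR%u bit %u\n", crc >> 5, crc & 0x1f);
	return 0;
}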
@@ -738,6 +771,89 @@ static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
 	return 0;
 }
 
+static int xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
+{
+	/* Enable VLAN filtering */
+	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1);
+
+	/* Enable VLAN Hash Table filtering */
+	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1);
+
+	/* Disable VLAN tag inverse matching */
+	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0);
+
+	/* Only filter on the lower 12-bits of the VLAN tag */
+	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1);
+
+	/* In order for the VLAN Hash Table filtering to be effective,
+	 * the VLAN tag identifier in the VLAN Tag Register must not
+	 * be zero.  Set the VLAN tag identifier to "1" to enable the
+	 * VLAN Hash Table filtering.  This implies that a VLAN tag of
+	 * 1 will always pass filtering.
+	 */
+	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1);
+
+	return 0;
+}
+
+static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
+{
+	/* Disable VLAN filtering */
+	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0);
+
+	return 0;
+}
+
+#ifndef CRCPOLY_LE
+#define CRCPOLY_LE 0xedb88320
+#endif
+static u32 xgbe_vid_crc32_le(__le16 vid_le)
+{
+	u32 poly = CRCPOLY_LE;
+	u32 crc = ~0;
+	u32 temp = 0;
+	unsigned char *data = (unsigned char *)&vid_le;
+	unsigned char data_byte = 0;
+	int i, bits;
+
+	bits = get_bitmask_order(VLAN_VID_MASK);
+	for (i = 0; i < bits; i++) {
+		if ((i % 8) == 0)
+			data_byte = data[i / 8];
+
+		temp = ((crc & 1) ^ data_byte) & 1;
+		crc >>= 1;
+		data_byte >>= 1;
+
+		if (temp)
+			crc ^= poly;
+	}
+
+	return crc;
+}
+
+static int xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata)
+{
+	u32 crc;
+	u16 vid;
+	__le16 vid_le;
+	u16 vlan_hash_table = 0;
+
+	/* Generate the VLAN Hash Table value */
+	for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) {
+		/* Get the CRC32 value of the VLAN ID */
+		vid_le = cpu_to_le16(vid);
+		crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28;
+
+		vlan_hash_table |= (1 << crc);
+	}
+
+	/* Set the VLAN Hash Table filtering register */
+	XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table);
+
+	return 0;
+}
+
 static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
 {
 	struct xgbe_ring_desc *rdesc = rdata->rdesc;
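The VLAN path above uses a 4-bit hash into the 16-bit MAC_VLANHTR register. A userspace sketch of the computation follows (the function name is hypothetical, and it reuses the bitrev32() helper from the earlier sketch): CRC-32 runs over only the 12 VID bits, mirroring get_bitmask_order(VLAN_VID_MASK), and the top four bits of the bit-reversed, inverted CRC select which of the 16 hash bits a VID sets.

static unsigned int xgbe_vlan_hash_bit(uint16_t vid)
{
	uint8_t data[2] = { vid & 0xff, (vid >> 8) & 0x0f };	/* cpu_to_le16 layout */
	uint32_t crc = ~0u;

	/* CRC only the 12 VID bits, as get_bitmask_order(VLAN_VID_MASK) does */
	for (int i = 0; i < 12; i++) {
		unsigned int bit = (data[i / 8] >> (i % 8)) & 1;

		if ((crc ^ bit) & 1)
			crc = (crc >> 1) ^ 0xedb88320;
		else
			crc >>= 1;
	}

	return bitrev32(~crc) >> 28;	/* top 4 bits: one of 16 MAC_VLANHTR bits */
}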
@@ -766,7 +882,7 @@ static void xgbe_tx_desc_init(struct xgbe_channel *channel)
 
 	/* Initialze all descriptors */
 	for (i = 0; i < ring->rdesc_count; i++) {
-		rdata = GET_DESC_DATA(ring, i);
+		rdata = XGBE_GET_DESC_DATA(ring, i);
 		rdesc = rdata->rdesc;
 
 		/* Initialize Tx descriptor
@@ -791,7 +907,7 @@ static void xgbe_tx_desc_init(struct xgbe_channel *channel)
 	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1);
 
 	/* Update the starting address of descriptor ring */
-	rdata = GET_DESC_DATA(ring, start_index);
+	rdata = XGBE_GET_DESC_DATA(ring, start_index);
 	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_HI,
 			  upper_32_bits(rdata->rdesc_dma));
 	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_LO,
@@ -848,7 +964,7 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel)
 
 	/* Initialize all descriptors */
 	for (i = 0; i < ring->rdesc_count; i++) {
-		rdata = GET_DESC_DATA(ring, i);
+		rdata = XGBE_GET_DESC_DATA(ring, i);
 		rdesc = rdata->rdesc;
 
 		/* Initialize Rx descriptor
@@ -882,14 +998,14 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel)
 	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1);
 
 	/* Update the starting address of descriptor ring */
-	rdata = GET_DESC_DATA(ring, start_index);
+	rdata = XGBE_GET_DESC_DATA(ring, start_index);
 	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_HI,
 			  upper_32_bits(rdata->rdesc_dma));
 	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_LO,
 			  lower_32_bits(rdata->rdesc_dma));
 
 	/* Update the Rx Descriptor Tail Pointer */
-	rdata = GET_DESC_DATA(ring, start_index + ring->rdesc_count - 1);
+	rdata = XGBE_GET_DESC_DATA(ring, start_index + ring->rdesc_count - 1);
 	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
 			  lower_32_bits(rdata->rdesc_dma));
 
@@ -933,7 +1049,7 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel)
 	if (tx_coalesce && !channel->tx_timer_active)
 		ring->coalesce_count = 0;
 
-	rdata = GET_DESC_DATA(ring, ring->cur);
+	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
 	rdesc = rdata->rdesc;
 
 	/* Create a context descriptor if this is a TSO packet */
@@ -977,7 +1093,7 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel)
 		}
 
 		ring->cur++;
-		rdata = GET_DESC_DATA(ring, ring->cur);
+		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
 		rdesc = rdata->rdesc;
 	}
 
@@ -1034,7 +1150,7 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel)
 
 	for (i = ring->cur - start_index + 1; i < packet->rdesc_count; i++) {
 		ring->cur++;
-		rdata = GET_DESC_DATA(ring, ring->cur);
+		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
 		rdesc = rdata->rdesc;
 
 		/* Update buffer address */
@@ -1074,7 +1190,7 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel)
 	wmb();
 
 	/* Set OWN bit for the first descriptor */
-	rdata = GET_DESC_DATA(ring, start_index);
+	rdata = XGBE_GET_DESC_DATA(ring, start_index);
 	rdesc = rdata->rdesc;
 	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
 
@@ -1088,7 +1204,7 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel)
 	/* Issue a poll command to Tx DMA by writing address
 	 * of next immediate free descriptor */
 	ring->cur++;
-	rdata = GET_DESC_DATA(ring, ring->cur);
+	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
 	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
 			  lower_32_bits(rdata->rdesc_dma));
 
@@ -1113,11 +1229,12 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
 	struct xgbe_ring_data *rdata;
 	struct xgbe_ring_desc *rdesc;
 	struct xgbe_packet_data *packet = &ring->packet_data;
+	struct net_device *netdev = channel->pdata->netdev;
 	unsigned int err, etlt;
 
 	DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);
 
-	rdata = GET_DESC_DATA(ring, ring->cur);
+	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
 	rdesc = rdata->rdesc;
 
 	/* Check for data availability */
@@ -1153,7 +1270,8 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
 	DBGPR("  err=%u, etlt=%#x\n", err, etlt);
 
 	if (!err || (err && !etlt)) {
-		if (etlt == 0x09) {
+		if ((etlt == 0x09) &&
+		    (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
 			XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
 				       VLAN_CTAG, 1);
 			packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
@@ -1195,7 +1313,7 @@ static void xgbe_save_interrupt_status(struct xgbe_channel *channel,
 
 	if (int_state == XGMAC_INT_STATE_SAVE) {
 		channel->saved_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
-		channel->saved_ier &= DMA_INTERRUPT_MASK;
+		channel->saved_ier &= XGBE_DMA_INTERRUPT_MASK;
 	} else {
 		dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
 		dma_ch_ier |= channel->saved_ier;
@@ -1275,7 +1393,7 @@ static int xgbe_disable_int(struct xgbe_channel *channel,
 		xgbe_save_interrupt_status(channel, XGMAC_INT_STATE_SAVE);
 
 		dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
-		dma_ch_ier &= ~DMA_INTERRUPT_MASK;
+		dma_ch_ier &= ~XGBE_DMA_INTERRUPT_MASK;
 		XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
 		break;
 	default:
@@ -1342,23 +1460,23 @@ static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata)
 	unsigned int arcache, awcache;
 
 	arcache = 0;
-	XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, DMA_ARCACHE_SETTING);
-	XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRD, DMA_ARDOMAIN_SETTING);
-	XGMAC_SET_BITS(arcache, DMA_AXIARCR, TEC, DMA_ARCACHE_SETTING);
-	XGMAC_SET_BITS(arcache, DMA_AXIARCR, TED, DMA_ARDOMAIN_SETTING);
-	XGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, DMA_ARCACHE_SETTING);
-	XGMAC_SET_BITS(arcache, DMA_AXIARCR, THD, DMA_ARDOMAIN_SETTING);
+	XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, XGBE_DMA_ARCACHE);
+	XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRD, XGBE_DMA_ARDOMAIN);
+	XGMAC_SET_BITS(arcache, DMA_AXIARCR, TEC, XGBE_DMA_ARCACHE);
+	XGMAC_SET_BITS(arcache, DMA_AXIARCR, TED, XGBE_DMA_ARDOMAIN);
+	XGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, XGBE_DMA_ARCACHE);
+	XGMAC_SET_BITS(arcache, DMA_AXIARCR, THD, XGBE_DMA_ARDOMAIN);
 	XGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache);
 
 	awcache = 0;
-	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, DMA_AWCACHE_SETTING);
-	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWD, DMA_AWDOMAIN_SETTING);
-	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, DMA_AWCACHE_SETTING);
-	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, DMA_AWDOMAIN_SETTING);
-	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, DMA_AWCACHE_SETTING);
-	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, DMA_AWDOMAIN_SETTING);
-	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDC, DMA_AWCACHE_SETTING);
-	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDD, DMA_AWDOMAIN_SETTING);
+	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, XGBE_DMA_AWCACHE);
+	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWD, XGBE_DMA_AWDOMAIN);
+	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, XGBE_DMA_AWCACHE);
+	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, XGBE_DMA_AWDOMAIN);
+	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, XGBE_DMA_AWCACHE);
+	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, XGBE_DMA_AWDOMAIN);
+	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDC, XGBE_DMA_AWCACHE);
+	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDD, XGBE_DMA_AWDOMAIN);
 	XGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache);
 }
 
@@ -1388,66 +1506,66 @@ static unsigned int xgbe_calculate_per_queue_fifo(unsigned long fifo_size,
 	/* Calculate Tx/Rx fifo share per queue */
 	switch (fifo_size) {
 	case 0:
-		q_fifo_size = FIFO_SIZE_B(128);
+		q_fifo_size = XGBE_FIFO_SIZE_B(128);
 		break;
 	case 1:
-		q_fifo_size = FIFO_SIZE_B(256);
+		q_fifo_size = XGBE_FIFO_SIZE_B(256);
 		break;
 	case 2:
-		q_fifo_size = FIFO_SIZE_B(512);
+		q_fifo_size = XGBE_FIFO_SIZE_B(512);
 		break;
 	case 3:
-		q_fifo_size = FIFO_SIZE_KB(1);
+		q_fifo_size = XGBE_FIFO_SIZE_KB(1);
 		break;
 	case 4:
-		q_fifo_size = FIFO_SIZE_KB(2);
+		q_fifo_size = XGBE_FIFO_SIZE_KB(2);
 		break;
 	case 5:
-		q_fifo_size = FIFO_SIZE_KB(4);
+		q_fifo_size = XGBE_FIFO_SIZE_KB(4);
 		break;
 	case 6:
-		q_fifo_size = FIFO_SIZE_KB(8);
+		q_fifo_size = XGBE_FIFO_SIZE_KB(8);
 		break;
 	case 7:
-		q_fifo_size = FIFO_SIZE_KB(16);
+		q_fifo_size = XGBE_FIFO_SIZE_KB(16);
 		break;
 	case 8:
-		q_fifo_size = FIFO_SIZE_KB(32);
+		q_fifo_size = XGBE_FIFO_SIZE_KB(32);
 		break;
 	case 9:
-		q_fifo_size = FIFO_SIZE_KB(64);
+		q_fifo_size = XGBE_FIFO_SIZE_KB(64);
 		break;
 	case 10:
-		q_fifo_size = FIFO_SIZE_KB(128);
+		q_fifo_size = XGBE_FIFO_SIZE_KB(128);
 		break;
 	case 11:
-		q_fifo_size = FIFO_SIZE_KB(256);
+		q_fifo_size = XGBE_FIFO_SIZE_KB(256);
 		break;
 	}
 	q_fifo_size = q_fifo_size / queue_count;
 
 	/* Set the queue fifo size programmable value */
-	if (q_fifo_size >= FIFO_SIZE_KB(256))
+	if (q_fifo_size >= XGBE_FIFO_SIZE_KB(256))
 		p_fifo = XGMAC_MTL_FIFO_SIZE_256K;
-	else if (q_fifo_size >= FIFO_SIZE_KB(128))
+	else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(128))
 		p_fifo = XGMAC_MTL_FIFO_SIZE_128K;
-	else if (q_fifo_size >= FIFO_SIZE_KB(64))
+	else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(64))
 		p_fifo = XGMAC_MTL_FIFO_SIZE_64K;
-	else if (q_fifo_size >= FIFO_SIZE_KB(32))
+	else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(32))
 		p_fifo = XGMAC_MTL_FIFO_SIZE_32K;
-	else if (q_fifo_size >= FIFO_SIZE_KB(16))
+	else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(16))
 		p_fifo = XGMAC_MTL_FIFO_SIZE_16K;
-	else if (q_fifo_size >= FIFO_SIZE_KB(8))
+	else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(8))
 		p_fifo = XGMAC_MTL_FIFO_SIZE_8K;
-	else if (q_fifo_size >= FIFO_SIZE_KB(4))
+	else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(4))
 		p_fifo = XGMAC_MTL_FIFO_SIZE_4K;
-	else if (q_fifo_size >= FIFO_SIZE_KB(2))
+	else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(2))
 		p_fifo = XGMAC_MTL_FIFO_SIZE_2K;
-	else if (q_fifo_size >= FIFO_SIZE_KB(1))
+	else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(1))
 		p_fifo = XGMAC_MTL_FIFO_SIZE_1K;
-	else if (q_fifo_size >= FIFO_SIZE_B(512))
+	else if (q_fifo_size >= XGBE_FIFO_SIZE_B(512))
 		p_fifo = XGMAC_MTL_FIFO_SIZE_512;
-	else if (q_fifo_size >= FIFO_SIZE_B(256))
+	else if (q_fifo_size >= XGBE_FIFO_SIZE_B(256))
 		p_fifo = XGMAC_MTL_FIFO_SIZE_256;
 
 	return p_fifo;
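A worked example of the sizing above, as a small sketch with assumed values (not driver code): a hardware fifo_size encoding of 7 means XGBE_FIFO_SIZE_KB(16); split across four queues each gets 4096 bytes, which falls in the 4K programmable step.

#include <assert.h>

static void fifo_sizing_example(void)
{
	unsigned int q_fifo_size = 16 * 1024;	/* case 7: XGBE_FIFO_SIZE_KB(16) */
	unsigned int queue_count = 4;

	q_fifo_size = q_fifo_size / queue_count;
	/* 4096 >= 4 KB but < 8 KB, so XGMAC_MTL_FIFO_SIZE_4K is chosen */
	assert(q_fifo_size == 4096);
}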
@@ -1520,6 +1638,13 @@ static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
 static void xgbe_config_mac_address(struct xgbe_prv_data *pdata)
 {
 	xgbe_set_mac_address(pdata, pdata->netdev->dev_addr);
+
+	/* Filtering is done using perfect filtering and hash filtering */
+	if (pdata->hw_feat.hash_table_size) {
+		XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
+		XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
+		XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 1);
+	}
 }
 
 static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
@@ -1541,6 +1666,18 @@ static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata)
 
 static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata)
 {
+	/* Indicate that VLAN Tx CTAGs come from context descriptors */
+	XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0);
+	XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1);
+
+	/* Set the current VLAN Hash Table register value */
+	xgbe_update_vlan_hash_table(pdata);
+
+	if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+		xgbe_enable_rx_vlan_filtering(pdata);
+	else
+		xgbe_disable_rx_vlan_filtering(pdata);
+
 	if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
 		xgbe_enable_rx_vlan_stripping(pdata);
 	else
@@ -2104,7 +2241,7 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
 
 	hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode;
 	hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode;
-	hw_if->set_addn_mac_addrs = xgbe_set_addn_mac_addrs;
+	hw_if->add_mac_addresses = xgbe_add_mac_addresses;
 	hw_if->set_mac_address = xgbe_set_mac_address;
 
 	hw_if->enable_rx_csum = xgbe_enable_rx_csum;
@@ -2112,6 +2249,9 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
 
 	hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
 	hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
+	hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering;
+	hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering;
+	hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table;
 
 	hw_if->read_mmd_regs = xgbe_read_mmd_regs;
 	hw_if->write_mmd_regs = xgbe_write_mmd_regs;
drivers/net/ethernet/amd/xgbe/xgbe-drv.c

@@ -144,9 +144,10 @@ static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
 	}
 
 	rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
-	if (rx_buf_size < RX_MIN_BUF_SIZE)
-		rx_buf_size = RX_MIN_BUF_SIZE;
-	rx_buf_size = (rx_buf_size + RX_BUF_ALIGN - 1) & ~(RX_BUF_ALIGN - 1);
+	if (rx_buf_size < XGBE_RX_MIN_BUF_SIZE)
+		rx_buf_size = XGBE_RX_MIN_BUF_SIZE;
+	rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) &
+		      ~(XGBE_RX_BUF_ALIGN - 1);
 
 	return rx_buf_size;
 }
@@ -377,6 +378,21 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
 	hw_feat->pps_out_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
 	hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM);
 
+	/* Translate the Hash Table size into actual number */
+	switch (hw_feat->hash_table_size) {
+	case 0:
+		break;
+	case 1:
+		hw_feat->hash_table_size = 64;
+		break;
+	case 2:
+		hw_feat->hash_table_size = 128;
+		break;
+	case 3:
+		hw_feat->hash_table_size = 256;
+		break;
+	}
+
 	/* The Queue and Channel counts are zero based so increment them
 	 * to get the actual number
 	 */
@@ -446,7 +462,7 @@ static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
 			break;
 
 		for (j = 0; j < ring->rdesc_count; j++) {
-			rdata = GET_DESC_DATA(ring, j);
+			rdata = XGBE_GET_DESC_DATA(ring, j);
 			desc_if->unmap_skb(pdata, rdata);
 		}
 	}
@@ -471,7 +487,7 @@ static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
 			break;
 
 		for (j = 0; j < ring->rdesc_count; j++) {
-			rdata = GET_DESC_DATA(ring, j);
+			rdata = XGBE_GET_DESC_DATA(ring, j);
 			desc_if->unmap_skb(pdata, rdata);
 		}
 	}
@@ -726,14 +742,14 @@ static void xgbe_packet_info(struct xgbe_ring *ring, struct sk_buff *skb,
 
 	for (len = skb_headlen(skb); len;) {
 		packet->rdesc_count++;
-		len -= min_t(unsigned int, len, TX_MAX_BUF_SIZE);
+		len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
 	}
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		frag = &skb_shinfo(skb)->frags[i];
 		for (len = skb_frag_size(frag); len; ) {
 			packet->rdesc_count++;
-			len -= min_t(unsigned int, len, TX_MAX_BUF_SIZE);
+			len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
 		}
 	}
 }
@@ -911,18 +927,10 @@ static void xgbe_set_rx_mode(struct net_device *netdev)
 	pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
 	am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);
 
-	if (netdev_uc_count(netdev) > pdata->hw_feat.addn_mac)
-		pr_mode = 1;
-	if (netdev_mc_count(netdev) > pdata->hw_feat.addn_mac)
-		am_mode = 1;
-	if ((netdev_uc_count(netdev) + netdev_mc_count(netdev)) >
-	     pdata->hw_feat.addn_mac)
-		pr_mode = 1;
-
 	hw_if->set_promiscuous_mode(pdata, pr_mode);
 	hw_if->set_all_multicast_mode(pdata, am_mode);
-	if (!pr_mode)
-		hw_if->set_addn_mac_addrs(pdata, am_mode);
+
+	hw_if->add_mac_addresses(pdata);
 
 	DBGPR("<--xgbe_set_rx_mode\n");
 }
@@ -999,6 +1007,38 @@ static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
 	return s;
 }
 
+static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
+				u16 vid)
+{
+	struct xgbe_prv_data *pdata = netdev_priv(netdev);
+	struct xgbe_hw_if *hw_if = &pdata->hw_if;
+
+	DBGPR("-->%s\n", __func__);
+
+	set_bit(vid, pdata->active_vlans);
+	hw_if->update_vlan_hash_table(pdata);
+
+	DBGPR("<--%s\n", __func__);
+
+	return 0;
+}
+
+static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
+				 u16 vid)
+{
+	struct xgbe_prv_data *pdata = netdev_priv(netdev);
+	struct xgbe_hw_if *hw_if = &pdata->hw_if;
+
+	DBGPR("-->%s\n", __func__);
+
+	clear_bit(vid, pdata->active_vlans);
+	hw_if->update_vlan_hash_table(pdata);
+
+	DBGPR("<--%s\n", __func__);
+
+	return 0;
+}
+
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void xgbe_poll_controller(struct net_device *netdev)
 {
@@ -1021,26 +1061,26 @@ static int xgbe_set_features(struct net_device *netdev,
 {
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
-	unsigned int rxcsum_enabled, rxvlan_enabled;
+	unsigned int rxcsum, rxvlan, rxvlan_filter;
 
-	rxcsum_enabled = !!(pdata->netdev_features & NETIF_F_RXCSUM);
-	rxvlan_enabled = !!(pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX);
+	rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
+	rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
+	rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;
 
-	if ((features & NETIF_F_RXCSUM) && !rxcsum_enabled) {
+	if ((features & NETIF_F_RXCSUM) && !rxcsum)
 		hw_if->enable_rx_csum(pdata);
-		netdev_alert(netdev, "state change - rxcsum enabled\n");
-	} else if (!(features & NETIF_F_RXCSUM) && rxcsum_enabled) {
+	else if (!(features & NETIF_F_RXCSUM) && rxcsum)
 		hw_if->disable_rx_csum(pdata);
-		netdev_alert(netdev, "state change - rxcsum disabled\n");
-	}
 
-	if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan_enabled) {
+	if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
 		hw_if->enable_rx_vlan_stripping(pdata);
-		netdev_alert(netdev, "state change - rxvlan enabled\n");
-	} else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan_enabled) {
+	else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
 		hw_if->disable_rx_vlan_stripping(pdata);
-		netdev_alert(netdev, "state change - rxvlan disabled\n");
-	}
+
+	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
+		hw_if->enable_rx_vlan_filtering(pdata);
+	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
+		hw_if->disable_rx_vlan_filtering(pdata);
 
 	pdata->netdev_features = features;
 
@@ -1058,6 +1098,8 @@ static const struct net_device_ops xgbe_netdev_ops = {
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_change_mtu		= xgbe_change_mtu,
 	.ndo_get_stats64	= xgbe_get_stats64,
+	.ndo_vlan_rx_add_vid	= xgbe_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid	= xgbe_vlan_rx_kill_vid,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= xgbe_poll_controller,
 #endif
@@ -1089,8 +1131,9 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
 
 	spin_lock_irqsave(&ring->lock, flags);
 
-	while ((processed < TX_DESC_MAX_PROC) && (ring->dirty < ring->cur)) {
-		rdata = GET_DESC_DATA(ring, ring->dirty);
+	while ((processed < XGBE_TX_DESC_MAX_PROC) &&
+	       (ring->dirty < ring->cur)) {
+		rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
 		rdesc = rdata->rdesc;
 
 		if (!hw_if->tx_complete(rdesc))
@@ -1109,7 +1152,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
 	}
 
 	if ((ring->tx.queue_stopped == 1) &&
-	    (xgbe_tx_avail_desc(ring) > TX_DESC_MIN_FREE)) {
+	    (xgbe_tx_avail_desc(ring) > XGBE_TX_DESC_MIN_FREE)) {
 		ring->tx.queue_stopped = 0;
 		netif_wake_subqueue(netdev, channel->queue_index);
 	}
@@ -1152,7 +1195,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
 		cur_len = 0;
 
 read_again:
-		rdata = GET_DESC_DATA(ring, ring->cur);
+		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
 
 		if (hw_if->dev_read(channel))
 			break;
@@ -1244,7 +1287,7 @@ read_again:
 
 	/* Update the Rx Tail Pointer Register with address of
 	 * the last cleaned entry */
-	rdata = GET_DESC_DATA(ring, ring->rx.realloc_index - 1);
+	rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index - 1);
 	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
 			  lower_32_bits(rdata->rdesc_dma));
 }
@@ -1296,7 +1339,7 @@ void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
 	struct xgbe_ring_desc *rdesc;
 
 	while (count--) {
-		rdata = GET_DESC_DATA(ring, idx);
+		rdata = XGBE_GET_DESC_DATA(ring, idx);
 		rdesc = rdata->rdesc;
 		DBGPR("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
 		      (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
drivers/net/ethernet/amd/xgbe/xgbe-main.c

@@ -247,16 +247,16 @@ static int xgbe_probe(struct platform_device *pdev)
 	mutex_init(&pdata->xpcs_mutex);
 
 	/* Set and validate the number of descriptors for a ring */
-	BUILD_BUG_ON_NOT_POWER_OF_2(TX_DESC_CNT);
-	pdata->tx_desc_count = TX_DESC_CNT;
+	BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
+	pdata->tx_desc_count = XGBE_TX_DESC_CNT;
 	if (pdata->tx_desc_count & (pdata->tx_desc_count - 1)) {
 		dev_err(dev, "tx descriptor count (%d) is not valid\n",
 			pdata->tx_desc_count);
 		ret = -EINVAL;
 		goto err_io;
 	}
-	BUILD_BUG_ON_NOT_POWER_OF_2(RX_DESC_CNT);
-	pdata->rx_desc_count = RX_DESC_CNT;
+	BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_RX_DESC_CNT);
+	pdata->rx_desc_count = XGBE_RX_DESC_CNT;
 	if (pdata->rx_desc_count & (pdata->rx_desc_count - 1)) {
 		dev_err(dev, "rx descriptor count (%d) is not valid\n",
 			pdata->rx_desc_count);
@@ -385,7 +385,8 @@ static int xgbe_probe(struct platform_device *pdev)
 			      NETIF_F_TSO6 |
 			      NETIF_F_GRO |
 			      NETIF_F_HW_VLAN_CTAG_RX |
-			      NETIF_F_HW_VLAN_CTAG_TX;
+			      NETIF_F_HW_VLAN_CTAG_TX |
+			      NETIF_F_HW_VLAN_CTAG_FILTER;
 
 	netdev->vlan_features |= NETIF_F_SG |
 				 NETIF_F_IP_CSUM |
@@ -396,6 +397,8 @@ static int xgbe_probe(struct platform_device *pdev)
 	netdev->features |= netdev->hw_features;
 	pdata->netdev_features = netdev->features;
 
+	netdev->priv_flags |= IFF_UNICAST_FLT;
+
 	xgbe_init_rx_coalesce(pdata);
 	xgbe_init_tx_coalesce(pdata);
 
drivers/net/ethernet/amd/xgbe/xgbe.h

@@ -121,6 +121,8 @@
 #include <linux/netdevice.h>
 #include <linux/workqueue.h>
 #include <linux/phy.h>
+#include <linux/if_vlan.h>
+#include <linux/bitops.h>
 
 
 #define XGBE_DRV_NAME		"amd-xgbe"
@@ -128,22 +130,25 @@
 #define XGBE_DRV_DESC		"AMD 10 Gigabit Ethernet Driver"
 
 /* Descriptor related defines */
-#define TX_DESC_CNT		512
-#define TX_DESC_MIN_FREE	(TX_DESC_CNT >> 3)
-#define TX_DESC_MAX_PROC	(TX_DESC_CNT >> 1)
-#define RX_DESC_CNT		512
+#define XGBE_TX_DESC_CNT	512
+#define XGBE_TX_DESC_MIN_FREE	(XGBE_TX_DESC_CNT >> 3)
+#define XGBE_TX_DESC_MAX_PROC	(XGBE_TX_DESC_CNT >> 1)
+#define XGBE_RX_DESC_CNT	512
 
-#define TX_MAX_BUF_SIZE		(0x3fff & ~(64 - 1))
+#define XGBE_TX_MAX_BUF_SIZE	(0x3fff & ~(64 - 1))
 
-#define RX_MIN_BUF_SIZE		(ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
-#define RX_BUF_ALIGN		64
+#define XGBE_RX_MIN_BUF_SIZE	(ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
+#define XGBE_RX_BUF_ALIGN	64
 
 #define XGBE_MAX_DMA_CHANNELS	16
-#define DMA_ARDOMAIN_SETTING	0x2
-#define DMA_ARCACHE_SETTING	0xb
-#define DMA_AWDOMAIN_SETTING	0x2
-#define DMA_AWCACHE_SETTING	0x7
-#define DMA_INTERRUPT_MASK	0x31c7
+
+/* DMA cache settings - Outer sharable, write-back, write-allocate */
+#define XGBE_DMA_ARDOMAIN	0x2
+#define XGBE_DMA_ARCACHE	0xb
+#define XGBE_DMA_AWDOMAIN	0x2
+#define XGBE_DMA_AWCACHE	0x7
+
+#define XGBE_DMA_INTERRUPT_MASK	0x31c7
 
 #define XGMAC_MIN_PACKET	60
 #define XGMAC_STD_PACKET_MTU	1500
@@ -151,10 +156,6 @@
 #define XGMAC_JUMBO_PACKET_MTU	9000
 #define XGMAC_MAX_JUMBO_PACKET	9018
 
-#define MAX_MULTICAST_LIST	14
-#define TX_FLAGS_IP_PKT		0x00000001
-#define TX_FLAGS_TCP_PKT	0x00000002
-
 /* MDIO bus phy name */
 #define XGBE_PHY_NAME		"amd_xgbe_phy"
 #define XGBE_PRTAD		0
@@ -163,18 +164,18 @@
 #define XGMAC_DRIVER_CONTEXT	1
 #define XGMAC_IOCTL_CONTEXT	2
 
-#define FIFO_SIZE_B(x)		(x)
-#define FIFO_SIZE_KB(x)		(x * 1024)
+#define XGBE_FIFO_SIZE_B(x)	(x)
+#define XGBE_FIFO_SIZE_KB(x)	(x * 1024)
 
 #define XGBE_TC_CNT		2
 
 /* Helper macro for descriptor handling
- *  Always use GET_DESC_DATA to access the descriptor data
+ *  Always use XGBE_GET_DESC_DATA to access the descriptor data
  *  since the index is free-running and needs to be and-ed
  *  with the descriptor count value of the ring to index to
  *  the proper descriptor data.
  */
-#define GET_DESC_DATA(_ring, _idx)				\
+#define XGBE_GET_DESC_DATA(_ring, _idx)				\
 	((_ring)->rdata +					\
 	 ((_idx) & ((_ring)->rdesc_count - 1)))
 
@@ -190,6 +191,8 @@
 /* Flow control queue count */
 #define XGMAC_MAX_FLOW_CONTROL_QUEUES	8
 
+/* Maximum MAC address hash table size (256 bits = 8 bytes) */
+#define XGBE_MAC_HASH_TABLE_SIZE	8
+
 struct xgbe_prv_data;
 
@@ -219,7 +222,7 @@ struct xgbe_ring_desc {
 
 /* Structure used to hold information related to the descriptor
  * and the packet associated with the descriptor (always use
- * use the GET_DESC_DATA macro to access this data from the ring)
+ * use the XGBE_GET_DESC_DATA macro to access this data from the ring)
  */
 struct xgbe_ring_data {
 	struct xgbe_ring_desc *rdesc;	/* Virtual address of descriptor */
@@ -250,7 +253,7 @@ struct xgbe_ring {
 	unsigned int rdesc_count;
 
 	/* Array of descriptor data corresponding the descriptor memory
-	 * (always use the GET_DESC_DATA macro to access this data)
+	 * (always use the XGBE_GET_DESC_DATA macro to access this data)
 	 */
 	struct xgbe_ring_data *rdata;
 
@@ -386,7 +389,7 @@ struct xgbe_hw_if {
 
 	int (*set_promiscuous_mode)(struct xgbe_prv_data *, unsigned int);
 	int (*set_all_multicast_mode)(struct xgbe_prv_data *, unsigned int);
-	int (*set_addn_mac_addrs)(struct xgbe_prv_data *, unsigned int);
+	int (*add_mac_addresses)(struct xgbe_prv_data *);
 	int (*set_mac_address)(struct xgbe_prv_data *, u8 *addr);
 
 	int (*enable_rx_csum)(struct xgbe_prv_data *);
@@ -394,6 +397,9 @@ struct xgbe_hw_if {
 
 	int (*enable_rx_vlan_stripping)(struct xgbe_prv_data *);
 	int (*disable_rx_vlan_stripping)(struct xgbe_prv_data *);
+	int (*enable_rx_vlan_filtering)(struct xgbe_prv_data *);
+	int (*disable_rx_vlan_filtering)(struct xgbe_prv_data *);
+	int (*update_vlan_hash_table)(struct xgbe_prv_data *);
 
 	int (*read_mmd_regs)(struct xgbe_prv_data *, int, int);
 	void (*write_mmd_regs)(struct xgbe_prv_data *, int, int, int);
@@ -589,6 +595,9 @@ struct xgbe_prv_data {
 	struct napi_struct napi;
 	struct xgbe_mmc_stats mmc_stats;
 
+	/* Filtering support */
+	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+
 	/* System clock value used for Rx watchdog */
 	struct clk *sysclock;
 