net-next: Add multiqueue support to vmxnet3 driver
Add multiqueue support to vmxnet3 driver This change adds multiqueue and thus receive-side scaling support to the vmxnet3 device driver. The number of rx queues is limited to 1 in cases where MSI is not configured or one MSI-X vector is not available per rx queue. Signed-off-by: Shreyas Bhatewara <sbhatewara@vmware.com> Reviewed-by: Bhavesh Davda <bhavesh@vmware.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
24912420e9
commit
09c5088e5c
File diff suppressed because it is too large
Load Diff
|
@ -151,44 +151,42 @@ vmxnet3_get_stats(struct net_device *netdev)
|
|||
struct UPT1_TxStats *devTxStats;
|
||||
struct UPT1_RxStats *devRxStats;
|
||||
struct net_device_stats *net_stats = &netdev->stats;
|
||||
int i;
|
||||
|
||||
adapter = netdev_priv(netdev);
|
||||
|
||||
/* Collect the dev stats into the shared area */
|
||||
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
|
||||
|
||||
/* Assuming that we have a single queue device */
|
||||
devTxStats = &adapter->tqd_start->stats;
|
||||
devRxStats = &adapter->rqd_start->stats;
|
||||
|
||||
/* Get access to the driver stats per queue */
|
||||
drvTxStats = &adapter->tx_queue.stats;
|
||||
drvRxStats = &adapter->rx_queue.stats;
|
||||
|
||||
memset(net_stats, 0, sizeof(*net_stats));
|
||||
for (i = 0; i < adapter->num_tx_queues; i++) {
|
||||
devTxStats = &adapter->tqd_start[i].stats;
|
||||
drvTxStats = &adapter->tx_queue[i].stats;
|
||||
net_stats->tx_packets += devTxStats->ucastPktsTxOK +
|
||||
devTxStats->mcastPktsTxOK +
|
||||
devTxStats->bcastPktsTxOK;
|
||||
net_stats->tx_bytes += devTxStats->ucastBytesTxOK +
|
||||
devTxStats->mcastBytesTxOK +
|
||||
devTxStats->bcastBytesTxOK;
|
||||
net_stats->tx_errors += devTxStats->pktsTxError;
|
||||
net_stats->tx_dropped += drvTxStats->drop_total;
|
||||
}
|
||||
|
||||
net_stats->rx_packets = devRxStats->ucastPktsRxOK +
|
||||
devRxStats->mcastPktsRxOK +
|
||||
devRxStats->bcastPktsRxOK;
|
||||
for (i = 0; i < adapter->num_rx_queues; i++) {
|
||||
devRxStats = &adapter->rqd_start[i].stats;
|
||||
drvRxStats = &adapter->rx_queue[i].stats;
|
||||
net_stats->rx_packets += devRxStats->ucastPktsRxOK +
|
||||
devRxStats->mcastPktsRxOK +
|
||||
devRxStats->bcastPktsRxOK;
|
||||
|
||||
net_stats->tx_packets = devTxStats->ucastPktsTxOK +
|
||||
devTxStats->mcastPktsTxOK +
|
||||
devTxStats->bcastPktsTxOK;
|
||||
|
||||
net_stats->rx_bytes = devRxStats->ucastBytesRxOK +
|
||||
devRxStats->mcastBytesRxOK +
|
||||
devRxStats->bcastBytesRxOK;
|
||||
|
||||
net_stats->tx_bytes = devTxStats->ucastBytesTxOK +
|
||||
devTxStats->mcastBytesTxOK +
|
||||
devTxStats->bcastBytesTxOK;
|
||||
|
||||
net_stats->rx_errors = devRxStats->pktsRxError;
|
||||
net_stats->tx_errors = devTxStats->pktsTxError;
|
||||
net_stats->rx_dropped = drvRxStats->drop_total;
|
||||
net_stats->tx_dropped = drvTxStats->drop_total;
|
||||
net_stats->multicast = devRxStats->mcastPktsRxOK;
|
||||
net_stats->rx_bytes += devRxStats->ucastBytesRxOK +
|
||||
devRxStats->mcastBytesRxOK +
|
||||
devRxStats->bcastBytesRxOK;
|
||||
|
||||
net_stats->rx_errors += devRxStats->pktsRxError;
|
||||
net_stats->rx_dropped += drvRxStats->drop_total;
|
||||
net_stats->multicast += devRxStats->mcastPktsRxOK;
|
||||
}
|
||||
return net_stats;
|
||||
}
|
||||
|
||||
|
@ -307,24 +305,26 @@ vmxnet3_get_ethtool_stats(struct net_device *netdev,
|
|||
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
|
||||
u8 *base;
|
||||
int i;
|
||||
int j = 0;
|
||||
|
||||
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
|
||||
|
||||
/* this does assume each counter is 64-bit wide */
|
||||
/* TODO change this for multiple queues */
|
||||
|
||||
base = (u8 *)&adapter->tqd_start->stats;
|
||||
base = (u8 *)&adapter->tqd_start[j].stats;
|
||||
for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
|
||||
*buf++ = *(u64 *)(base + vmxnet3_tq_dev_stats[i].offset);
|
||||
|
||||
base = (u8 *)&adapter->tx_queue.stats;
|
||||
base = (u8 *)&adapter->tx_queue[j].stats;
|
||||
for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
|
||||
*buf++ = *(u64 *)(base + vmxnet3_tq_driver_stats[i].offset);
|
||||
|
||||
base = (u8 *)&adapter->rqd_start->stats;
|
||||
base = (u8 *)&adapter->rqd_start[j].stats;
|
||||
for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
|
||||
*buf++ = *(u64 *)(base + vmxnet3_rq_dev_stats[i].offset);
|
||||
|
||||
base = (u8 *)&adapter->rx_queue.stats;
|
||||
base = (u8 *)&adapter->rx_queue[j].stats;
|
||||
for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
|
||||
*buf++ = *(u64 *)(base + vmxnet3_rq_driver_stats[i].offset);
|
||||
|
||||
|
@ -339,6 +339,7 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
|
|||
{
|
||||
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
|
||||
u32 *buf = p;
|
||||
int i = 0;
|
||||
|
||||
memset(p, 0, vmxnet3_get_regs_len(netdev));
|
||||
|
||||
|
@ -347,28 +348,29 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
|
|||
/* Update vmxnet3_get_regs_len if we want to dump more registers */
|
||||
|
||||
/* make each ring use multiple of 16 bytes */
|
||||
buf[0] = adapter->tx_queue.tx_ring.next2fill;
|
||||
buf[1] = adapter->tx_queue.tx_ring.next2comp;
|
||||
buf[2] = adapter->tx_queue.tx_ring.gen;
|
||||
/* TODO change this for multiple queues */
|
||||
buf[0] = adapter->tx_queue[i].tx_ring.next2fill;
|
||||
buf[1] = adapter->tx_queue[i].tx_ring.next2comp;
|
||||
buf[2] = adapter->tx_queue[i].tx_ring.gen;
|
||||
buf[3] = 0;
|
||||
|
||||
buf[4] = adapter->tx_queue.comp_ring.next2proc;
|
||||
buf[5] = adapter->tx_queue.comp_ring.gen;
|
||||
buf[6] = adapter->tx_queue.stopped;
|
||||
buf[4] = adapter->tx_queue[i].comp_ring.next2proc;
|
||||
buf[5] = adapter->tx_queue[i].comp_ring.gen;
|
||||
buf[6] = adapter->tx_queue[i].stopped;
|
||||
buf[7] = 0;
|
||||
|
||||
buf[8] = adapter->rx_queue.rx_ring[0].next2fill;
|
||||
buf[9] = adapter->rx_queue.rx_ring[0].next2comp;
|
||||
buf[10] = adapter->rx_queue.rx_ring[0].gen;
|
||||
buf[8] = adapter->rx_queue[i].rx_ring[0].next2fill;
|
||||
buf[9] = adapter->rx_queue[i].rx_ring[0].next2comp;
|
||||
buf[10] = adapter->rx_queue[i].rx_ring[0].gen;
|
||||
buf[11] = 0;
|
||||
|
||||
buf[12] = adapter->rx_queue.rx_ring[1].next2fill;
|
||||
buf[13] = adapter->rx_queue.rx_ring[1].next2comp;
|
||||
buf[14] = adapter->rx_queue.rx_ring[1].gen;
|
||||
buf[12] = adapter->rx_queue[i].rx_ring[1].next2fill;
|
||||
buf[13] = adapter->rx_queue[i].rx_ring[1].next2comp;
|
||||
buf[14] = adapter->rx_queue[i].rx_ring[1].gen;
|
||||
buf[15] = 0;
|
||||
|
||||
buf[16] = adapter->rx_queue.comp_ring.next2proc;
|
||||
buf[17] = adapter->rx_queue.comp_ring.gen;
|
||||
buf[16] = adapter->rx_queue[i].comp_ring.next2proc;
|
||||
buf[17] = adapter->rx_queue[i].comp_ring.gen;
|
||||
buf[18] = 0;
|
||||
buf[19] = 0;
|
||||
}
|
||||
|
@ -435,8 +437,10 @@ vmxnet3_get_ringparam(struct net_device *netdev,
|
|||
param->rx_mini_max_pending = 0;
|
||||
param->rx_jumbo_max_pending = 0;
|
||||
|
||||
param->rx_pending = adapter->rx_queue.rx_ring[0].size;
|
||||
param->tx_pending = adapter->tx_queue.tx_ring.size;
|
||||
param->rx_pending = adapter->rx_queue[0].rx_ring[0].size *
|
||||
adapter->num_rx_queues;
|
||||
param->tx_pending = adapter->tx_queue[0].tx_ring.size *
|
||||
adapter->num_tx_queues;
|
||||
param->rx_mini_pending = 0;
|
||||
param->rx_jumbo_pending = 0;
|
||||
}
|
||||
|
@ -480,8 +484,8 @@ vmxnet3_set_ringparam(struct net_device *netdev,
|
|||
sz) != 0)
|
||||
return -EINVAL;
|
||||
|
||||
if (new_tx_ring_size == adapter->tx_queue.tx_ring.size &&
|
||||
new_rx_ring_size == adapter->rx_queue.rx_ring[0].size) {
|
||||
if (new_tx_ring_size == adapter->tx_queue[0].tx_ring.size &&
|
||||
new_rx_ring_size == adapter->rx_queue[0].rx_ring[0].size) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -498,11 +502,12 @@ vmxnet3_set_ringparam(struct net_device *netdev,
|
|||
|
||||
/* recreate the rx queue and the tx queue based on the
|
||||
* new sizes */
|
||||
vmxnet3_tq_destroy(&adapter->tx_queue, adapter);
|
||||
vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
|
||||
vmxnet3_tq_destroy_all(adapter);
|
||||
vmxnet3_rq_destroy_all(adapter);
|
||||
|
||||
err = vmxnet3_create_queues(adapter, new_tx_ring_size,
|
||||
new_rx_ring_size, VMXNET3_DEF_RX_RING_SIZE);
|
||||
|
||||
if (err) {
|
||||
/* failed, most likely because of OOM, try default
|
||||
* size */
|
||||
|
@ -535,6 +540,65 @@ out:
|
|||
}
|
||||
|
||||
|
||||
static int
|
||||
vmxnet3_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
|
||||
void *rules)
|
||||
{
|
||||
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
|
||||
switch (info->cmd) {
|
||||
case ETHTOOL_GRXRINGS:
|
||||
info->data = adapter->num_rx_queues;
|
||||
return 0;
|
||||
}
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
|
||||
static int
|
||||
vmxnet3_get_rss_indir(struct net_device *netdev,
|
||||
struct ethtool_rxfh_indir *p)
|
||||
{
|
||||
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
|
||||
struct UPT1_RSSConf *rssConf = adapter->rss_conf;
|
||||
unsigned int n = min_t(unsigned int, p->size, rssConf->indTableSize);
|
||||
|
||||
p->size = rssConf->indTableSize;
|
||||
while (n--)
|
||||
p->ring_index[n] = rssConf->indTable[n];
|
||||
return 0;
|
||||
|
||||
}
|
||||
|
||||
static int
|
||||
vmxnet3_set_rss_indir(struct net_device *netdev,
|
||||
const struct ethtool_rxfh_indir *p)
|
||||
{
|
||||
unsigned int i;
|
||||
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
|
||||
struct UPT1_RSSConf *rssConf = adapter->rss_conf;
|
||||
|
||||
if (p->size != rssConf->indTableSize)
|
||||
return -EINVAL;
|
||||
for (i = 0; i < rssConf->indTableSize; i++) {
|
||||
/*
|
||||
* Return with error code if any of the queue indices
|
||||
* is out of range
|
||||
*/
|
||||
if (p->ring_index[i] < 0 ||
|
||||
p->ring_index[i] >= adapter->num_rx_queues)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
for (i = 0; i < rssConf->indTableSize; i++)
|
||||
rssConf->indTable[i] = p->ring_index[i];
|
||||
|
||||
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
|
||||
VMXNET3_CMD_UPDATE_RSSIDT);
|
||||
|
||||
return 0;
|
||||
|
||||
}
|
||||
|
||||
static struct ethtool_ops vmxnet3_ethtool_ops = {
|
||||
.get_settings = vmxnet3_get_settings,
|
||||
.get_drvinfo = vmxnet3_get_drvinfo,
|
||||
|
@ -558,6 +622,9 @@ static struct ethtool_ops vmxnet3_ethtool_ops = {
|
|||
.get_ethtool_stats = vmxnet3_get_ethtool_stats,
|
||||
.get_ringparam = vmxnet3_get_ringparam,
|
||||
.set_ringparam = vmxnet3_set_ringparam,
|
||||
.get_rxnfc = vmxnet3_get_rxnfc,
|
||||
.get_rxfh_indir = vmxnet3_get_rss_indir,
|
||||
.set_rxfh_indir = vmxnet3_set_rss_indir,
|
||||
};
|
||||
|
||||
void vmxnet3_set_ethtool_ops(struct net_device *netdev)
|
||||
|
|
|
@ -68,11 +68,15 @@
|
|||
/*
|
||||
* Version numbers
|
||||
*/
|
||||
#define VMXNET3_DRIVER_VERSION_STRING "1.0.14.0-k"
|
||||
#define VMXNET3_DRIVER_VERSION_STRING "1.0.16.0-k"
|
||||
|
||||
/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
|
||||
#define VMXNET3_DRIVER_VERSION_NUM 0x01000E00
|
||||
#define VMXNET3_DRIVER_VERSION_NUM 0x01001000
|
||||
|
||||
#if defined(CONFIG_PCI_MSI)
|
||||
/* RSS only makes sense if MSI-X is supported. */
|
||||
#define VMXNET3_RSS
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Capabilities
|
||||
|
@ -218,16 +222,19 @@ struct vmxnet3_tx_ctx {
|
|||
};
|
||||
|
||||
struct vmxnet3_tx_queue {
|
||||
char name[IFNAMSIZ+8]; /* To identify interrupt */
|
||||
struct vmxnet3_adapter *adapter;
|
||||
spinlock_t tx_lock;
|
||||
struct vmxnet3_cmd_ring tx_ring;
|
||||
struct vmxnet3_tx_buf_info *buf_info;
|
||||
struct vmxnet3_tx_buf_info *buf_info;
|
||||
struct vmxnet3_tx_data_ring data_ring;
|
||||
struct vmxnet3_comp_ring comp_ring;
|
||||
struct Vmxnet3_TxQueueCtrl *shared;
|
||||
struct Vmxnet3_TxQueueCtrl *shared;
|
||||
struct vmxnet3_tq_driver_stats stats;
|
||||
bool stopped;
|
||||
int num_stop; /* # of times the queue is
|
||||
* stopped */
|
||||
int qid;
|
||||
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
|
||||
|
||||
enum vmxnet3_rx_buf_type {
|
||||
|
@ -259,6 +266,9 @@ struct vmxnet3_rq_driver_stats {
|
|||
};
|
||||
|
||||
struct vmxnet3_rx_queue {
|
||||
char name[IFNAMSIZ + 8]; /* To identify interrupt */
|
||||
struct vmxnet3_adapter *adapter;
|
||||
struct napi_struct napi;
|
||||
struct vmxnet3_cmd_ring rx_ring[2];
|
||||
struct vmxnet3_comp_ring comp_ring;
|
||||
struct vmxnet3_rx_ctx rx_ctx;
|
||||
|
@ -271,7 +281,16 @@ struct vmxnet3_rx_queue {
|
|||
struct vmxnet3_rq_driver_stats stats;
|
||||
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
|
||||
|
||||
#define VMXNET3_LINUX_MAX_MSIX_VECT 1
|
||||
#define VMXNET3_DEVICE_MAX_TX_QUEUES 8
|
||||
#define VMXNET3_DEVICE_MAX_RX_QUEUES 8 /* Keep this value as a power of 2 */
|
||||
|
||||
/* Should be less than UPT1_RSS_MAX_IND_TABLE_SIZE */
|
||||
#define VMXNET3_RSS_IND_TABLE_SIZE (VMXNET3_DEVICE_MAX_RX_QUEUES * 4)
|
||||
|
||||
#define VMXNET3_LINUX_MAX_MSIX_VECT (VMXNET3_DEVICE_MAX_TX_QUEUES + \
|
||||
VMXNET3_DEVICE_MAX_RX_QUEUES + 1)
|
||||
#define VMXNET3_LINUX_MIN_MSIX_VECT 3 /* 1 for each : tx, rx and event */
|
||||
|
||||
|
||||
struct vmxnet3_intr {
|
||||
enum vmxnet3_intr_mask_mode mask_mode;
|
||||
|
@ -279,27 +298,32 @@ struct vmxnet3_intr {
|
|||
u8 num_intrs; /* # of intr vectors */
|
||||
u8 event_intr_idx; /* idx of the intr vector for event */
|
||||
u8 mod_levels[VMXNET3_LINUX_MAX_MSIX_VECT]; /* moderation level */
|
||||
char event_msi_vector_name[IFNAMSIZ+11];
|
||||
#ifdef CONFIG_PCI_MSI
|
||||
struct msix_entry msix_entries[VMXNET3_LINUX_MAX_MSIX_VECT];
|
||||
#endif
|
||||
};
|
||||
|
||||
/* Interrupt sharing schemes, share_intr */
|
||||
#define VMXNET3_INTR_BUDDYSHARE 0 /* Corresponding tx,rx queues share irq */
|
||||
#define VMXNET3_INTR_TXSHARE 1 /* All tx queues share one irq */
|
||||
#define VMXNET3_INTR_DONTSHARE 2 /* each queue has its own irq */
|
||||
|
||||
|
||||
#define VMXNET3_STATE_BIT_RESETTING 0
|
||||
#define VMXNET3_STATE_BIT_QUIESCED 1
|
||||
struct vmxnet3_adapter {
|
||||
struct vmxnet3_tx_queue tx_queue;
|
||||
struct vmxnet3_rx_queue rx_queue;
|
||||
struct napi_struct napi;
|
||||
struct vlan_group *vlan_grp;
|
||||
|
||||
struct vmxnet3_intr intr;
|
||||
|
||||
struct Vmxnet3_DriverShared *shared;
|
||||
struct Vmxnet3_PMConf *pm_conf;
|
||||
struct Vmxnet3_TxQueueDesc *tqd_start; /* first tx queue desc */
|
||||
struct Vmxnet3_RxQueueDesc *rqd_start; /* first rx queue desc */
|
||||
struct net_device *netdev;
|
||||
struct pci_dev *pdev;
|
||||
struct vmxnet3_tx_queue tx_queue[VMXNET3_DEVICE_MAX_TX_QUEUES];
|
||||
struct vmxnet3_rx_queue rx_queue[VMXNET3_DEVICE_MAX_RX_QUEUES];
|
||||
struct vlan_group *vlan_grp;
|
||||
struct vmxnet3_intr intr;
|
||||
struct Vmxnet3_DriverShared *shared;
|
||||
struct Vmxnet3_PMConf *pm_conf;
|
||||
struct Vmxnet3_TxQueueDesc *tqd_start; /* all tx queue desc */
|
||||
struct Vmxnet3_RxQueueDesc *rqd_start; /* all rx queue desc */
|
||||
struct net_device *netdev;
|
||||
struct net_device_stats net_stats;
|
||||
struct pci_dev *pdev;
|
||||
|
||||
u8 __iomem *hw_addr0; /* for BAR 0 */
|
||||
u8 __iomem *hw_addr1; /* for BAR 1 */
|
||||
|
@ -308,6 +332,12 @@ struct vmxnet3_adapter {
|
|||
bool rxcsum;
|
||||
bool lro;
|
||||
bool jumbo_frame;
|
||||
#ifdef VMXNET3_RSS
|
||||
struct UPT1_RSSConf *rss_conf;
|
||||
bool rss;
|
||||
#endif
|
||||
u32 num_rx_queues;
|
||||
u32 num_tx_queues;
|
||||
|
||||
/* rx buffer related */
|
||||
unsigned skb_buf_size;
|
||||
|
@ -327,6 +357,7 @@ struct vmxnet3_adapter {
|
|||
unsigned long state; /* VMXNET3_STATE_BIT_xxx */
|
||||
|
||||
int dev_number;
|
||||
int share_intr;
|
||||
};
|
||||
|
||||
#define VMXNET3_WRITE_BAR0_REG(adapter, reg, val) \
|
||||
|
@ -366,12 +397,10 @@ void
|
|||
vmxnet3_reset_dev(struct vmxnet3_adapter *adapter);
|
||||
|
||||
void
|
||||
vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
|
||||
struct vmxnet3_adapter *adapter);
|
||||
vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter);
|
||||
|
||||
void
|
||||
vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
|
||||
struct vmxnet3_adapter *adapter);
|
||||
vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter);
|
||||
|
||||
int
|
||||
vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
|
||||
|
|
Loading…
Reference in New Issue