Merge branch 'nfp-xdp_adjust_head'
Jakub Kicinski says:

====================
nfp: XDP adjust head support

This series adds support for XDP adjust head. The bulk of the code is actually just paying off technical debt: on a reconfiguration request, nfp was allocating the new resources separately, leaving the device running with the existing set of rings, and we used to manage the new resources in special ring set structures. This set simply separates the datapath part of the device structure from the control information, allowing the new datapath structure to be allocated with all-new memory and rings. The swap operation is now greatly simplified, and we save a lot of parameter passing this way. Hopefully the churn is worth the negative diffstat.

Support for XDP adjust head is done in a pretty standard way. The NFP is a bit special because it prepends metadata before the packet data, so we have to do a bit of memcpying in case XDP will run. We also luck out a little: because we already have prepend space allocated, one byte is enough to store the extra XDP space (a full 256 bytes on top of the standard prepend space would be a bit inconvenient, since it would normally require 16 bits, or a boolean with additional shifts).
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit d152309941
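The core of the refactor is a clone-then-swap pattern for the new struct nfp_net_dp. A minimal sketch of how the helpers fit together, mirroring nfp_net_set_ring_size() in the ethtool hunks below (the function name example_resize is hypothetical; nfp_net_clone_dp() and nfp_net_ring_reconfig() are the real entry points declared in the header diff):

/* Sketch only: clone the datapath structure, mutate just the copy, then
 * let the reconfig path allocate all-new memory and rings for it and
 * swap it in, exactly as the ethtool callbacks below do.
 */
static int example_resize(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt)
{
	struct nfp_net_dp *dp;

	dp = nfp_net_clone_dp(nn);	/* copy of nn->dp, all-new memory */
	if (!dp)
		return -ENOMEM;

	dp->rxd_cnt = rxd_cnt;		/* the new config goes into the clone */
	dp->txd_cnt = txd_cnt;

	return nfp_net_ring_reconfig(nn, dp);
}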
@@ -50,14 +50,14 @@
 #include "nfp_net_ctrl.h"
 
-#define nn_err(nn, fmt, args...)  netdev_err((nn)->netdev, fmt, ## args)
-#define nn_warn(nn, fmt, args...) netdev_warn((nn)->netdev, fmt, ## args)
-#define nn_info(nn, fmt, args...) netdev_info((nn)->netdev, fmt, ## args)
-#define nn_dbg(nn, fmt, args...)  netdev_dbg((nn)->netdev, fmt, ## args)
-#define nn_warn_ratelimit(nn, fmt, args...) \
+#define nn_err(nn, fmt, args...)  netdev_err((nn)->dp.netdev, fmt, ## args)
+#define nn_warn(nn, fmt, args...) netdev_warn((nn)->dp.netdev, fmt, ## args)
+#define nn_info(nn, fmt, args...) netdev_info((nn)->dp.netdev, fmt, ## args)
+#define nn_dbg(nn, fmt, args...)  netdev_dbg((nn)->dp.netdev, fmt, ## args)
+#define nn_dp_warn(dp, fmt, args...) \
 	do { \
 		if (unlikely(net_ratelimit())) \
-			netdev_warn((nn)->netdev, fmt, ## args); \
+			netdev_warn((dp)->netdev, fmt, ## args); \
 	} while (0)
 
 /* Max time to wait for NFP to respond on updates (in seconds) */
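A hedged usage sketch for the reworked macro (the function below is hypothetical, not from the patch): datapath code that only carries a struct nfp_net_dp pointer can now emit a rate-limited warning without reaching back to the full struct nfp_net:

/* Hypothetical call site, illustrating nn_dp_warn() only */
static void example_check_len(struct nfp_net_dp *dp, unsigned int len)
{
	if (len > dp->fl_bufsz)
		nn_dp_warn(dp, "RX packet larger than freelist buffer: %u\n",
			   len);
}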
@@ -316,8 +316,6 @@ struct nfp_net_rx_buf {
  * @rxds: Virtual address of FL/RX ring in host memory
  * @dma: DMA address of the FL/RX ring
  * @size: Size, in bytes, of the FL/RX ring (needed to free)
- * @bufsz: Buffer allocation size for convenience of management routines
- *	   (NOTE: this is in second cache line, do not use on fast path!)
  */
 struct nfp_net_rx_ring {
 	struct nfp_net_r_vector *r_vec;
@@ -339,7 +337,6 @@ struct nfp_net_rx_ring {
 
 	dma_addr_t dma;
 	unsigned int size;
-	unsigned int bufsz;
 } ____cacheline_aligned;
 
 /**
@@ -434,18 +431,73 @@ struct nfp_stat_pair {
 };
 
 /**
- * struct nfp_net - NFP network device structure
+ * struct nfp_net_dp - NFP network device datapath data structure
  * @dev: Backpointer to struct device
  * @netdev: Backpointer to net_device structure
  * @is_vf: Is the driver attached to a VF?
  * @bpf_offload_skip_sw: Offloaded BPF program will not be rerun by cls_bpf
  * @bpf_offload_xdp: Offloaded BPF program is XDP
  * @chained_metadata_format: Firmware will use new metadata format
- * @ctrl: Local copy of the control register/word.
- * @fl_bufsz: Currently configured size of the freelist buffers
+ * @rx_dma_dir: Mapping direction for RX buffers
+ * @rx_dma_off: Offset at which to DMA packets (for XDP headroom)
  * @rx_offset: Offset in the RX buffers where packet data starts
+ * @ctrl: Local copy of the control register/word.
+ * @fl_bufsz: Currently configured size of the freelist buffers
  * @xdp_prog: Installed XDP program
- * @fw_ver: Firmware version
+ * @tx_rings: Array of pre-allocated TX ring structures
+ * @rx_rings: Array of pre-allocated RX ring structures
+ * @ctrl_bar: Pointer to mapped control BAR
+ *
+ * @txd_cnt: Size of the TX ring in number of descriptors
+ * @rxd_cnt: Size of the RX ring in number of descriptors
+ * @num_r_vecs: Number of used ring vectors
+ * @num_tx_rings: Currently configured number of TX rings
+ * @num_stack_tx_rings: Number of TX rings used by the stack (not XDP)
+ * @num_rx_rings: Currently configured number of RX rings
+ * @mtu: Device MTU
+ */
+struct nfp_net_dp {
+	struct device *dev;
+	struct net_device *netdev;
+
+	u8 is_vf:1;
+	u8 bpf_offload_skip_sw:1;
+	u8 bpf_offload_xdp:1;
+	u8 chained_metadata_format:1;
+
+	u8 rx_dma_dir;
+	u8 rx_dma_off;
+
+	u8 rx_offset;
+
+	u32 ctrl;
+	u32 fl_bufsz;
+
+	struct bpf_prog *xdp_prog;
+
+	struct nfp_net_tx_ring *tx_rings;
+	struct nfp_net_rx_ring *rx_rings;
+
+	u8 __iomem *ctrl_bar;
+
+	/* Cold data follows */
+
+	unsigned int txd_cnt;
+	unsigned int rxd_cnt;
+
+	unsigned int num_r_vecs;
+
+	unsigned int num_tx_rings;
+	unsigned int num_stack_tx_rings;
+	unsigned int num_rx_rings;
+
+	unsigned int mtu;
+};
+
+/**
+ * struct nfp_net - NFP network device structure
+ * @dp: Datapath structure
+ * @fw_ver: Firmware version
  * @cap: Capabilities advertised by the Firmware
  * @max_mtu: Maximum supported MTU advertised by the Firmware
  * @rss_hfunc: RSS selected hash function
@@ -457,17 +509,9 @@ struct nfp_stat_pair {
  * @rx_filter_change: Jiffies when statistics last changed
  * @rx_filter_stats_timer: Timer for polling filter offload statistics
  * @rx_filter_lock: Lock protecting timer state changes (teardown)
+ * @max_r_vecs: Number of allocated interrupt vectors for RX/TX
  * @max_tx_rings: Maximum number of TX rings supported by the Firmware
  * @max_rx_rings: Maximum number of RX rings supported by the Firmware
- * @num_tx_rings: Currently configured number of TX rings
- * @num_stack_tx_rings: Number of TX rings used by the stack (not XDP)
- * @num_rx_rings: Currently configured number of RX rings
- * @txd_cnt: Size of the TX ring in number of descriptors
- * @rxd_cnt: Size of the RX ring in number of descriptors
- * @tx_rings: Array of pre-allocated TX ring structures
- * @rx_rings: Array of pre-allocated RX ring structures
- * @max_r_vecs: Number of allocated interrupt vectors for RX/TX
- * @num_r_vecs: Number of used ring vectors
  * @r_vecs: Pre-allocated array of ring vectors
  * @irq_entries: Pre-allocated array of MSI-X entries
  * @lsc_handler: Handler for Link State Change interrupt
@@ -491,7 +535,6 @@ struct nfp_stat_pair {
  * @vxlan_ports: VXLAN ports for RX inner csum offload communicated to HW
  * @vxlan_usecnt: IPv4/IPv6 VXLAN port use counts
  * @qcp_cfg: Pointer to QCP queue used for configuration notification
- * @ctrl_bar: Pointer to mapped control BAR
  * @tx_bar: Pointer to mapped TX queues
  * @rx_bar: Pointer to mapped FL/RX queues
  * @debugfs_dir: Device directory in debugfs
@@ -502,25 +545,10 @@ struct nfp_stat_pair {
  * @eth_port: Translated ETH Table port entry
  */
 struct nfp_net {
-	struct device *dev;
-	struct net_device *netdev;
-
-	unsigned is_vf:1;
-	unsigned bpf_offload_skip_sw:1;
-	unsigned bpf_offload_xdp:1;
-	unsigned chained_metadata_format:1;
-
-	u32 ctrl;
-	u32 fl_bufsz;
-
-	u32 rx_offset;
-
-	struct bpf_prog *xdp_prog;
-
-	struct nfp_net_tx_ring *tx_rings;
-	struct nfp_net_rx_ring *rx_rings;
+	struct nfp_net_dp dp;
 
 	struct nfp_net_fw_version fw_ver;
 
 	u32 cap;
 	u32 max_mtu;
@@ -537,18 +565,10 @@ struct nfp_net {
 	unsigned int max_tx_rings;
 	unsigned int max_rx_rings;
 
-	unsigned int num_tx_rings;
-	unsigned int num_stack_tx_rings;
-	unsigned int num_rx_rings;
-
 	int stride_tx;
 	int stride_rx;
 
-	int txd_cnt;
-	int rxd_cnt;
-
 	unsigned int max_r_vecs;
-	unsigned int num_r_vecs;
 	struct nfp_net_r_vector r_vecs[NFP_NET_MAX_R_VECS];
 	struct msix_entry irq_entries[NFP_NET_MAX_IRQS];
 
@@ -582,7 +602,6 @@ struct nfp_net {
 
 	u8 __iomem *qcp_cfg;
 
-	u8 __iomem *ctrl_bar;
 	u8 __iomem *tx_bar;
 	u8 __iomem *rx_bar;
 
@@ -597,54 +616,47 @@ struct nfp_net {
 	struct nfp_eth_table_port *eth_port;
 };
 
-struct nfp_net_ring_set {
-	unsigned int n_rings;
-	unsigned int mtu;
-	unsigned int dcnt;
-	void *rings;
-};
-
 /* Functions to read/write from/to a BAR
  * Performs any endian conversion necessary.
  */
 static inline u16 nn_readb(struct nfp_net *nn, int off)
 {
-	return readb(nn->ctrl_bar + off);
+	return readb(nn->dp.ctrl_bar + off);
 }
 
 static inline void nn_writeb(struct nfp_net *nn, int off, u8 val)
 {
-	writeb(val, nn->ctrl_bar + off);
+	writeb(val, nn->dp.ctrl_bar + off);
 }
 
 static inline u16 nn_readw(struct nfp_net *nn, int off)
 {
-	return readw(nn->ctrl_bar + off);
+	return readw(nn->dp.ctrl_bar + off);
 }
 
 static inline void nn_writew(struct nfp_net *nn, int off, u16 val)
 {
-	writew(val, nn->ctrl_bar + off);
+	writew(val, nn->dp.ctrl_bar + off);
 }
 
 static inline u32 nn_readl(struct nfp_net *nn, int off)
 {
-	return readl(nn->ctrl_bar + off);
+	return readl(nn->dp.ctrl_bar + off);
 }
 
 static inline void nn_writel(struct nfp_net *nn, int off, u32 val)
 {
-	writel(val, nn->ctrl_bar + off);
+	writel(val, nn->dp.ctrl_bar + off);
 }
 
 static inline u64 nn_readq(struct nfp_net *nn, int off)
 {
-	return readq(nn->ctrl_bar + off);
+	return readq(nn->dp.ctrl_bar + off);
 }
 
 static inline void nn_writeq(struct nfp_net *nn, int off, u64 val)
 {
-	writeq(val, nn->ctrl_bar + off);
+	writeq(val, nn->dp.ctrl_bar + off);
 }
 
 /* Flush posted PCI writes by reading something without side effects */
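The accessor interface is unchanged for callers; only the backing pointer moves into nn->dp. An illustrative read-modify-write through the helpers (the pattern is lifted from the BPF offload hunks further down; the function name here is hypothetical):

/* Illustrative only: toggle a control bit through the BAR helpers */
static void example_set_ctrl_bit(struct nfp_net *nn, u32 bit)
{
	nn->dp.ctrl |= bit;				/* cached control word */
	nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);	/* write through the BAR */
}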
@@ -798,9 +810,9 @@ void nfp_net_irqs_disable(struct pci_dev *pdev);
 void
 nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
 		    unsigned int n);
-int
-nfp_net_ring_reconfig(struct nfp_net *nn, struct bpf_prog **xdp_prog,
-		      struct nfp_net_ring_set *rx, struct nfp_net_ring_set *tx);
+
+struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn);
+int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *new);
 
 #ifdef CONFIG_NFP_DEBUG
 void nfp_net_debugfs_create(void);
File diff suppressed because it is too large
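The suppressed file carries the actual XDP adjust-head datapath. Based only on the cover letter above, a heavily hedged sketch of the metadata handling it describes (names and layout here are assumptions, not the driver's code): because the NFP prepends metadata directly in front of packet data, that metadata has to be copied out of the way when an XDP program is attached, so the program gets real headroom to grow into:

/* Assumption-laden sketch of the "bit of memcpying in case XDP will run":
 * move the device-prepended metadata to the start of the buffer so the
 * gap between it and the packet becomes usable XDP headroom.
 */
static void example_make_xdp_headroom(void *buf_start, void *pkt,
				      unsigned int meta_len)
{
	/* metadata currently sits immediately before the packet */
	memmove(buf_start, pkt - meta_len, meta_len);
}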
@@ -54,7 +54,7 @@ static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data)
 		goto out;
 	nn = r_vec->nfp_net;
 	rx_ring = r_vec->rx_ring;
-	if (!netif_running(nn->netdev))
+	if (!netif_running(nn->dp.netdev))
 		goto out;
 
 	rxd_cnt = rx_ring->cnt;
@@ -145,7 +145,7 @@ static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data)
 	if (!r_vec->nfp_net || !tx_ring)
 		goto out;
 	nn = r_vec->nfp_net;
-	if (!netif_running(nn->netdev))
+	if (!netif_running(nn->dp.netdev))
 		goto out;
 
 	txd_cnt = tx_ring->cnt;
@@ -127,9 +127,9 @@ static const struct _nfp_net_et_stats nfp_net_et_stats[] = {
 };
 
 #define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats)
-#define NN_ET_RVEC_STATS_LEN (nn->num_r_vecs * 3)
+#define NN_ET_RVEC_STATS_LEN (nn->dp.num_r_vecs * 3)
 #define NN_ET_RVEC_GATHER_STATS 7
-#define NN_ET_QUEUE_STATS_LEN ((nn->num_tx_rings + nn->num_rx_rings) * 2)
+#define NN_ET_QUEUE_STATS_LEN ((nn->dp.num_tx_rings + nn->dp.num_rx_rings) * 2)
 #define NN_ET_STATS_LEN (NN_ET_GLOBAL_STATS_LEN + NN_ET_RVEC_GATHER_STATS + \
 			 NN_ET_RVEC_STATS_LEN + NN_ET_QUEUE_STATS_LEN)
 
@@ -180,30 +180,22 @@ static void nfp_net_get_ringparam(struct net_device *netdev,
 
 	ring->rx_max_pending = NFP_NET_MAX_RX_DESCS;
 	ring->tx_max_pending = NFP_NET_MAX_TX_DESCS;
-	ring->rx_pending = nn->rxd_cnt;
-	ring->tx_pending = nn->txd_cnt;
+	ring->rx_pending = nn->dp.rxd_cnt;
+	ring->tx_pending = nn->dp.txd_cnt;
 }
 
 static int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt)
 {
-	struct nfp_net_ring_set *reconfig_rx = NULL, *reconfig_tx = NULL;
-	struct nfp_net_ring_set rx = {
-		.n_rings = nn->num_rx_rings,
-		.mtu = nn->netdev->mtu,
-		.dcnt = rxd_cnt,
-	};
-	struct nfp_net_ring_set tx = {
-		.n_rings = nn->num_tx_rings,
-		.dcnt = txd_cnt,
-	};
+	struct nfp_net_dp *dp;
 
-	if (nn->rxd_cnt != rxd_cnt)
-		reconfig_rx = &rx;
-	if (nn->txd_cnt != txd_cnt)
-		reconfig_tx = &tx;
+	dp = nfp_net_clone_dp(nn);
+	if (!dp)
+		return -ENOMEM;
 
-	return nfp_net_ring_reconfig(nn, &nn->xdp_prog,
-				     reconfig_rx, reconfig_tx);
+	dp->rxd_cnt = rxd_cnt;
+	dp->txd_cnt = txd_cnt;
+
+	return nfp_net_ring_reconfig(nn, dp);
 }
 
 static int nfp_net_set_ringparam(struct net_device *netdev,
@@ -224,11 +216,11 @@ static int nfp_net_set_ringparam(struct net_device *netdev,
 	    txd_cnt < NFP_NET_MIN_TX_DESCS || txd_cnt > NFP_NET_MAX_TX_DESCS)
 		return -EINVAL;
 
-	if (nn->rxd_cnt == rxd_cnt && nn->txd_cnt == txd_cnt)
+	if (nn->dp.rxd_cnt == rxd_cnt && nn->dp.txd_cnt == txd_cnt)
 		return 0;
 
 	nn_dbg(nn, "Change ring size: RxQ %u->%u, TxQ %u->%u\n",
-	       nn->rxd_cnt, rxd_cnt, nn->txd_cnt, txd_cnt);
+	       nn->dp.rxd_cnt, rxd_cnt, nn->dp.txd_cnt, txd_cnt);
 
 	return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt);
 }
@@ -246,7 +238,7 @@ static void nfp_net_get_strings(struct net_device *netdev,
 			memcpy(p, nfp_net_et_stats[i].name, ETH_GSTRING_LEN);
 			p += ETH_GSTRING_LEN;
 		}
-		for (i = 0; i < nn->num_r_vecs; i++) {
+		for (i = 0; i < nn->dp.num_r_vecs; i++) {
 			sprintf(p, "rvec_%u_rx_pkts", i);
 			p += ETH_GSTRING_LEN;
 			sprintf(p, "rvec_%u_tx_pkts", i);
@@ -268,13 +260,13 @@
 		p += ETH_GSTRING_LEN;
 		strncpy(p, "tx_lso", ETH_GSTRING_LEN);
 		p += ETH_GSTRING_LEN;
-		for (i = 0; i < nn->num_tx_rings; i++) {
+		for (i = 0; i < nn->dp.num_tx_rings; i++) {
 			sprintf(p, "txq_%u_pkts", i);
 			p += ETH_GSTRING_LEN;
 			sprintf(p, "txq_%u_bytes", i);
 			p += ETH_GSTRING_LEN;
 		}
-		for (i = 0; i < nn->num_rx_rings; i++) {
+		for (i = 0; i < nn->dp.num_rx_rings; i++) {
 			sprintf(p, "rxq_%u_pkts", i);
 			p += ETH_GSTRING_LEN;
 			sprintf(p, "rxq_%u_bytes", i);
@@ -307,12 +299,12 @@ static void nfp_net_get_stats(struct net_device *netdev,
 			break;
 
 		case NFP_NET_DEV_ET_STATS:
-			io_p = nn->ctrl_bar + nfp_net_et_stats[i].off;
+			io_p = nn->dp.ctrl_bar + nfp_net_et_stats[i].off;
 			data[i] = readq(io_p);
 			break;
 		}
 	}
-	for (j = 0; j < nn->num_r_vecs; j++) {
+	for (j = 0; j < nn->dp.num_r_vecs; j++) {
 		unsigned int start;
 
 		do {
@@ -338,16 +330,16 @@
 	}
 	for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++)
 		data[i++] = gathered_stats[j];
-	for (j = 0; j < nn->num_tx_rings; j++) {
-		io_p = nn->ctrl_bar + NFP_NET_CFG_TXR_STATS(j);
+	for (j = 0; j < nn->dp.num_tx_rings; j++) {
+		io_p = nn->dp.ctrl_bar + NFP_NET_CFG_TXR_STATS(j);
 		data[i++] = readq(io_p);
-		io_p = nn->ctrl_bar + NFP_NET_CFG_TXR_STATS(j) + 8;
+		io_p = nn->dp.ctrl_bar + NFP_NET_CFG_TXR_STATS(j) + 8;
 		data[i++] = readq(io_p);
 	}
-	for (j = 0; j < nn->num_rx_rings; j++) {
-		io_p = nn->ctrl_bar + NFP_NET_CFG_RXR_STATS(j);
+	for (j = 0; j < nn->dp.num_rx_rings; j++) {
+		io_p = nn->dp.ctrl_bar + NFP_NET_CFG_RXR_STATS(j);
 		data[i++] = readq(io_p);
-		io_p = nn->ctrl_bar + NFP_NET_CFG_RXR_STATS(j) + 8;
+		io_p = nn->dp.ctrl_bar + NFP_NET_CFG_RXR_STATS(j) + 8;
 		data[i++] = readq(io_p);
 	}
 }
@@ -411,7 +403,7 @@ static int nfp_net_get_rxnfc(struct net_device *netdev,
 
 	switch (cmd->cmd) {
 	case ETHTOOL_GRXRINGS:
-		cmd->data = nn->num_rx_rings;
+		cmd->data = nn->dp.num_rx_rings;
 		return 0;
 	case ETHTOOL_GRXFH:
 		return nfp_net_get_rss_hash_opts(nn, cmd);
@@ -461,7 +453,7 @@ static int nfp_net_set_rss_hash_opt(struct nfp_net *nn,
 	if (new_rss_cfg == nn->rss_cfg)
 		return 0;
 
-	writel(new_rss_cfg, nn->ctrl_bar + NFP_NET_CFG_RSS_CTRL);
+	writel(new_rss_cfg, nn->dp.ctrl_bar + NFP_NET_CFG_RSS_CTRL);
 	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RSS);
 	if (err)
 		return err;
@@ -573,7 +565,7 @@ static void nfp_net_get_regs(struct net_device *netdev,
 	regs->version = nn_readl(nn, NFP_NET_CFG_VERSION);
 
 	for (i = 0; i < NFP_NET_CFG_BAR_SZ / sizeof(u32); i++)
-		regs_buf[i] = readl(nn->ctrl_bar + (i * sizeof(u32)));
+		regs_buf[i] = readl(nn->dp.ctrl_bar + (i * sizeof(u32)));
 }
 
 static int nfp_net_get_coalesce(struct net_device *netdev,
@@ -745,16 +737,16 @@ static void nfp_net_get_channels(struct net_device *netdev,
 	struct nfp_net *nn = netdev_priv(netdev);
 	unsigned int num_tx_rings;
 
-	num_tx_rings = nn->num_tx_rings;
-	if (nn->xdp_prog)
-		num_tx_rings -= nn->num_rx_rings;
+	num_tx_rings = nn->dp.num_tx_rings;
+	if (nn->dp.xdp_prog)
+		num_tx_rings -= nn->dp.num_rx_rings;
 
 	channel->max_rx = min(nn->max_rx_rings, nn->max_r_vecs);
 	channel->max_tx = min(nn->max_tx_rings, nn->max_r_vecs);
 	channel->max_combined = min(channel->max_rx, channel->max_tx);
 	channel->max_other = NFP_NET_NON_Q_VECTORS;
-	channel->combined_count = min(nn->num_rx_rings, num_tx_rings);
-	channel->rx_count = nn->num_rx_rings - channel->combined_count;
+	channel->combined_count = min(nn->dp.num_rx_rings, num_tx_rings);
+	channel->rx_count = nn->dp.num_rx_rings - channel->combined_count;
 	channel->tx_count = num_tx_rings - channel->combined_count;
 	channel->other_count = NFP_NET_NON_Q_VECTORS;
 }
@@ -762,29 +754,19 @@
 static int nfp_net_set_num_rings(struct nfp_net *nn, unsigned int total_rx,
 				 unsigned int total_tx)
 {
-	struct nfp_net_ring_set *reconfig_rx = NULL, *reconfig_tx = NULL;
-	struct nfp_net_ring_set rx = {
-		.n_rings = total_rx,
-		.mtu = nn->netdev->mtu,
-		.dcnt = nn->rxd_cnt,
-	};
-	struct nfp_net_ring_set tx = {
-		.n_rings = total_tx,
-		.dcnt = nn->txd_cnt,
-	};
+	struct nfp_net_dp *dp;
 
-	if (nn->num_rx_rings != total_rx)
-		reconfig_rx = &rx;
-	if (nn->num_stack_tx_rings != total_tx ||
-	    (nn->xdp_prog && reconfig_rx))
-		reconfig_tx = &tx;
+	dp = nfp_net_clone_dp(nn);
+	if (!dp)
+		return -ENOMEM;
 
-	/* nfp_net_check_config() will catch tx.n_rings > nn->max_tx_rings */
-	if (nn->xdp_prog)
-		tx.n_rings += total_rx;
+	dp->num_rx_rings = total_rx;
+	dp->num_tx_rings = total_tx;
+	/* nfp_net_check_config() will catch num_tx_rings > nn->max_tx_rings */
+	if (dp->xdp_prog)
+		dp->num_tx_rings += total_rx;
 
-	return nfp_net_ring_reconfig(nn, &nn->xdp_prog,
-				     reconfig_rx, reconfig_tx);
+	return nfp_net_ring_reconfig(nn, dp);
 }
 
 static int nfp_net_set_channels(struct net_device *netdev,
@@ -130,7 +130,7 @@ err_area:
 }
 
 static void
-nfp_net_get_mac_addr_hwinfo(struct nfp_net *nn, struct nfp_cpp *cpp,
+nfp_net_get_mac_addr_hwinfo(struct nfp_net_dp *dp, struct nfp_cpp *cpp,
 			    unsigned int id)
 {
 	u8 mac_addr[ETH_ALEN];
@@ -141,22 +141,22 @@ nfp_net_get_mac_addr_hwinfo(struct nfp_net *nn, struct nfp_cpp *cpp,
 
 	mac_str = nfp_hwinfo_lookup(cpp, name);
 	if (!mac_str) {
-		dev_warn(nn->dev, "Can't lookup MAC address. Generate\n");
-		eth_hw_addr_random(nn->netdev);
+		dev_warn(dp->dev, "Can't lookup MAC address. Generate\n");
+		eth_hw_addr_random(dp->netdev);
 		return;
 	}
 
 	if (sscanf(mac_str, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
 		   &mac_addr[0], &mac_addr[1], &mac_addr[2],
 		   &mac_addr[3], &mac_addr[4], &mac_addr[5]) != 6) {
-		dev_warn(nn->dev,
+		dev_warn(dp->dev,
 			 "Can't parse MAC address (%s). Generate.\n", mac_str);
-		eth_hw_addr_random(nn->netdev);
+		eth_hw_addr_random(dp->netdev);
 		return;
 	}
 
-	ether_addr_copy(nn->netdev->dev_addr, mac_addr);
-	ether_addr_copy(nn->netdev->perm_addr, mac_addr);
+	ether_addr_copy(dp->netdev->dev_addr, mac_addr);
+	ether_addr_copy(dp->netdev->perm_addr, mac_addr);
 }
 
 /**
@@ -179,12 +179,12 @@ nfp_net_get_mac_addr(struct nfp_net *nn, struct nfp_pf *pf, unsigned int id)
 
 			nn->eth_port = &pf->eth_tbl->ports[i];
 
-			ether_addr_copy(nn->netdev->dev_addr, mac_addr);
-			ether_addr_copy(nn->netdev->perm_addr, mac_addr);
+			ether_addr_copy(nn->dp.netdev->dev_addr, mac_addr);
+			ether_addr_copy(nn->dp.netdev->perm_addr, mac_addr);
 			return;
 		}
 
-	nfp_net_get_mac_addr_hwinfo(nn, pf->cpp, id);
+	nfp_net_get_mac_addr_hwinfo(&nn->dp, pf->cpp, id);
 }
 
 static unsigned int nfp_net_pf_get_num_ports(struct nfp_pf *pf)
@@ -306,10 +306,10 @@ nfp_net_pf_alloc_port_netdev(struct nfp_pf *pf, void __iomem *ctrl_bar,
 
 	nn->cpp = pf->cpp;
 	nn->fw_ver = *fw_ver;
-	nn->ctrl_bar = ctrl_bar;
+	nn->dp.ctrl_bar = ctrl_bar;
 	nn->tx_bar = tx_bar;
 	nn->rx_bar = rx_bar;
-	nn->is_vf = 0;
+	nn->dp.is_vf = 0;
 	nn->stride_rx = stride;
 	nn->stride_tx = stride;
 
@@ -331,7 +331,7 @@ nfp_net_pf_init_port_netdev(struct nfp_pf *pf, struct nfp_net *nn,
 	 */
 	nn->me_freq_mhz = 1200;
 
-	err = nfp_net_netdev_init(nn->netdev);
+	err = nfp_net_netdev_init(nn->dp.netdev);
 	if (err)
 		return err;
 
@@ -400,7 +400,7 @@ nfp_net_pf_spawn_netdevs(struct nfp_pf *pf,
 	/* Get MSI-X vectors */
 	wanted_irqs = 0;
 	list_for_each_entry(nn, &pf->ports, port_list)
-		wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->num_r_vecs;
+		wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs;
 	pf->irq_entries = kcalloc(wanted_irqs, sizeof(*pf->irq_entries),
 				  GFP_KERNEL);
 	if (!pf->irq_entries) {
@@ -445,7 +445,7 @@ nfp_net_pf_spawn_netdevs(struct nfp_pf *pf,
 err_prev_deinit:
 	list_for_each_entry_continue_reverse(nn, &pf->ports, port_list) {
 		nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
-		nfp_net_netdev_clean(nn->netdev);
+		nfp_net_netdev_clean(nn->dp.netdev);
 	}
 	nfp_net_irqs_disable(pf->pdev);
 err_vec_free:
@@ -571,7 +571,7 @@ void nfp_net_pci_remove(struct nfp_pf *pf)
 	list_for_each_entry(nn, &pf->ports, port_list) {
 		nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
 
-		nfp_net_netdev_clean(nn->netdev);
+		nfp_net_netdev_clean(nn->dp.netdev);
 	}
 
 	nfp_net_pf_free_netdevs(pf);
@@ -58,7 +58,7 @@ void nfp_net_filter_stats_timer(unsigned long data)
 
 	spin_lock_bh(&nn->rx_filter_lock);
 
-	if (nn->ctrl & NFP_NET_CFG_CTRL_BPF)
+	if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
 		mod_timer(&nn->rx_filter_stats_timer,
 			  jiffies + NFP_NET_STAT_POLL_IVL);
 
@@ -132,7 +132,7 @@ nfp_net_bpf_get_act(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
 			return NN_ACT_TC_DROP;
 
 		if (is_tcf_mirred_egress_redirect(a) &&
-		    tcf_mirred_ifindex(a) == nn->netdev->ifindex)
+		    tcf_mirred_ifindex(a) == nn->dp.netdev->ifindex)
 			return NN_ACT_TC_REDIR;
 	}
 
@@ -160,7 +160,7 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn,
 	act = ret;
 
 	max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
-	if (max_mtu < nn->netdev->mtu) {
+	if (max_mtu < nn->dp.netdev->mtu) {
 		nn_info(nn, "BPF offload not supported with MTU larger than HW packet split boundary\n");
 		return -ENOTSUPP;
 	}
@@ -168,7 +168,7 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn,
 	start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
 	done_off = nn_readw(nn, NFP_NET_CFG_BPF_DONE);
 
-	*code = dma_zalloc_coherent(nn->dev, code_sz, dma_addr, GFP_KERNEL);
+	*code = dma_zalloc_coherent(nn->dp.dev, code_sz, dma_addr, GFP_KERNEL);
 	if (!*code)
 		return -ENOMEM;
 
@@ -180,7 +180,7 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn,
 	return 0;
 
 out:
-	dma_free_coherent(nn->dev, code_sz, *code, *dma_addr);
+	dma_free_coherent(nn->dp.dev, code_sz, *code, *dma_addr);
 	return ret;
 }
 
@@ -193,7 +193,7 @@ nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags,
 	u64 bpf_addr = dma_addr;
 	int err;
 
-	nn->bpf_offload_skip_sw = !!(tc_flags & TCA_CLS_FLAGS_SKIP_SW);
+	nn->dp.bpf_offload_skip_sw = !!(tc_flags & TCA_CLS_FLAGS_SKIP_SW);
 
 	if (dense_mode)
 		bpf_addr |= NFP_NET_CFG_BPF_CFG_8CTX;
@@ -207,13 +207,13 @@
 		nn_err(nn, "FW command error while loading BPF: %d\n", err);
 
 	/* Enable passing packets through BPF function */
-	nn->ctrl |= NFP_NET_CFG_CTRL_BPF;
-	nn_writel(nn, NFP_NET_CFG_CTRL, nn->ctrl);
+	nn->dp.ctrl |= NFP_NET_CFG_CTRL_BPF;
+	nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
 	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
 	if (err)
 		nn_err(nn, "FW command error while enabling BPF: %d\n", err);
 
-	dma_free_coherent(nn->dev, code_sz, code, dma_addr);
+	dma_free_coherent(nn->dp.dev, code_sz, code, dma_addr);
 
 	nfp_net_bpf_stats_reset(nn);
 	mod_timer(&nn->rx_filter_stats_timer, jiffies + NFP_NET_STAT_POLL_IVL);
@@ -221,16 +221,16 @@
 
 static int nfp_net_bpf_stop(struct nfp_net *nn)
 {
-	if (!(nn->ctrl & NFP_NET_CFG_CTRL_BPF))
+	if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF))
 		return 0;
 
 	spin_lock_bh(&nn->rx_filter_lock);
-	nn->ctrl &= ~NFP_NET_CFG_CTRL_BPF;
+	nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_BPF;
 	spin_unlock_bh(&nn->rx_filter_lock);
-	nn_writel(nn, NFP_NET_CFG_CTRL, nn->ctrl);
+	nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
 
 	del_timer_sync(&nn->rx_filter_stats_timer);
-	nn->bpf_offload_skip_sw = 0;
+	nn->dp.bpf_offload_skip_sw = 0;
 
 	return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
 }
@@ -254,7 +254,7 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
 		 * frames which didn't have BPF applied in the hardware should
 		 * be fine if software fallback is available, though.
 		 */
-		if (nn->bpf_offload_skip_sw)
+		if (nn->dp.bpf_offload_skip_sw)
 			return -EBUSY;
 
 		err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code,
@@ -269,7 +269,7 @@
 		return 0;
 
 	case TC_CLSBPF_ADD:
-		if (nn->ctrl & NFP_NET_CFG_CTRL_BPF)
+		if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
 			return -EBUSY;
 
 		err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code,
@@ -84,12 +84,12 @@ static void nfp_netvf_get_mac_addr(struct nfp_net *nn)
 	put_unaligned_be16(nn_readw(nn, NFP_NET_CFG_MACADDR + 6), &mac_addr[4]);
 
 	if (!is_valid_ether_addr(mac_addr)) {
-		eth_hw_addr_random(nn->netdev);
+		eth_hw_addr_random(nn->dp.netdev);
 		return;
 	}
 
-	ether_addr_copy(nn->netdev->dev_addr, mac_addr);
-	ether_addr_copy(nn->netdev->perm_addr, mac_addr);
+	ether_addr_copy(nn->dp.netdev->dev_addr, mac_addr);
+	ether_addr_copy(nn->dp.netdev->perm_addr, mac_addr);
 }
 
 static int nfp_netvf_pci_probe(struct pci_dev *pdev,
@@ -210,8 +210,8 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
 	vf->nn = nn;
 
 	nn->fw_ver = fw_ver;
-	nn->ctrl_bar = ctrl_bar;
-	nn->is_vf = 1;
+	nn->dp.ctrl_bar = ctrl_bar;
+	nn->dp.is_vf = 1;
 	nn->stride_tx = stride;
 	nn->stride_rx = stride;
 
@@ -268,7 +268,8 @@
 
 	num_irqs = nfp_net_irqs_alloc(pdev, vf->irq_entries,
 				      NFP_NET_MIN_PORT_IRQS,
-				      NFP_NET_NON_Q_VECTORS + nn->num_r_vecs);
+				      NFP_NET_NON_Q_VECTORS +
+				      nn->dp.num_r_vecs);
 	if (!num_irqs) {
 		nn_warn(nn, "Unable to allocate MSI-X Vectors. Exiting\n");
 		err = -EIO;
@@ -282,7 +283,7 @@
 	 */
 	nn->me_freq_mhz = 1200;
 
-	err = nfp_net_netdev_init(nn->netdev);
+	err = nfp_net_netdev_init(nn->dp.netdev);
 	if (err)
 		goto err_irqs_disable;
 
@@ -327,7 +328,7 @@ static void nfp_netvf_pci_remove(struct pci_dev *pdev)
 	nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
 	nfp_net_debugfs_dir_clean(&vf->ddir);
 
-	nfp_net_netdev_clean(nn->netdev);
+	nfp_net_netdev_clean(nn->dp.netdev);
 
 	nfp_net_irqs_disable(pdev);
 
@@ -337,7 +338,7 @@
 	} else {
 		iounmap(vf->q_bar);
 	}
-	iounmap(nn->ctrl_bar);
+	iounmap(nn->dp.ctrl_bar);
 
 	nfp_net_netdev_free(nn);
 