Merge branch '10GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue
Jeff Kirsher says:

====================
10GbE Intel Wired LAN Driver Updates 2018-10-03

This series contains updates to ixgbe/ixgbevf and a few fixes for i40e
and iavf.

Shannon Nelson fixes the message length for IPsec mailbox messages.

Radoslaw fixes a transmit hang that occurs when XDP_TX exceeds the queue
limit, and another fix resolves a crash when flow director filters are
restored after a reset.

YueHaibing cleans up dead code that did not have any callers.

Dan Carpenter fixes an "off by one" error in IPsec for ixgbe.

Nathan Chancellor fixes the i40e driver to use the correct enum for link
speed. He also removes a debug statement, since it produced no useful
information and always evaluated to "TRUE".

Most notably, Björn introduces zero-copy AF_XDP support for the ixgbe
driver. The ixgbe zero-copy code lives in its own files, ixgbe_xsk.[ch],
analogous to the i40e ZC support. As in i40e, code paths have been
copied from the XDP path to the zero-copy path. Going forward we will
try to generalize more code between the AF_XDP ZC drivers and reduce the
heavy copy-and-paste.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 7bdaae270c
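The driver-side support in the diffs below is consumed from userspace through an AF_XDP socket bound to one hardware queue. The sketch below is not part of this series; it is a minimal illustration assuming the AF_XDP uAPI from include/uapi/linux/if_xdp.h, with error handling, ring mmap()s, fill-ring population and the required XDP redirect program omitted (samples/bpf/xdpsock_user.c is a complete consumer).

/* Illustrative only: create an AF_XDP socket and bind it in zero-copy
 * mode to queue_id of ifname.  All return values should be checked in
 * real code.
 */
#include <linux/if_xdp.h>
#include <net/if.h>
#include <sys/socket.h>
#include <stdlib.h>
#include <unistd.h>

#ifndef AF_XDP			/* older libc headers may lack these */
#define AF_XDP 44
#endif
#ifndef SOL_XDP
#define SOL_XDP 283
#endif

#define NUM_FRAMES 4096
#define FRAME_SIZE 2048

static int xsk_setup(const char *ifname, __u32 queue_id)
{
	struct xdp_umem_reg umem_reg = {};
	struct sockaddr_xdp sxdp = {};
	int ring_size = 2048;
	void *umem_area;
	int fd;

	fd = socket(AF_XDP, SOCK_RAW, 0);

	/* UMEM: packet buffer area shared between kernel and userspace */
	posix_memalign(&umem_area, getpagesize(), NUM_FRAMES * FRAME_SIZE);
	umem_reg.addr = (__u64)(unsigned long)umem_area;
	umem_reg.len = NUM_FRAMES * FRAME_SIZE;
	umem_reg.chunk_size = FRAME_SIZE;
	umem_reg.headroom = 0;
	setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &umem_reg, sizeof(umem_reg));

	/* fill/completion and rx/tx descriptor rings */
	setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING, &ring_size, sizeof(ring_size));
	setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &ring_size, sizeof(ring_size));
	setsockopt(fd, SOL_XDP, XDP_RX_RING, &ring_size, sizeof(ring_size));
	setsockopt(fd, SOL_XDP, XDP_TX_RING, &ring_size, sizeof(ring_size));

	/* bind to one hardware queue; XDP_ZEROCOPY requests the ZC path
	 * that this series implements for ixgbe
	 */
	sxdp.sxdp_family = AF_XDP;
	sxdp.sxdp_ifindex = if_nametoindex(ifname);
	sxdp.sxdp_queue_id = queue_id;
	sxdp.sxdp_flags = XDP_ZEROCOPY;
	bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp));

	return fd;
}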
@@ -132,8 +132,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
 		dev_info(&pf->pdev->dev, " vlan_features = 0x%08lx\n",
 			 (unsigned long int)nd->vlan_features);
 	}
-	dev_info(&pf->pdev->dev, " active_vlans is %s\n",
-		 vsi->active_vlans ? "<valid>" : "<null>");
 	dev_info(&pf->pdev->dev,
 		 " flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n",
 		 vsi->flags, vsi->netdev_registered, vsi->current_netdev_flags);

@@ -4256,7 +4256,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
 		vf->link_forced = true;
 		vf->link_up = true;
 		pfe.event_data.link_event.link_status = true;
-		pfe.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB;
+		pfe.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB;
 		break;
 	case IFLA_VF_LINK_STATE_DISABLE:
 		vf->link_forced = true;

@@ -342,7 +342,7 @@ struct iavf_adapter {
 	struct iavf_channel_config ch_config;
 	u8 num_tc;
 	struct list_head cloud_filter_list;
-	/* lock to protest access to the cloud filter list */
+	/* lock to protect access to the cloud filter list */
 	spinlock_t cloud_filter_list_lock;
 	u16 num_cloud_filters;
 };

@@ -8,7 +8,8 @@ obj-$(CONFIG_IXGBE) += ixgbe.o

 ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
               ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
-              ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o
+              ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o \
+              ixgbe_xsk.o

 ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
                              ixgbe_dcb_82599.o ixgbe_dcb_nl.o

@@ -228,13 +228,17 @@ struct ixgbe_tx_buffer {
 struct ixgbe_rx_buffer {
 	struct sk_buff *skb;
 	dma_addr_t dma;
-	struct page *page;
-#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
-	__u32 page_offset;
-#else
-	__u16 page_offset;
-#endif
-	__u16 pagecnt_bias;
+	union {
+		struct {
+			struct page *page;
+			__u32 page_offset;
+			__u16 pagecnt_bias;
+		};
+		struct {
+			void *addr;
+			u64 handle;
+		};
+	};
 };

 struct ixgbe_queue_stats {

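The union above means an Rx buffer is described either by a page (the existing page-reuse path) or by a UMEM address/handle pair (the zero-copy path); which view is valid follows from whether the ring has a UMEM attached. The helper below is hypothetical, only to illustrate the intended use of the two views -- ixgbe itself open-codes this in the respective Rx paths.

/* Illustrative only: picking the valid union view per ring type. */
static void *ixgbe_rx_buf_addr(const struct ixgbe_ring *rx_ring,
			       const struct ixgbe_rx_buffer *bi)
{
	if (rx_ring->xsk_umem)		/* AF_XDP zero-copy ring */
		return bi->addr;	/* kernel mapping of the UMEM chunk */

	/* regular ring: buffer lives inside a (possibly reused) page */
	return page_address(bi->page) + bi->page_offset;
}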
@@ -271,6 +275,7 @@ enum ixgbe_ring_state_t {
 	__IXGBE_TX_DETECT_HANG,
 	__IXGBE_HANG_CHECK_ARMED,
 	__IXGBE_TX_XDP_RING,
+	__IXGBE_TX_DISABLED,
 };

 #define ring_uses_build_skb(ring) \

@@ -347,6 +352,10 @@ struct ixgbe_ring {
 		struct ixgbe_rx_queue_stats rx_stats;
 	};
 	struct xdp_rxq_info xdp_rxq;
+	struct xdp_umem *xsk_umem;
+	struct zero_copy_allocator zca; /* ZC allocator anchor */
+	u16 ring_idx;		/* {rx,tx,xdp}_ring back reference idx */
+	u16 rx_buf_len;
 } ____cacheline_internodealigned_in_smp;

 enum ixgbe_ring_f_enum {

@@ -764,6 +773,11 @@ struct ixgbe_adapter {
 #ifdef CONFIG_XFRM_OFFLOAD
 	struct ixgbe_ipsec *ipsec;
 #endif /* CONFIG_XFRM_OFFLOAD */
+
+	/* AF_XDP zero-copy */
+	struct xdp_umem **xsk_umems;
+	u16 num_xsk_umems_used;
+	u16 num_xsk_umems;
 };

 static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)

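The new adapter fields hold one UMEM pointer per queue, indexed by the ring_idx back-reference added above. The sketch below condenses the lookup that ixgbe_xsk_umem(), added later in this series in ixgbe_xsk.c, performs with these fields; the name ring_umem() is illustrative only.

/* Illustrative only: per-ring UMEM lookup with the new adapter fields. */
static struct xdp_umem *ring_umem(struct ixgbe_adapter *adapter,
				  struct ixgbe_ring *ring)
{
	u16 qid = ring->ring_idx;	/* back-reference set in ixgbe_lib.c */

	if (!READ_ONCE(adapter->xdp_prog) ||	/* ZC only with XDP loaded */
	    !adapter->xsk_umems ||
	    qid >= adapter->num_xsk_umems)
		return NULL;

	return adapter->xsk_umems[qid];	/* NULL when no socket on this queue */
}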
@@ -3484,17 +3484,6 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
 	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
 }

-/**
- * ixgbe_fw_recovery_mode - Check if in FW NVM recovery mode
- * @hw: pointer to hardware structure
- */
-bool ixgbe_fw_recovery_mode(struct ixgbe_hw *hw)
-{
-	if (hw->mac.ops.fw_recovery_mode)
-		return hw->mac.ops.fw_recovery_mode(hw);
-	return false;
-}
-
 /**
  * ixgbe_get_device_caps_generic - Get additional device capabilities
  * @hw: pointer to hardware structure

@@ -1055,7 +1055,7 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
 	int txr_remaining = adapter->num_tx_queues;
 	int xdp_remaining = adapter->num_xdp_queues;
 	int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0;
-	int err;
+	int err, i;

 	/* only one q_vector if MSI-X is disabled. */
 	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))

@@ -1097,6 +1097,21 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
 		xdp_idx += xqpv;
 	}

+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		if (adapter->rx_ring[i])
+			adapter->rx_ring[i]->ring_idx = i;
+	}
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		if (adapter->tx_ring[i])
+			adapter->tx_ring[i]->ring_idx = i;
+	}
+
+	for (i = 0; i < adapter->num_xdp_queues; i++) {
+		if (adapter->xdp_ring[i])
+			adapter->xdp_ring[i]->ring_idx = i;
+	}
+
 	return 0;

 err_out:

@@ -34,12 +34,14 @@
 #include <net/tc_act/tc_mirred.h>
 #include <net/vxlan.h>
 #include <net/mpls.h>
+#include <net/xdp_sock.h>

 #include "ixgbe.h"
 #include "ixgbe_common.h"
 #include "ixgbe_dcb_82599.h"
 #include "ixgbe_sriov.h"
 #include "ixgbe_model.h"
+#include "ixgbe_txrx_common.h"

 char ixgbe_driver_name[] = "ixgbe";
 static const char ixgbe_driver_string[] =

@ -893,8 +895,8 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
|
|||
}
|
||||
}
|
||||
|
||||
static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
|
||||
u64 qmask)
|
||||
void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
|
||||
u64 qmask)
|
||||
{
|
||||
u32 mask;
|
||||
|
||||
|
@ -1673,9 +1675,9 @@ static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring,
|
|||
* order to populate the hash, checksum, VLAN, timestamp, protocol, and
|
||||
* other fields within the skb.
|
||||
**/
|
||||
static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
|
||||
union ixgbe_adv_rx_desc *rx_desc,
|
||||
struct sk_buff *skb)
|
||||
void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
|
||||
union ixgbe_adv_rx_desc *rx_desc,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
struct net_device *dev = rx_ring->netdev;
|
||||
u32 flags = rx_ring->q_vector->adapter->flags;
|
||||
|
@ -1708,8 +1710,8 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
|
|||
skb->protocol = eth_type_trans(skb, dev);
|
||||
}
|
||||
|
||||
static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
|
||||
struct sk_buff *skb)
|
||||
void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
napi_gro_receive(&q_vector->napi, skb);
|
||||
}
|
||||
|
@ -1868,9 +1870,9 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
|
|||
*
|
||||
* Returns true if an error was encountered and skb was freed.
|
||||
**/
|
||||
static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
|
||||
union ixgbe_adv_rx_desc *rx_desc,
|
||||
struct sk_buff *skb)
|
||||
bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
|
||||
union ixgbe_adv_rx_desc *rx_desc,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
struct net_device *netdev = rx_ring->netdev;
|
||||
|
||||
|
@ -2186,14 +2188,6 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
|
|||
return skb;
|
||||
}
|
||||
|
||||
#define IXGBE_XDP_PASS 0
|
||||
#define IXGBE_XDP_CONSUMED BIT(0)
|
||||
#define IXGBE_XDP_TX BIT(1)
|
||||
#define IXGBE_XDP_REDIR BIT(2)
|
||||
|
||||
static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
|
||||
struct xdp_frame *xdpf);
|
||||
|
||||
static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
|
||||
struct ixgbe_ring *rx_ring,
|
||||
struct xdp_buff *xdp)
|
||||
|
@ -3167,7 +3161,11 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
|
|||
#endif
|
||||
|
||||
ixgbe_for_each_ring(ring, q_vector->tx) {
|
||||
if (!ixgbe_clean_tx_irq(q_vector, ring, budget))
|
||||
bool wd = ring->xsk_umem ?
|
||||
ixgbe_clean_xdp_tx_irq(q_vector, ring, budget) :
|
||||
ixgbe_clean_tx_irq(q_vector, ring, budget);
|
||||
|
||||
if (!wd)
|
||||
clean_complete = false;
|
||||
}
|
||||
|
||||
|
@ -3183,7 +3181,10 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
|
|||
per_ring_budget = budget;
|
||||
|
||||
ixgbe_for_each_ring(ring, q_vector->rx) {
|
||||
int cleaned = ixgbe_clean_rx_irq(q_vector, ring,
|
||||
int cleaned = ring->xsk_umem ?
|
||||
ixgbe_clean_rx_irq_zc(q_vector, ring,
|
||||
per_ring_budget) :
|
||||
ixgbe_clean_rx_irq(q_vector, ring,
|
||||
per_ring_budget);
|
||||
|
||||
work_done += cleaned;
|
||||
|
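Because the rendering of the two ixgbe_poll() hunks above is hard to read, here is the resulting dispatch in condensed form: each ring is cleaned by the zero-copy routine from ixgbe_xsk.c when a UMEM is attached, and by the existing routine otherwise (budget bookkeeping elided). Both paths share the same NAPI context.

/* Condensed restatement of the ixgbe_poll() changes above. */
ixgbe_for_each_ring(ring, q_vector->tx) {
	bool wd = ring->xsk_umem ?
		  ixgbe_clean_xdp_tx_irq(q_vector, ring, budget) :
		  ixgbe_clean_tx_irq(q_vector, ring, budget);

	if (!wd)
		clean_complete = false;
}

ixgbe_for_each_ring(ring, q_vector->rx) {
	int cleaned = ring->xsk_umem ?
		      ixgbe_clean_rx_irq_zc(q_vector, ring, per_ring_budget) :
		      ixgbe_clean_rx_irq(q_vector, ring, per_ring_budget);

	work_done += cleaned;
}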
@ -3473,6 +3474,10 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
|
|||
u32 txdctl = IXGBE_TXDCTL_ENABLE;
|
||||
u8 reg_idx = ring->reg_idx;
|
||||
|
||||
ring->xsk_umem = NULL;
|
||||
if (ring_is_xdp(ring))
|
||||
ring->xsk_umem = ixgbe_xsk_umem(adapter, ring);
|
||||
|
||||
/* disable queue to avoid issues while updating state */
|
||||
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
|
||||
IXGBE_WRITE_FLUSH(hw);
|
||||
|
@ -3577,12 +3582,18 @@ static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
|
|||
else
|
||||
mtqc |= IXGBE_MTQC_64VF;
|
||||
} else {
|
||||
if (tcs > 4)
|
||||
if (tcs > 4) {
|
||||
mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
|
||||
else if (tcs > 1)
|
||||
} else if (tcs > 1) {
|
||||
mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
|
||||
else
|
||||
mtqc = IXGBE_MTQC_64Q_1PB;
|
||||
} else {
|
||||
u8 max_txq = adapter->num_tx_queues +
|
||||
adapter->num_xdp_queues;
|
||||
if (max_txq > 63)
|
||||
mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
|
||||
else
|
||||
mtqc = IXGBE_MTQC_64Q_1PB;
|
||||
}
|
||||
}
|
||||
|
||||
IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
|
||||
|
@ -3705,10 +3716,27 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
|
|||
srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
|
||||
|
||||
/* configure the packet buffer length */
|
||||
if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state))
|
||||
if (rx_ring->xsk_umem) {
|
||||
u32 xsk_buf_len = rx_ring->xsk_umem->chunk_size_nohr -
|
||||
XDP_PACKET_HEADROOM;
|
||||
|
||||
/* If the MAC support setting RXDCTL.RLPML, the
|
||||
* SRRCTL[n].BSIZEPKT is set to PAGE_SIZE and
|
||||
* RXDCTL.RLPML is set to the actual UMEM buffer
|
||||
* size. If not, then we are stuck with a 1k buffer
|
||||
* size resolution. In this case frames larger than
|
||||
* the UMEM buffer size viewed in a 1k resolution will
|
||||
* be dropped.
|
||||
*/
|
||||
if (hw->mac.type != ixgbe_mac_82599EB)
|
||||
srrctl |= PAGE_SIZE >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
|
||||
else
|
||||
srrctl |= xsk_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
|
||||
} else if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state)) {
|
||||
srrctl |= IXGBE_RXBUFFER_3K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
|
||||
else
|
||||
} else {
|
||||
srrctl |= IXGBE_RXBUFFER_2K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
|
||||
}
|
||||
|
||||
/* configure descriptor type */
|
||||
srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
|
||||
|
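To make the buffer-sizing comment above concrete, a worked example under assumed defaults (2048-byte UMEM chunks, no UMEM headroom, XDP_PACKET_HEADROOM of 256 bytes, and assuming the SRRCTL packet-buffer field counts in 1 KB units):

	xsk_buf_len = chunk_size_nohr - XDP_PACKET_HEADROOM
	            = 2048 - 256 = 1792 bytes

	MACs with RXDCTL.RLPML: SRRCTL.BSIZEPKT is taken from PAGE_SIZE and
	                        RLPML enforces the exact 1792-byte limit.
	82599 (no RLPML here):  SRRCTL.BSIZEPKT = 1792 >> 10 = 1, i.e. a 1 KB
	                        granularity, so larger frames are dropped as
	                        the comment in the hunk above describes.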
@ -4031,6 +4059,19 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
|
|||
u32 rxdctl;
|
||||
u8 reg_idx = ring->reg_idx;
|
||||
|
||||
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
|
||||
ring->xsk_umem = ixgbe_xsk_umem(adapter, ring);
|
||||
if (ring->xsk_umem) {
|
||||
ring->zca.free = ixgbe_zca_free;
|
||||
WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
|
||||
MEM_TYPE_ZERO_COPY,
|
||||
&ring->zca));
|
||||
|
||||
} else {
|
||||
WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
|
||||
MEM_TYPE_PAGE_SHARED, NULL));
|
||||
}
|
||||
|
||||
/* disable queue to avoid use of these values while updating state */
|
||||
rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
|
||||
rxdctl &= ~IXGBE_RXDCTL_ENABLE;
|
||||
|
@ -4080,6 +4121,17 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
|
|||
#endif
|
||||
}
|
||||
|
||||
if (ring->xsk_umem && hw->mac.type != ixgbe_mac_82599EB) {
|
||||
u32 xsk_buf_len = ring->xsk_umem->chunk_size_nohr -
|
||||
XDP_PACKET_HEADROOM;
|
||||
|
||||
rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
|
||||
IXGBE_RXDCTL_RLPML_EN);
|
||||
rxdctl |= xsk_buf_len | IXGBE_RXDCTL_RLPML_EN;
|
||||
|
||||
ring->rx_buf_len = xsk_buf_len;
|
||||
}
|
||||
|
||||
/* initialize rx_buffer_info */
|
||||
memset(ring->rx_buffer_info, 0,
|
||||
sizeof(struct ixgbe_rx_buffer) * ring->count);
|
||||
|
@ -4093,7 +4145,10 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
|
|||
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
|
||||
|
||||
ixgbe_rx_desc_queue_enable(adapter, ring);
|
||||
ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
|
||||
if (ring->xsk_umem)
|
||||
ixgbe_alloc_rx_buffers_zc(ring, ixgbe_desc_unused(ring));
|
||||
else
|
||||
ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
|
||||
}
|
||||
|
||||
static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
|
||||
|
@ -5173,6 +5228,7 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
|
|||
struct ixgbe_hw *hw = &adapter->hw;
|
||||
struct hlist_node *node2;
|
||||
struct ixgbe_fdir_filter *filter;
|
||||
u64 action;
|
||||
|
||||
spin_lock(&adapter->fdir_perfect_lock);
|
||||
|
||||
|
@ -5181,12 +5237,17 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
|
|||
|
||||
hlist_for_each_entry_safe(filter, node2,
|
||||
&adapter->fdir_filter_list, fdir_node) {
|
||||
action = filter->action;
|
||||
if (action != IXGBE_FDIR_DROP_QUEUE && action != 0)
|
||||
action =
|
||||
(action >> ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) - 1;
|
||||
|
||||
ixgbe_fdir_write_perfect_filter_82599(hw,
|
||||
&filter->filter,
|
||||
filter->sw_idx,
|
||||
(filter->action == IXGBE_FDIR_DROP_QUEUE) ?
|
||||
(action == IXGBE_FDIR_DROP_QUEUE) ?
|
||||
IXGBE_FDIR_DROP_QUEUE :
|
||||
adapter->rx_ring[filter->action]->reg_idx);
|
||||
adapter->rx_ring[action]->reg_idx);
|
||||
}
|
||||
|
||||
spin_unlock(&adapter->fdir_perfect_lock);
|
||||
|
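The restore path above is the flow-director crash fix called out in the cover letter. Condensed, the change is that the stored action is decoded before it is used as an rx_ring[] index; a raw ring-cookie value that encodes a VF can exceed the number of Rx rings and walk off the array after a reset.

/* Condensed from the hunk above; not a standalone function. */
u64 action = filter->action;

if (action != IXGBE_FDIR_DROP_QUEUE && action != 0)
	action = (action >> ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) - 1;

ixgbe_fdir_write_perfect_filter_82599(hw, &filter->filter, filter->sw_idx,
				      (action == IXGBE_FDIR_DROP_QUEUE) ?
				      IXGBE_FDIR_DROP_QUEUE :
				      adapter->rx_ring[action]->reg_idx);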
@ -5201,6 +5262,11 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
|
|||
u16 i = rx_ring->next_to_clean;
|
||||
struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
|
||||
|
||||
if (rx_ring->xsk_umem) {
|
||||
ixgbe_xsk_clean_rx_ring(rx_ring);
|
||||
goto skip_free;
|
||||
}
|
||||
|
||||
/* Free all the Rx ring sk_buffs */
|
||||
while (i != rx_ring->next_to_alloc) {
|
||||
if (rx_buffer->skb) {
|
||||
|
@ -5239,6 +5305,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
|
|||
}
|
||||
}
|
||||
|
||||
skip_free:
|
||||
rx_ring->next_to_alloc = 0;
|
||||
rx_ring->next_to_clean = 0;
|
||||
rx_ring->next_to_use = 0;
|
||||
|
@ -5883,6 +5950,11 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
|
|||
u16 i = tx_ring->next_to_clean;
|
||||
struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
|
||||
|
||||
if (tx_ring->xsk_umem) {
|
||||
ixgbe_xsk_clean_tx_ring(tx_ring);
|
||||
goto out;
|
||||
}
|
||||
|
||||
while (i != tx_ring->next_to_use) {
|
||||
union ixgbe_adv_tx_desc *eop_desc, *tx_desc;
|
||||
|
||||
|
@ -5934,6 +6006,7 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
|
|||
if (!ring_is_xdp(tx_ring))
|
||||
netdev_tx_reset_queue(txring_txq(tx_ring));
|
||||
|
||||
out:
|
||||
/* reset next_to_use and next_to_clean */
|
||||
tx_ring->next_to_use = 0;
|
||||
tx_ring->next_to_clean = 0;
|
||||
|
@ -6434,7 +6507,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
|
|||
struct device *dev = rx_ring->dev;
|
||||
int orig_node = dev_to_node(dev);
|
||||
int ring_node = -1;
|
||||
int size, err;
|
||||
int size;
|
||||
|
||||
size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
|
||||
|
||||
|
@ -6471,13 +6544,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
|
|||
rx_ring->queue_index) < 0)
|
||||
goto err;
|
||||
|
||||
err = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq,
|
||||
MEM_TYPE_PAGE_SHARED, NULL);
|
||||
if (err) {
|
||||
xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
|
||||
goto err;
|
||||
}
|
||||
|
||||
rx_ring->xdp_prog = adapter->xdp_prog;
|
||||
|
||||
return 0;
|
||||
|
@ -8102,9 +8168,6 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
|
|||
return __ixgbe_maybe_stop_tx(tx_ring, size);
|
||||
}
|
||||
|
||||
#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
|
||||
IXGBE_TXD_CMD_RS)
|
||||
|
||||
static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
|
||||
struct ixgbe_tx_buffer *first,
|
||||
const u8 hdr_len)
|
||||
|
@ -8457,8 +8520,8 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
|
|||
}
|
||||
|
||||
#endif
|
||||
static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
|
||||
struct xdp_frame *xdpf)
|
||||
int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
|
||||
struct xdp_frame *xdpf)
|
||||
{
|
||||
struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
|
||||
struct ixgbe_tx_buffer *tx_buffer;
|
||||
|
@ -8680,6 +8743,8 @@ static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb,
|
|||
return NETDEV_TX_OK;
|
||||
|
||||
tx_ring = ring ? ring : adapter->tx_ring[skb->queue_mapping];
|
||||
if (unlikely(test_bit(__IXGBE_TX_DISABLED, &tx_ring->state)))
|
||||
return NETDEV_TX_BUSY;
|
||||
|
||||
return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
|
||||
}
|
||||
|
@ -10191,12 +10256,19 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp)
|
|||
xdp->prog_id = adapter->xdp_prog ?
|
||||
adapter->xdp_prog->aux->id : 0;
|
||||
return 0;
|
||||
case XDP_QUERY_XSK_UMEM:
|
||||
return ixgbe_xsk_umem_query(adapter, &xdp->xsk.umem,
|
||||
xdp->xsk.queue_id);
|
||||
case XDP_SETUP_XSK_UMEM:
|
||||
return ixgbe_xsk_umem_setup(adapter, xdp->xsk.umem,
|
||||
xdp->xsk.queue_id);
|
||||
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
static void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring)
|
||||
void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring)
|
||||
{
|
||||
/* Force memory writes to complete before letting h/w know there
|
||||
* are new descriptors to fetch.
|
||||
|
@ -10226,6 +10298,9 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n,
|
|||
if (unlikely(!ring))
|
||||
return -ENXIO;
|
||||
|
||||
if (unlikely(test_bit(__IXGBE_TX_DISABLED, &ring->state)))
|
||||
return -ENXIO;
|
||||
|
||||
for (i = 0; i < n; i++) {
|
||||
struct xdp_frame *xdpf = frames[i];
|
||||
int err;
|
||||
|
@ -10287,8 +10362,162 @@ static const struct net_device_ops ixgbe_netdev_ops = {
|
|||
.ndo_features_check = ixgbe_features_check,
|
||||
.ndo_bpf = ixgbe_xdp,
|
||||
.ndo_xdp_xmit = ixgbe_xdp_xmit,
|
||||
.ndo_xsk_async_xmit = ixgbe_xsk_async_xmit,
|
||||
};
|
||||
|
||||
static void ixgbe_disable_txr_hw(struct ixgbe_adapter *adapter,
|
||||
struct ixgbe_ring *tx_ring)
|
||||
{
|
||||
unsigned long wait_delay, delay_interval;
|
||||
struct ixgbe_hw *hw = &adapter->hw;
|
||||
u8 reg_idx = tx_ring->reg_idx;
|
||||
int wait_loop;
|
||||
u32 txdctl;
|
||||
|
||||
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
|
||||
|
||||
/* delay mechanism from ixgbe_disable_tx */
|
||||
delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
|
||||
|
||||
wait_loop = IXGBE_MAX_RX_DESC_POLL;
|
||||
wait_delay = delay_interval;
|
||||
|
||||
while (wait_loop--) {
|
||||
usleep_range(wait_delay, wait_delay + 10);
|
||||
wait_delay += delay_interval * 2;
|
||||
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
|
||||
|
||||
if (!(txdctl & IXGBE_TXDCTL_ENABLE))
|
||||
return;
|
||||
}
|
||||
|
||||
e_err(drv, "TXDCTL.ENABLE not cleared within the polling period\n");
|
||||
}
|
||||
|
||||
static void ixgbe_disable_txr(struct ixgbe_adapter *adapter,
|
||||
struct ixgbe_ring *tx_ring)
|
||||
{
|
||||
set_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
|
||||
ixgbe_disable_txr_hw(adapter, tx_ring);
|
||||
}
|
||||
|
||||
static void ixgbe_disable_rxr_hw(struct ixgbe_adapter *adapter,
|
||||
struct ixgbe_ring *rx_ring)
|
||||
{
|
||||
unsigned long wait_delay, delay_interval;
|
||||
struct ixgbe_hw *hw = &adapter->hw;
|
||||
u8 reg_idx = rx_ring->reg_idx;
|
||||
int wait_loop;
|
||||
u32 rxdctl;
|
||||
|
||||
rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
|
||||
rxdctl &= ~IXGBE_RXDCTL_ENABLE;
|
||||
rxdctl |= IXGBE_RXDCTL_SWFLSH;
|
||||
|
||||
/* write value back with RXDCTL.ENABLE bit cleared */
|
||||
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
|
||||
|
||||
/* RXDCTL.EN may not change on 82598 if link is down, so skip it */
|
||||
if (hw->mac.type == ixgbe_mac_82598EB &&
|
||||
!(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
|
||||
return;
|
||||
|
||||
/* delay mechanism from ixgbe_disable_rx */
|
||||
delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
|
||||
|
||||
wait_loop = IXGBE_MAX_RX_DESC_POLL;
|
||||
wait_delay = delay_interval;
|
||||
|
||||
while (wait_loop--) {
|
||||
usleep_range(wait_delay, wait_delay + 10);
|
||||
wait_delay += delay_interval * 2;
|
||||
rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
|
||||
|
||||
if (!(rxdctl & IXGBE_RXDCTL_ENABLE))
|
||||
return;
|
||||
}
|
||||
|
||||
e_err(drv, "RXDCTL.ENABLE not cleared within the polling period\n");
|
||||
}
|
||||
|
||||
static void ixgbe_reset_txr_stats(struct ixgbe_ring *tx_ring)
|
||||
{
|
||||
memset(&tx_ring->stats, 0, sizeof(tx_ring->stats));
|
||||
memset(&tx_ring->tx_stats, 0, sizeof(tx_ring->tx_stats));
|
||||
}
|
||||
|
||||
static void ixgbe_reset_rxr_stats(struct ixgbe_ring *rx_ring)
|
||||
{
|
||||
memset(&rx_ring->stats, 0, sizeof(rx_ring->stats));
|
||||
memset(&rx_ring->rx_stats, 0, sizeof(rx_ring->rx_stats));
|
||||
}
|
||||
|
||||
/**
|
||||
* ixgbe_txrx_ring_disable - Disable Rx/Tx/XDP Tx rings
|
||||
* @adapter: adapter structure
|
||||
* @ring: ring index
|
||||
*
|
||||
* This function disables a certain Rx/Tx/XDP Tx ring. The function
|
||||
* assumes that the netdev is running.
|
||||
**/
|
||||
void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring)
|
||||
{
|
||||
struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring;
|
||||
|
||||
rx_ring = adapter->rx_ring[ring];
|
||||
tx_ring = adapter->tx_ring[ring];
|
||||
xdp_ring = adapter->xdp_ring[ring];
|
||||
|
||||
ixgbe_disable_txr(adapter, tx_ring);
|
||||
if (xdp_ring)
|
||||
ixgbe_disable_txr(adapter, xdp_ring);
|
||||
ixgbe_disable_rxr_hw(adapter, rx_ring);
|
||||
|
||||
if (xdp_ring)
|
||||
synchronize_sched();
|
||||
|
||||
/* Rx/Tx/XDP Tx share the same napi context. */
|
||||
napi_disable(&rx_ring->q_vector->napi);
|
||||
|
||||
ixgbe_clean_tx_ring(tx_ring);
|
||||
if (xdp_ring)
|
||||
ixgbe_clean_tx_ring(xdp_ring);
|
||||
ixgbe_clean_rx_ring(rx_ring);
|
||||
|
||||
ixgbe_reset_txr_stats(tx_ring);
|
||||
if (xdp_ring)
|
||||
ixgbe_reset_txr_stats(xdp_ring);
|
||||
ixgbe_reset_rxr_stats(rx_ring);
|
||||
}
|
||||
|
||||
/**
|
||||
* ixgbe_txrx_ring_enable - Enable Rx/Tx/XDP Tx rings
|
||||
* @adapter: adapter structure
|
||||
* @ring: ring index
|
||||
*
|
||||
* This function enables a certain Rx/Tx/XDP Tx ring. The function
|
||||
* assumes that the netdev is running.
|
||||
**/
|
||||
void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
|
||||
{
|
||||
struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring;
|
||||
|
||||
rx_ring = adapter->rx_ring[ring];
|
||||
tx_ring = adapter->tx_ring[ring];
|
||||
xdp_ring = adapter->xdp_ring[ring];
|
||||
|
||||
/* Rx/Tx/XDP Tx share the same napi context. */
|
||||
napi_enable(&rx_ring->q_vector->napi);
|
||||
|
||||
ixgbe_configure_tx_ring(adapter, tx_ring);
|
||||
if (xdp_ring)
|
||||
ixgbe_configure_tx_ring(adapter, xdp_ring);
|
||||
ixgbe_configure_rx_ring(adapter, rx_ring);
|
||||
|
||||
clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
|
||||
clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state);
|
||||
}
|
||||
|
||||
/**
|
||||
* ixgbe_enumerate_functions - Get the number of ports this device has
|
||||
* @adapter: adapter structure
|
||||
|
|
|
@ -0,0 +1,50 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/* Copyright(c) 2018 Intel Corporation. */
|
||||
|
||||
#ifndef _IXGBE_TXRX_COMMON_H_
|
||||
#define _IXGBE_TXRX_COMMON_H_
|
||||
|
||||
#define IXGBE_XDP_PASS 0
|
||||
#define IXGBE_XDP_CONSUMED BIT(0)
|
||||
#define IXGBE_XDP_TX BIT(1)
|
||||
#define IXGBE_XDP_REDIR BIT(2)
|
||||
|
||||
#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
|
||||
IXGBE_TXD_CMD_RS)
|
||||
|
||||
int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
|
||||
struct xdp_frame *xdpf);
|
||||
bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
|
||||
union ixgbe_adv_rx_desc *rx_desc,
|
||||
struct sk_buff *skb);
|
||||
void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
|
||||
union ixgbe_adv_rx_desc *rx_desc,
|
||||
struct sk_buff *skb);
|
||||
void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
|
||||
struct sk_buff *skb);
|
||||
void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring);
|
||||
void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, u64 qmask);
|
||||
|
||||
void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring);
|
||||
void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring);
|
||||
|
||||
struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter,
|
||||
struct ixgbe_ring *ring);
|
||||
int ixgbe_xsk_umem_query(struct ixgbe_adapter *adapter, struct xdp_umem **umem,
|
||||
u16 qid);
|
||||
int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem,
|
||||
u16 qid);
|
||||
|
||||
void ixgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle);
|
||||
|
||||
void ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count);
|
||||
int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
|
||||
struct ixgbe_ring *rx_ring,
|
||||
const int budget);
|
||||
void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring);
|
||||
bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
|
||||
struct ixgbe_ring *tx_ring, int napi_budget);
|
||||
int ixgbe_xsk_async_xmit(struct net_device *dev, u32 queue_id);
|
||||
void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring);
|
||||
|
||||
#endif /* #define _IXGBE_TXRX_COMMON_H_ */
|
|
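This shared header is what lets ixgbe_main.c and the new ixgbe_xsk.c use the same Tx helpers and XDP verdict bits. Condensed, both Rx cleanup loops consume the IXGBE_XDP_* flags as sketched below, so the tail bump and redirect flush happen once per NAPI poll rather than once per packet.

/* Condensed from the Rx cleanup loops in ixgbe_main.c and ixgbe_xsk.c. */
unsigned int xdp_xmit = 0;

/* inside the per-packet loop: accumulate per-packet verdicts */
xdp_xmit |= xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR);

/* after the loop: flush the batched work once */
if (xdp_xmit & IXGBE_XDP_REDIR)
	xdp_do_flush_map();

if (xdp_xmit & IXGBE_XDP_TX)
	ixgbe_xdp_ring_update_tail(adapter->xdp_ring[smp_processor_id()]);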
@ -0,0 +1,801 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* Copyright(c) 2018 Intel Corporation. */
|
||||
|
||||
#include <linux/bpf_trace.h>
|
||||
#include <net/xdp_sock.h>
|
||||
#include <net/xdp.h>
|
||||
|
||||
#include "ixgbe.h"
|
||||
#include "ixgbe_txrx_common.h"
|
||||
|
||||
struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter,
|
||||
struct ixgbe_ring *ring)
|
||||
{
|
||||
bool xdp_on = READ_ONCE(adapter->xdp_prog);
|
||||
int qid = ring->ring_idx;
|
||||
|
||||
if (!adapter->xsk_umems || !adapter->xsk_umems[qid] ||
|
||||
qid >= adapter->num_xsk_umems || !xdp_on)
|
||||
return NULL;
|
||||
|
||||
return adapter->xsk_umems[qid];
|
||||
}
|
||||
|
||||
static int ixgbe_alloc_xsk_umems(struct ixgbe_adapter *adapter)
|
||||
{
|
||||
if (adapter->xsk_umems)
|
||||
return 0;
|
||||
|
||||
adapter->num_xsk_umems_used = 0;
|
||||
adapter->num_xsk_umems = adapter->num_rx_queues;
|
||||
adapter->xsk_umems = kcalloc(adapter->num_xsk_umems,
|
||||
sizeof(*adapter->xsk_umems),
|
||||
GFP_KERNEL);
|
||||
if (!adapter->xsk_umems) {
|
||||
adapter->num_xsk_umems = 0;
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ixgbe_add_xsk_umem(struct ixgbe_adapter *adapter,
|
||||
struct xdp_umem *umem,
|
||||
u16 qid)
|
||||
{
|
||||
int err;
|
||||
|
||||
err = ixgbe_alloc_xsk_umems(adapter);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
adapter->xsk_umems[qid] = umem;
|
||||
adapter->num_xsk_umems_used++;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void ixgbe_remove_xsk_umem(struct ixgbe_adapter *adapter, u16 qid)
|
||||
{
|
||||
adapter->xsk_umems[qid] = NULL;
|
||||
adapter->num_xsk_umems_used--;
|
||||
|
||||
if (adapter->num_xsk_umems == 0) {
|
||||
kfree(adapter->xsk_umems);
|
||||
adapter->xsk_umems = NULL;
|
||||
adapter->num_xsk_umems = 0;
|
||||
}
|
||||
}
|
||||
|
||||
static int ixgbe_xsk_umem_dma_map(struct ixgbe_adapter *adapter,
|
||||
struct xdp_umem *umem)
|
||||
{
|
||||
struct device *dev = &adapter->pdev->dev;
|
||||
unsigned int i, j;
|
||||
dma_addr_t dma;
|
||||
|
||||
for (i = 0; i < umem->npgs; i++) {
|
||||
dma = dma_map_page_attrs(dev, umem->pgs[i], 0, PAGE_SIZE,
|
||||
DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR);
|
||||
if (dma_mapping_error(dev, dma))
|
||||
goto out_unmap;
|
||||
|
||||
umem->pages[i].dma = dma;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
out_unmap:
|
||||
for (j = 0; j < i; j++) {
|
||||
dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
|
||||
DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR);
|
||||
umem->pages[i].dma = 0;
|
||||
}
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
static void ixgbe_xsk_umem_dma_unmap(struct ixgbe_adapter *adapter,
|
||||
struct xdp_umem *umem)
|
||||
{
|
||||
struct device *dev = &adapter->pdev->dev;
|
||||
unsigned int i;
|
||||
|
||||
for (i = 0; i < umem->npgs; i++) {
|
||||
dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
|
||||
DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR);
|
||||
|
||||
umem->pages[i].dma = 0;
|
||||
}
|
||||
}
|
||||
|
||||
static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
|
||||
struct xdp_umem *umem,
|
||||
u16 qid)
|
||||
{
|
||||
struct xdp_umem_fq_reuse *reuseq;
|
||||
bool if_running;
|
||||
int err;
|
||||
|
||||
if (qid >= adapter->num_rx_queues)
|
||||
return -EINVAL;
|
||||
|
||||
if (adapter->xsk_umems) {
|
||||
if (qid >= adapter->num_xsk_umems)
|
||||
return -EINVAL;
|
||||
if (adapter->xsk_umems[qid])
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
reuseq = xsk_reuseq_prepare(adapter->rx_ring[0]->count);
|
||||
if (!reuseq)
|
||||
return -ENOMEM;
|
||||
|
||||
xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));
|
||||
|
||||
err = ixgbe_xsk_umem_dma_map(adapter, umem);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if_running = netif_running(adapter->netdev) &&
|
||||
READ_ONCE(adapter->xdp_prog);
|
||||
|
||||
if (if_running)
|
||||
ixgbe_txrx_ring_disable(adapter, qid);
|
||||
|
||||
err = ixgbe_add_xsk_umem(adapter, umem, qid);
|
||||
|
||||
if (if_running)
|
||||
ixgbe_txrx_ring_enable(adapter, qid);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid)
|
||||
{
|
||||
bool if_running;
|
||||
|
||||
if (!adapter->xsk_umems || qid >= adapter->num_xsk_umems ||
|
||||
!adapter->xsk_umems[qid])
|
||||
return -EINVAL;
|
||||
|
||||
if_running = netif_running(adapter->netdev) &&
|
||||
READ_ONCE(adapter->xdp_prog);
|
||||
|
||||
if (if_running)
|
||||
ixgbe_txrx_ring_disable(adapter, qid);
|
||||
|
||||
ixgbe_xsk_umem_dma_unmap(adapter, adapter->xsk_umems[qid]);
|
||||
ixgbe_remove_xsk_umem(adapter, qid);
|
||||
|
||||
if (if_running)
|
||||
ixgbe_txrx_ring_enable(adapter, qid);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int ixgbe_xsk_umem_query(struct ixgbe_adapter *adapter, struct xdp_umem **umem,
|
||||
u16 qid)
|
||||
{
|
||||
if (qid >= adapter->num_rx_queues)
|
||||
return -EINVAL;
|
||||
|
||||
if (adapter->xsk_umems) {
|
||||
if (qid >= adapter->num_xsk_umems)
|
||||
return -EINVAL;
|
||||
*umem = adapter->xsk_umems[qid];
|
||||
return 0;
|
||||
}
|
||||
|
||||
*umem = NULL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem,
|
||||
u16 qid)
|
||||
{
|
||||
return umem ? ixgbe_xsk_umem_enable(adapter, umem, qid) :
|
||||
ixgbe_xsk_umem_disable(adapter, qid);
|
||||
}
|
||||
|
||||
static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
|
||||
struct ixgbe_ring *rx_ring,
|
||||
struct xdp_buff *xdp)
|
||||
{
|
||||
int err, result = IXGBE_XDP_PASS;
|
||||
struct bpf_prog *xdp_prog;
|
||||
struct xdp_frame *xdpf;
|
||||
u32 act;
|
||||
|
||||
rcu_read_lock();
|
||||
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
|
||||
act = bpf_prog_run_xdp(xdp_prog, xdp);
|
||||
xdp->handle += xdp->data - xdp->data_hard_start;
|
||||
switch (act) {
|
||||
case XDP_PASS:
|
||||
break;
|
||||
case XDP_TX:
|
||||
xdpf = convert_to_xdp_frame(xdp);
|
||||
if (unlikely(!xdpf)) {
|
||||
result = IXGBE_XDP_CONSUMED;
|
||||
break;
|
||||
}
|
||||
result = ixgbe_xmit_xdp_ring(adapter, xdpf);
|
||||
break;
|
||||
case XDP_REDIRECT:
|
||||
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
|
||||
result = !err ? IXGBE_XDP_REDIR : IXGBE_XDP_CONSUMED;
|
||||
break;
|
||||
default:
|
||||
bpf_warn_invalid_xdp_action(act);
|
||||
/* fallthrough */
|
||||
case XDP_ABORTED:
|
||||
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
|
||||
/* fallthrough -- handle aborts by dropping packet */
|
||||
case XDP_DROP:
|
||||
result = IXGBE_XDP_CONSUMED;
|
||||
break;
|
||||
}
|
||||
rcu_read_unlock();
|
||||
return result;
|
||||
}
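ixgbe_run_xdp_zc() above only acts on the verdict; steering frames into an AF_XDP socket still requires a user-loaded XDP program that redirects into an xskmap. The following is not part of this series -- a minimal sketch in the style of samples/bpf/xdpsock, assuming libbpf-style SEC() annotations.

/* Illustrative only: redirect each frame to the AF_XDP socket bound to
 * the queue it arrived on, if one exists in the map.
 */
#include <linux/bpf.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") xsks_map = {
	.type = BPF_MAP_TYPE_XSKMAP,
	.key_size = sizeof(int),
	.value_size = sizeof(int),
	.max_entries = 64,		/* one slot per hardware queue */
};

SEC("xdp_sock")
int xdp_sock_prog(struct xdp_md *ctx)
{
	return bpf_redirect_map(&xsks_map, ctx->rx_queue_index, 0);
}

char _license[] SEC("license") = "GPL";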
|
||||
|
||||
static struct
|
||||
ixgbe_rx_buffer *ixgbe_get_rx_buffer_zc(struct ixgbe_ring *rx_ring,
|
||||
unsigned int size)
|
||||
{
|
||||
struct ixgbe_rx_buffer *bi;
|
||||
|
||||
bi = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
|
||||
|
||||
/* we are reusing so sync this buffer for CPU use */
|
||||
dma_sync_single_range_for_cpu(rx_ring->dev,
|
||||
bi->dma, 0,
|
||||
size,
|
||||
DMA_BIDIRECTIONAL);
|
||||
|
||||
return bi;
|
||||
}
|
||||
|
||||
static void ixgbe_reuse_rx_buffer_zc(struct ixgbe_ring *rx_ring,
|
||||
struct ixgbe_rx_buffer *obi)
|
||||
{
|
||||
unsigned long mask = (unsigned long)rx_ring->xsk_umem->chunk_mask;
|
||||
u64 hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
|
||||
u16 nta = rx_ring->next_to_alloc;
|
||||
struct ixgbe_rx_buffer *nbi;
|
||||
|
||||
nbi = &rx_ring->rx_buffer_info[rx_ring->next_to_alloc];
|
||||
/* update, and store next to alloc */
|
||||
nta++;
|
||||
rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
|
||||
|
||||
/* transfer page from old buffer to new buffer */
|
||||
nbi->dma = obi->dma & mask;
|
||||
nbi->dma += hr;
|
||||
|
||||
nbi->addr = (void *)((unsigned long)obi->addr & mask);
|
||||
nbi->addr += hr;
|
||||
|
||||
nbi->handle = obi->handle & mask;
|
||||
nbi->handle += rx_ring->xsk_umem->headroom;
|
||||
|
||||
obi->addr = NULL;
|
||||
obi->skb = NULL;
|
||||
}
|
||||
|
||||
void ixgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle)
|
||||
{
|
||||
struct ixgbe_rx_buffer *bi;
|
||||
struct ixgbe_ring *rx_ring;
|
||||
u64 hr, mask;
|
||||
u16 nta;
|
||||
|
||||
rx_ring = container_of(alloc, struct ixgbe_ring, zca);
|
||||
hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
|
||||
mask = rx_ring->xsk_umem->chunk_mask;
|
||||
|
||||
nta = rx_ring->next_to_alloc;
|
||||
bi = rx_ring->rx_buffer_info;
|
||||
|
||||
nta++;
|
||||
rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
|
||||
|
||||
handle &= mask;
|
||||
|
||||
bi->dma = xdp_umem_get_dma(rx_ring->xsk_umem, handle);
|
||||
bi->dma += hr;
|
||||
|
||||
bi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle);
|
||||
bi->addr += hr;
|
||||
|
||||
bi->handle = (u64)handle + rx_ring->xsk_umem->headroom;
|
||||
}
|
||||
|
||||
static bool ixgbe_alloc_buffer_zc(struct ixgbe_ring *rx_ring,
|
||||
struct ixgbe_rx_buffer *bi)
|
||||
{
|
||||
struct xdp_umem *umem = rx_ring->xsk_umem;
|
||||
void *addr = bi->addr;
|
||||
u64 handle, hr;
|
||||
|
||||
if (addr)
|
||||
return true;
|
||||
|
||||
if (!xsk_umem_peek_addr(umem, &handle)) {
|
||||
rx_ring->rx_stats.alloc_rx_page_failed++;
|
||||
return false;
|
||||
}
|
||||
|
||||
hr = umem->headroom + XDP_PACKET_HEADROOM;
|
||||
|
||||
bi->dma = xdp_umem_get_dma(umem, handle);
|
||||
bi->dma += hr;
|
||||
|
||||
bi->addr = xdp_umem_get_data(umem, handle);
|
||||
bi->addr += hr;
|
||||
|
||||
bi->handle = handle + umem->headroom;
|
||||
|
||||
xsk_umem_discard_addr(umem);
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool ixgbe_alloc_buffer_slow_zc(struct ixgbe_ring *rx_ring,
|
||||
struct ixgbe_rx_buffer *bi)
|
||||
{
|
||||
struct xdp_umem *umem = rx_ring->xsk_umem;
|
||||
u64 handle, hr;
|
||||
|
||||
if (!xsk_umem_peek_addr_rq(umem, &handle)) {
|
||||
rx_ring->rx_stats.alloc_rx_page_failed++;
|
||||
return false;
|
||||
}
|
||||
|
||||
handle &= rx_ring->xsk_umem->chunk_mask;
|
||||
|
||||
hr = umem->headroom + XDP_PACKET_HEADROOM;
|
||||
|
||||
bi->dma = xdp_umem_get_dma(umem, handle);
|
||||
bi->dma += hr;
|
||||
|
||||
bi->addr = xdp_umem_get_data(umem, handle);
|
||||
bi->addr += hr;
|
||||
|
||||
bi->handle = handle + umem->headroom;
|
||||
|
||||
xsk_umem_discard_addr_rq(umem);
|
||||
return true;
|
||||
}
|
||||
|
||||
static __always_inline bool
|
||||
__ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count,
|
||||
bool alloc(struct ixgbe_ring *rx_ring,
|
||||
struct ixgbe_rx_buffer *bi))
|
||||
{
|
||||
union ixgbe_adv_rx_desc *rx_desc;
|
||||
struct ixgbe_rx_buffer *bi;
|
||||
u16 i = rx_ring->next_to_use;
|
||||
bool ok = true;
|
||||
|
||||
/* nothing to do */
|
||||
if (!cleaned_count)
|
||||
return true;
|
||||
|
||||
rx_desc = IXGBE_RX_DESC(rx_ring, i);
|
||||
bi = &rx_ring->rx_buffer_info[i];
|
||||
i -= rx_ring->count;
|
||||
|
||||
do {
|
||||
if (!alloc(rx_ring, bi)) {
|
||||
ok = false;
|
||||
break;
|
||||
}
|
||||
|
||||
/* sync the buffer for use by the device */
|
||||
dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
|
||||
bi->page_offset,
|
||||
rx_ring->rx_buf_len,
|
||||
DMA_BIDIRECTIONAL);
|
||||
|
||||
/* Refresh the desc even if buffer_addrs didn't change
|
||||
* because each write-back erases this info.
|
||||
*/
|
||||
rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
|
||||
|
||||
rx_desc++;
|
||||
bi++;
|
||||
i++;
|
||||
if (unlikely(!i)) {
|
||||
rx_desc = IXGBE_RX_DESC(rx_ring, 0);
|
||||
bi = rx_ring->rx_buffer_info;
|
||||
i -= rx_ring->count;
|
||||
}
|
||||
|
||||
/* clear the length for the next_to_use descriptor */
|
||||
rx_desc->wb.upper.length = 0;
|
||||
|
||||
cleaned_count--;
|
||||
} while (cleaned_count);
|
||||
|
||||
i += rx_ring->count;
|
||||
|
||||
if (rx_ring->next_to_use != i) {
|
||||
rx_ring->next_to_use = i;
|
||||
|
||||
/* update next to alloc since we have filled the ring */
|
||||
rx_ring->next_to_alloc = i;
|
||||
|
||||
/* Force memory writes to complete before letting h/w
|
||||
* know there are new descriptors to fetch. (Only
|
||||
* applicable for weak-ordered memory model archs,
|
||||
* such as IA-64).
|
||||
*/
|
||||
wmb();
|
||||
writel(i, rx_ring->tail);
|
||||
}
|
||||
|
||||
return ok;
|
||||
}
|
||||
|
||||
void ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count)
|
||||
{
|
||||
__ixgbe_alloc_rx_buffers_zc(rx_ring, count,
|
||||
ixgbe_alloc_buffer_slow_zc);
|
||||
}
|
||||
|
||||
static bool ixgbe_alloc_rx_buffers_fast_zc(struct ixgbe_ring *rx_ring,
|
||||
u16 count)
|
||||
{
|
||||
return __ixgbe_alloc_rx_buffers_zc(rx_ring, count,
|
||||
ixgbe_alloc_buffer_zc);
|
||||
}
|
||||
|
||||
static struct sk_buff *ixgbe_construct_skb_zc(struct ixgbe_ring *rx_ring,
|
||||
struct ixgbe_rx_buffer *bi,
|
||||
struct xdp_buff *xdp)
|
||||
{
|
||||
unsigned int metasize = xdp->data - xdp->data_meta;
|
||||
unsigned int datasize = xdp->data_end - xdp->data;
|
||||
struct sk_buff *skb;
|
||||
|
||||
/* allocate a skb to store the frags */
|
||||
skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
|
||||
xdp->data_end - xdp->data_hard_start,
|
||||
GFP_ATOMIC | __GFP_NOWARN);
|
||||
if (unlikely(!skb))
|
||||
return NULL;
|
||||
|
||||
skb_reserve(skb, xdp->data - xdp->data_hard_start);
|
||||
memcpy(__skb_put(skb, datasize), xdp->data, datasize);
|
||||
if (metasize)
|
||||
skb_metadata_set(skb, metasize);
|
||||
|
||||
ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
|
||||
return skb;
|
||||
}
|
||||
|
||||
static void ixgbe_inc_ntc(struct ixgbe_ring *rx_ring)
|
||||
{
|
||||
u32 ntc = rx_ring->next_to_clean + 1;
|
||||
|
||||
ntc = (ntc < rx_ring->count) ? ntc : 0;
|
||||
rx_ring->next_to_clean = ntc;
|
||||
prefetch(IXGBE_RX_DESC(rx_ring, ntc));
|
||||
}
|
||||
|
||||
int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
|
||||
struct ixgbe_ring *rx_ring,
|
||||
const int budget)
|
||||
{
|
||||
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
|
||||
struct ixgbe_adapter *adapter = q_vector->adapter;
|
||||
u16 cleaned_count = ixgbe_desc_unused(rx_ring);
|
||||
unsigned int xdp_res, xdp_xmit = 0;
|
||||
bool failure = false;
|
||||
struct sk_buff *skb;
|
||||
struct xdp_buff xdp;
|
||||
|
||||
xdp.rxq = &rx_ring->xdp_rxq;
|
||||
|
||||
while (likely(total_rx_packets < budget)) {
|
||||
union ixgbe_adv_rx_desc *rx_desc;
|
||||
struct ixgbe_rx_buffer *bi;
|
||||
unsigned int size;
|
||||
|
||||
/* return some buffers to hardware, one at a time is too slow */
|
||||
if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
|
||||
failure = failure ||
|
||||
!ixgbe_alloc_rx_buffers_fast_zc(rx_ring,
|
||||
cleaned_count);
|
||||
cleaned_count = 0;
|
||||
}
|
||||
|
||||
rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
|
||||
size = le16_to_cpu(rx_desc->wb.upper.length);
|
||||
if (!size)
|
||||
break;
|
||||
|
||||
/* This memory barrier is needed to keep us from reading
|
||||
* any other fields out of the rx_desc until we know the
|
||||
* descriptor has been written back
|
||||
*/
|
||||
dma_rmb();
|
||||
|
||||
bi = ixgbe_get_rx_buffer_zc(rx_ring, size);
|
||||
|
||||
if (unlikely(!ixgbe_test_staterr(rx_desc,
|
||||
IXGBE_RXD_STAT_EOP))) {
|
||||
struct ixgbe_rx_buffer *next_bi;
|
||||
|
||||
ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
|
||||
ixgbe_inc_ntc(rx_ring);
|
||||
next_bi =
|
||||
&rx_ring->rx_buffer_info[rx_ring->next_to_clean];
|
||||
next_bi->skb = ERR_PTR(-EINVAL);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (unlikely(bi->skb)) {
|
||||
ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
|
||||
ixgbe_inc_ntc(rx_ring);
|
||||
continue;
|
||||
}
|
||||
|
||||
xdp.data = bi->addr;
|
||||
xdp.data_meta = xdp.data;
|
||||
xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
|
||||
xdp.data_end = xdp.data + size;
|
||||
xdp.handle = bi->handle;
|
||||
|
||||
xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, &xdp);
|
||||
|
||||
if (xdp_res) {
|
||||
if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
|
||||
xdp_xmit |= xdp_res;
|
||||
bi->addr = NULL;
|
||||
bi->skb = NULL;
|
||||
} else {
|
||||
ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
|
||||
}
|
||||
total_rx_packets++;
|
||||
total_rx_bytes += size;
|
||||
|
||||
cleaned_count++;
|
||||
ixgbe_inc_ntc(rx_ring);
|
||||
continue;
|
||||
}
|
||||
|
||||
/* XDP_PASS path */
|
||||
skb = ixgbe_construct_skb_zc(rx_ring, bi, &xdp);
|
||||
if (!skb) {
|
||||
rx_ring->rx_stats.alloc_rx_buff_failed++;
|
||||
break;
|
||||
}
|
||||
|
||||
cleaned_count++;
|
||||
ixgbe_inc_ntc(rx_ring);
|
||||
|
||||
if (eth_skb_pad(skb))
|
||||
continue;
|
||||
|
||||
total_rx_bytes += skb->len;
|
||||
total_rx_packets++;
|
||||
|
||||
ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
|
||||
ixgbe_rx_skb(q_vector, skb);
|
||||
}
|
||||
|
||||
if (xdp_xmit & IXGBE_XDP_REDIR)
|
||||
xdp_do_flush_map();
|
||||
|
||||
if (xdp_xmit & IXGBE_XDP_TX) {
|
||||
struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
|
||||
|
||||
/* Force memory writes to complete before letting h/w
|
||||
* know there are new descriptors to fetch.
|
||||
*/
|
||||
wmb();
|
||||
writel(ring->next_to_use, ring->tail);
|
||||
}
|
||||
|
||||
u64_stats_update_begin(&rx_ring->syncp);
|
||||
rx_ring->stats.packets += total_rx_packets;
|
||||
rx_ring->stats.bytes += total_rx_bytes;
|
||||
u64_stats_update_end(&rx_ring->syncp);
|
||||
q_vector->rx.total_packets += total_rx_packets;
|
||||
q_vector->rx.total_bytes += total_rx_bytes;
|
||||
|
||||
return failure ? budget : (int)total_rx_packets;
|
||||
}
|
||||
|
||||
void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring)
|
||||
{
|
||||
u16 i = rx_ring->next_to_clean;
|
||||
struct ixgbe_rx_buffer *bi = &rx_ring->rx_buffer_info[i];
|
||||
|
||||
while (i != rx_ring->next_to_alloc) {
|
||||
xsk_umem_fq_reuse(rx_ring->xsk_umem, bi->handle);
|
||||
i++;
|
||||
bi++;
|
||||
if (i == rx_ring->count) {
|
||||
i = 0;
|
||||
bi = rx_ring->rx_buffer_info;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
|
||||
{
|
||||
union ixgbe_adv_tx_desc *tx_desc = NULL;
|
||||
struct ixgbe_tx_buffer *tx_bi;
|
||||
bool work_done = true;
|
||||
u32 len, cmd_type;
|
||||
dma_addr_t dma;
|
||||
|
||||
while (budget-- > 0) {
|
||||
if (unlikely(!ixgbe_desc_unused(xdp_ring))) {
|
||||
work_done = false;
|
||||
break;
|
||||
}
|
||||
|
||||
if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &dma, &len))
|
||||
break;
|
||||
|
||||
dma_sync_single_for_device(xdp_ring->dev, dma, len,
|
||||
DMA_BIDIRECTIONAL);
|
||||
|
||||
tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use];
|
||||
tx_bi->bytecount = len;
|
||||
tx_bi->xdpf = NULL;
|
||||
|
||||
tx_desc = IXGBE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
|
||||
tx_desc->read.buffer_addr = cpu_to_le64(dma);
|
||||
|
||||
/* put descriptor type bits */
|
||||
cmd_type = IXGBE_ADVTXD_DTYP_DATA |
|
||||
IXGBE_ADVTXD_DCMD_DEXT |
|
||||
IXGBE_ADVTXD_DCMD_IFCS;
|
||||
cmd_type |= len | IXGBE_TXD_CMD;
|
||||
tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
|
||||
tx_desc->read.olinfo_status =
|
||||
cpu_to_le32(len << IXGBE_ADVTXD_PAYLEN_SHIFT);
|
||||
|
||||
xdp_ring->next_to_use++;
|
||||
if (xdp_ring->next_to_use == xdp_ring->count)
|
||||
xdp_ring->next_to_use = 0;
|
||||
}
|
||||
|
||||
if (tx_desc) {
|
||||
ixgbe_xdp_ring_update_tail(xdp_ring);
|
||||
xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
|
||||
}
|
||||
|
||||
return !!budget && work_done;
|
||||
}
|
||||
|
||||
static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring *tx_ring,
|
||||
struct ixgbe_tx_buffer *tx_bi)
|
||||
{
|
||||
xdp_return_frame(tx_bi->xdpf);
|
||||
dma_unmap_single(tx_ring->dev,
|
||||
dma_unmap_addr(tx_bi, dma),
|
||||
dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
|
||||
dma_unmap_len_set(tx_bi, len, 0);
|
||||
}
|
||||
|
||||
bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
|
||||
struct ixgbe_ring *tx_ring, int napi_budget)
|
||||
{
|
||||
unsigned int total_packets = 0, total_bytes = 0;
|
||||
u32 i = tx_ring->next_to_clean, xsk_frames = 0;
|
||||
unsigned int budget = q_vector->tx.work_limit;
|
||||
struct xdp_umem *umem = tx_ring->xsk_umem;
|
||||
union ixgbe_adv_tx_desc *tx_desc;
|
||||
struct ixgbe_tx_buffer *tx_bi;
|
||||
bool xmit_done;
|
||||
|
||||
tx_bi = &tx_ring->tx_buffer_info[i];
|
||||
tx_desc = IXGBE_TX_DESC(tx_ring, i);
|
||||
i -= tx_ring->count;
|
||||
|
||||
do {
|
||||
if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
|
||||
break;
|
||||
|
||||
total_bytes += tx_bi->bytecount;
|
||||
total_packets += tx_bi->gso_segs;
|
||||
|
||||
if (tx_bi->xdpf)
|
||||
ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
|
||||
else
|
||||
xsk_frames++;
|
||||
|
||||
tx_bi->xdpf = NULL;
|
||||
total_bytes += tx_bi->bytecount;
|
||||
|
||||
tx_bi++;
|
||||
tx_desc++;
|
||||
i++;
|
||||
if (unlikely(!i)) {
|
||||
i -= tx_ring->count;
|
||||
tx_bi = tx_ring->tx_buffer_info;
|
||||
tx_desc = IXGBE_TX_DESC(tx_ring, 0);
|
||||
}
|
||||
|
||||
/* issue prefetch for next Tx descriptor */
|
||||
prefetch(tx_desc);
|
||||
|
||||
/* update budget accounting */
|
||||
budget--;
|
||||
} while (likely(budget));
|
||||
|
||||
i += tx_ring->count;
|
||||
tx_ring->next_to_clean = i;
|
||||
|
||||
u64_stats_update_begin(&tx_ring->syncp);
|
||||
tx_ring->stats.bytes += total_bytes;
|
||||
tx_ring->stats.packets += total_packets;
|
||||
u64_stats_update_end(&tx_ring->syncp);
|
||||
q_vector->tx.total_bytes += total_bytes;
|
||||
q_vector->tx.total_packets += total_packets;
|
||||
|
||||
if (xsk_frames)
|
||||
xsk_umem_complete_tx(umem, xsk_frames);
|
||||
|
||||
xmit_done = ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
|
||||
return budget > 0 && xmit_done;
|
||||
}
|
||||
|
||||
int ixgbe_xsk_async_xmit(struct net_device *dev, u32 qid)
|
||||
{
|
||||
struct ixgbe_adapter *adapter = netdev_priv(dev);
|
||||
struct ixgbe_ring *ring;
|
||||
|
||||
if (test_bit(__IXGBE_DOWN, &adapter->state))
|
||||
return -ENETDOWN;
|
||||
|
||||
if (!READ_ONCE(adapter->xdp_prog))
|
||||
return -ENXIO;
|
||||
|
||||
if (qid >= adapter->num_xdp_queues)
|
||||
return -ENXIO;
|
||||
|
||||
if (!adapter->xsk_umems || !adapter->xsk_umems[qid])
|
||||
return -ENXIO;
|
||||
|
||||
ring = adapter->xdp_ring[qid];
|
||||
if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
|
||||
u64 eics = BIT_ULL(ring->q_vector->v_idx);
|
||||
|
||||
ixgbe_irq_rearm_queues(adapter, eics);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
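ixgbe_xsk_async_xmit() above is reached through the new ndo_xsk_async_xmit hook. On the userspace side (not part of this series), the kick is simply a dummy send on the AF_XDP socket once Tx descriptors have been queued on its Tx ring:

/* Illustrative only: wake the driver's XDP Tx ring from userspace. */
sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);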
|
||||
|
||||
void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring)
|
||||
{
|
||||
u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
|
||||
struct xdp_umem *umem = tx_ring->xsk_umem;
|
||||
struct ixgbe_tx_buffer *tx_bi;
|
||||
u32 xsk_frames = 0;
|
||||
|
||||
while (ntc != ntu) {
|
||||
tx_bi = &tx_ring->tx_buffer_info[ntc];
|
||||
|
||||
if (tx_bi->xdpf)
|
||||
ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
|
||||
else
|
||||
xsk_frames++;
|
||||
|
||||
tx_bi->xdpf = NULL;
|
||||
|
||||
ntc++;
|
||||
if (ntc == tx_ring->count)
|
||||
ntc = 0;
|
||||
}
|
||||
|
||||
if (xsk_frames)
|
||||
xsk_umem_complete_tx(umem, xsk_frames);
|
||||
}
|
|
@@ -21,7 +21,6 @@ static int ixgbevf_ipsec_set_pf_sa(struct ixgbevf_adapter *adapter,
 	u32 msgbuf[IXGBE_VFMAILBOX_SIZE] = { 0 };
 	struct ixgbe_hw *hw = &adapter->hw;
 	struct sa_mbx_msg *sam;
-	u16 msglen;
 	int ret;

 	/* send the important bits to the PF */

@@ -38,16 +37,14 @@ static int ixgbevf_ipsec_set_pf_sa(struct ixgbevf_adapter *adapter,
 	memcpy(sam->key, xs->aead->alg_key, sizeof(sam->key));

 	msgbuf[0] = IXGBE_VF_IPSEC_ADD;
-	msglen = sizeof(*sam) + sizeof(msgbuf[0]);

 	spin_lock_bh(&adapter->mbx_lock);

-	ret = hw->mbx.ops.write_posted(hw, msgbuf, msglen);
+	ret = hw->mbx.ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
 	if (ret)
 		goto out;

-	msglen = sizeof(msgbuf[0]) * 2;
-	ret = hw->mbx.ops.read_posted(hw, msgbuf, msglen);
+	ret = hw->mbx.ops.read_posted(hw, msgbuf, 2);
 	if (ret)
 		goto out;

@@ -80,11 +77,11 @@ static int ixgbevf_ipsec_del_pf_sa(struct ixgbevf_adapter *adapter, int pfsa)

 	spin_lock_bh(&adapter->mbx_lock);

-	err = hw->mbx.ops.write_posted(hw, msgbuf, sizeof(msgbuf));
+	err = hw->mbx.ops.write_posted(hw, msgbuf, 2);
 	if (err)
 		goto out;

-	err = hw->mbx.ops.read_posted(hw, msgbuf, sizeof(msgbuf));
+	err = hw->mbx.ops.read_posted(hw, msgbuf, 2);
 	if (err)
 		goto out;

@@ -470,7 +467,7 @@ int ixgbevf_ipsec_tx(struct ixgbevf_ring *tx_ring,
 	}

 	sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
-	if (unlikely(sa_idx > IXGBE_IPSEC_MAX_SA_COUNT)) {
+	if (unlikely(sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT)) {
 		netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n",
 			   __func__, sa_idx, xs->xso.offload_handle);
 		return 0;