mlx5-fixes-2020-09-18
-----BEGIN PGP SIGNATURE-----

iQEzBAABCAAdFiEEGhZs6bAKwk/OTgTpSD+KveBX+j4FAl9pQ8EACgkQSD+KveBX
+j7I2wf/cu9W3mC8sNeJaZKIbJ+H6KhgZsGbeLud5tFscjcf5IaCpR97hyeZPfEG
doNRtcsT9Pj5YJn458L/p+zTVeWOuaOGPMsV8pdP/8OlFzjJW/rGXnBrEUt0ehkS
Sa//xGD6V8+nW9Z34fwQqrrqJeZik3H9V/RkriZUTsJ/zR/otLF3fVOQFwrS9Ka2
/dl1ERFepjBWupY39PSMFS2S2BZ6LYY8G/ewgHKeexbqLykxU27P3+mFz46YPmP6
jdIMmvo+fuPqyu9Tjtg6pGjYpCWttnBBtDmeSg+ewf61qW4mSemJzfGcbZYY2XT6
CxRsm4aTJ5COTEx05JFOqIhpP5LuAA==
=Hcsv
-----END PGP SIGNATURE-----

Merge tag 'mlx5-fixes-2020-09-18' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes-2020-09-18

This series introduces some fixes to the mlx5 driver.

Please pull and let me know if there is any problem.

v1->v2:
 Remove missing patch from -stable list.

For -stable v5.1
 ('net/mlx5: Fix FTE cleanup')

For -stable v5.3
 ('net/mlx5e: TLS, Do not expose FPGA TLS counter if not supported')
 ('net/mlx5e: Enable adding peer miss rules only if merged eswitch is supported')

For -stable v5.7
 ('net/mlx5e: Fix memory leak of tunnel info when rule under multipath not ready')

For -stable v5.8
 ('net/mlx5e: Use RCU to protect rq->xdp_prog')
 ('net/mlx5e: Fix endianness when calculating pedit mask first bit')
 ('net/mlx5e: Use synchronize_rcu to sync with NAPI')
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 47cec3f68c
@@ -600,7 +600,7 @@ struct mlx5e_rq {
 	struct dim         dim; /* Dynamic Interrupt Moderation */
 
 	/* XDP */
-	struct bpf_prog       *xdp_prog;
+	struct bpf_prog __rcu *xdp_prog;
 	struct mlx5e_xdpsq    *xdpsq;
 	DECLARE_BITMAP(flags, 8);
 	struct page_pool      *page_pool;
@@ -1005,7 +1005,6 @@ int mlx5e_update_nic_rx(struct mlx5e_priv *priv);
 void mlx5e_update_carrier(struct mlx5e_priv *priv);
 int mlx5e_close(struct net_device *netdev);
 int mlx5e_open(struct net_device *netdev);
-void mlx5e_update_ndo_stats(struct mlx5e_priv *priv);
 
 void mlx5e_queue_update_stats(struct mlx5e_priv *priv);
 int mlx5e_bits_invert(unsigned long a, int size);
@@ -51,7 +51,7 @@ static void mlx5e_monitor_counters_work(struct work_struct *work)
 					       monitor_counters_work);
 
 	mutex_lock(&priv->state_lock);
-	mlx5e_update_ndo_stats(priv);
+	mlx5e_stats_update_ndo_stats(priv);
 	mutex_unlock(&priv->state_lock);
 	mlx5e_monitor_counter_arm(priv);
 }
@@ -490,11 +490,8 @@ bool mlx5e_fec_in_caps(struct mlx5_core_dev *dev, int fec_policy)
 	int err;
 	int i;
 
-	if (!MLX5_CAP_GEN(dev, pcam_reg))
-		return -EOPNOTSUPP;
-
-	if (!MLX5_CAP_PCAM_REG(dev, pplm))
-		return -EOPNOTSUPP;
+	if (!MLX5_CAP_GEN(dev, pcam_reg) || !MLX5_CAP_PCAM_REG(dev, pplm))
+		return false;
 
 	MLX5_SET(pplm_reg, in, local_port, 1);
 	err = mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPLM, 0, 0);
@@ -699,6 +699,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
 err_rule:
 	mlx5e_mod_hdr_detach(ct_priv->esw->dev,
 			     &esw->offloads.mod_hdr, zone_rule->mh);
+	mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id);
 err_mod_hdr:
 	kfree(spec);
 	return err;
@@ -958,12 +959,22 @@ mlx5_tc_ct_add_no_trk_match(struct mlx5e_priv *priv,
 	return 0;
 }
 
+void mlx5_tc_ct_match_del(struct mlx5e_priv *priv, struct mlx5_ct_attr *ct_attr)
+{
+	struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
+
+	if (!ct_priv || !ct_attr->ct_labels_id)
+		return;
+
+	mapping_remove(ct_priv->labels_mapping, ct_attr->ct_labels_id);
+}
+
 int
-mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
-		       struct mlx5_flow_spec *spec,
-		       struct flow_cls_offload *f,
-		       struct mlx5_ct_attr *ct_attr,
-		       struct netlink_ext_ack *extack)
+mlx5_tc_ct_match_add(struct mlx5e_priv *priv,
+		     struct mlx5_flow_spec *spec,
+		     struct flow_cls_offload *f,
+		     struct mlx5_ct_attr *ct_attr,
+		     struct netlink_ext_ack *extack)
 {
 	struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
@@ -87,12 +87,15 @@ mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv);
 void
 mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv);
 
+void
+mlx5_tc_ct_match_del(struct mlx5e_priv *priv, struct mlx5_ct_attr *ct_attr);
+
 int
-mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
-		       struct mlx5_flow_spec *spec,
-		       struct flow_cls_offload *f,
-		       struct mlx5_ct_attr *ct_attr,
-		       struct netlink_ext_ack *extack);
+mlx5_tc_ct_match_add(struct mlx5e_priv *priv,
+		     struct mlx5_flow_spec *spec,
+		     struct flow_cls_offload *f,
+		     struct mlx5_ct_attr *ct_attr,
+		     struct netlink_ext_ack *extack);
 int
 mlx5_tc_ct_add_no_trk_match(struct mlx5e_priv *priv,
 			    struct mlx5_flow_spec *spec);
@@ -130,12 +133,15 @@ mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv)
 {
 }
 
+static inline void
+mlx5_tc_ct_match_del(struct mlx5e_priv *priv, struct mlx5_ct_attr *ct_attr) {}
+
 static inline int
-mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
-		       struct mlx5_flow_spec *spec,
-		       struct flow_cls_offload *f,
-		       struct mlx5_ct_attr *ct_attr,
-		       struct netlink_ext_ack *extack)
+mlx5_tc_ct_match_add(struct mlx5e_priv *priv,
+		     struct mlx5_flow_spec *spec,
+		     struct flow_cls_offload *f,
+		     struct mlx5_ct_attr *ct_attr,
+		     struct netlink_ext_ack *extack)
 {
 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
 
@@ -20,6 +20,11 @@ enum mlx5e_icosq_wqe_type {
 };
 
 /* General */
+static inline bool mlx5e_skb_is_multicast(struct sk_buff *skb)
+{
+	return skb->pkt_type == PACKET_MULTICAST || skb->pkt_type == PACKET_BROADCAST;
+}
+
 void mlx5e_trigger_irq(struct mlx5e_icosq *sq);
 void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe);
 void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
@@ -122,7 +122,7 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
 bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
 		      u32 *len, struct xdp_buff *xdp)
 {
-	struct bpf_prog *prog = READ_ONCE(rq->xdp_prog);
+	struct bpf_prog *prog = rcu_dereference(rq->xdp_prog);
 	u32 act;
 	int err;
 
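The hunk above is the read side of the rq->xdp_prog conversion. As a reading aid, here is a condensed sketch of the overall pattern the series adopts, assembled from several hunks in this merge (it is not a standalone buildable unit, and surrounding declarations are elided):

/* Field in struct mlx5e_rq: the program pointer is now RCU-annotated. */
struct bpf_prog __rcu *xdp_prog;

/* Datapath: NAPI poll now runs inside rcu_read_lock()/rcu_read_unlock()
 * (see the en_txrx.c hunks below), so per-packet handlers can simply do: */
struct bpf_prog *prog = rcu_dereference(rq->xdp_prog);
if (prog)
	act = bpf_prog_run_xdp(prog, xdp);

/* Control path: publish a new program under priv->state_lock and drop the
 * reference to the old one. */
old_prog = rcu_replace_pointer(rq->xdp_prog, prog,
			       lockdep_is_held(&rq->channel->priv->state_lock));
if (old_prog)
	bpf_prog_put(old_prog);

/* Teardown: wait for in-flight RCU readers (NAPI poll, XSK wakeup) instead
 * of napi_synchronize(). */
clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
synchronize_rcu();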
@@ -31,7 +31,6 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
 {
 	struct xdp_buff *xdp = wi->umr.dma_info[page_idx].xsk;
 	u32 cqe_bcnt32 = cqe_bcnt;
-	bool consumed;
 
 	/* Check packet size. Note LRO doesn't use linear SKB */
 	if (unlikely(cqe_bcnt > rq->hw_mtu)) {
@@ -51,10 +50,6 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
 	xsk_buff_dma_sync_for_cpu(xdp);
 	prefetch(xdp->data);
 
-	rcu_read_lock();
-	consumed = mlx5e_xdp_handle(rq, NULL, &cqe_bcnt32, xdp);
-	rcu_read_unlock();
-
 	/* Possible flows:
 	 * - XDP_REDIRECT to XSKMAP:
 	 *   The page is owned by the userspace from now.
@@ -70,7 +65,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
 	 *   allocated first from the Reuse Ring, so it has enough space.
 	 */
 
-	if (likely(consumed)) {
+	if (likely(mlx5e_xdp_handle(rq, NULL, &cqe_bcnt32, xdp))) {
 		if (likely(__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)))
 			__set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
 		return NULL; /* page/packet was consumed by XDP */
@@ -88,7 +83,6 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
 					      u32 cqe_bcnt)
 {
 	struct xdp_buff *xdp = wi->di->xsk;
-	bool consumed;
 
 	/* wi->offset is not used in this function, because xdp->data and the
 	 * DMA address point directly to the necessary place. Furthermore, the
@@ -107,11 +101,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
 		return NULL;
 	}
 
-	rcu_read_lock();
-	consumed = mlx5e_xdp_handle(rq, NULL, &cqe_bcnt, xdp);
-	rcu_read_unlock();
-
-	if (likely(consumed))
+	if (likely(mlx5e_xdp_handle(rq, NULL, &cqe_bcnt, xdp)))
 		return NULL; /* page/packet was consumed by XDP */
 
 	/* XDP_PASS: copy the data from the UMEM to a new SKB. The frame reuse
@@ -106,8 +106,7 @@ err_free_cparam:
 void mlx5e_close_xsk(struct mlx5e_channel *c)
 {
 	clear_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
-	napi_synchronize(&c->napi);
-	synchronize_rcu(); /* Sync with the XSK wakeup. */
+	synchronize_rcu(); /* Sync with the XSK wakeup and with NAPI. */
 
 	mlx5e_close_rq(&c->xskrq);
 	mlx5e_close_cq(&c->xskrq.cq);
@@ -234,7 +234,7 @@ mlx5e_get_ktls_rx_priv_ctx(struct tls_context *tls_ctx)
 
 /* Re-sync */
 /* Runs in work context */
-static struct mlx5_wqe_ctrl_seg *
+static int
 resync_post_get_progress_params(struct mlx5e_icosq *sq,
 				struct mlx5e_ktls_offload_context_rx *priv_rx)
 {
@@ -258,15 +258,19 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
 				       PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
 	if (unlikely(dma_mapping_error(pdev, buf->dma_addr))) {
 		err = -ENOMEM;
-		goto err_out;
+		goto err_free;
 	}
 
 	buf->priv_rx = priv_rx;
 
 	BUILD_BUG_ON(MLX5E_KTLS_GET_PROGRESS_WQEBBS != 1);
+
+	spin_lock(&sq->channel->async_icosq_lock);
+
 	if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1))) {
+		spin_unlock(&sq->channel->async_icosq_lock);
 		err = -ENOSPC;
-		goto err_out;
+		goto err_dma_unmap;
 	}
 
 	pi = mlx5e_icosq_get_next_pi(sq, 1);
@@ -294,12 +298,18 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
 	};
 	icosq_fill_wi(sq, pi, &wi);
 	sq->pc++;
+	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
+	spin_unlock(&sq->channel->async_icosq_lock);
 
-	return cseg;
+	return 0;
 
+err_dma_unmap:
+	dma_unmap_single(pdev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
+err_free:
+	kfree(buf);
 err_out:
 	priv_rx->stats->tls_resync_req_skip++;
-	return ERR_PTR(err);
+	return err;
 }
 
 /* Function is called with elevated refcount.
@@ -309,10 +319,8 @@ static void resync_handle_work(struct work_struct *work)
 {
 	struct mlx5e_ktls_offload_context_rx *priv_rx;
 	struct mlx5e_ktls_rx_resync_ctx *resync;
-	struct mlx5_wqe_ctrl_seg *cseg;
 	struct mlx5e_channel *c;
 	struct mlx5e_icosq *sq;
-	struct mlx5_wq_cyc *wq;
 
 	resync = container_of(work, struct mlx5e_ktls_rx_resync_ctx, work);
 	priv_rx = container_of(resync, struct mlx5e_ktls_offload_context_rx, resync);
@@ -324,18 +332,9 @@ static void resync_handle_work(struct work_struct *work)
 
 	c = resync->priv->channels.c[priv_rx->rxq];
 	sq = &c->async_icosq;
-	wq = &sq->wq;
 
-	spin_lock(&c->async_icosq_lock);
-
-	cseg = resync_post_get_progress_params(sq, priv_rx);
-	if (IS_ERR(cseg)) {
+	if (resync_post_get_progress_params(sq, priv_rx))
 		refcount_dec(&resync->refcnt);
-		goto unlock;
-	}
-	mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
-unlock:
-	spin_unlock(&c->async_icosq_lock);
 }
 
 static void resync_init(struct mlx5e_ktls_rx_resync_ctx *resync,
@@ -386,16 +385,17 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
 	struct mlx5e_ktls_offload_context_rx *priv_rx;
 	struct mlx5e_ktls_rx_resync_ctx *resync;
 	u8 tracker_state, auth_state, *ctx;
+	struct device *dev;
 	u32 hw_seq;
 
 	priv_rx = buf->priv_rx;
 	resync = &priv_rx->resync;
-
+	dev = resync->priv->mdev->device;
 	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
 		goto out;
 
-	dma_sync_single_for_cpu(resync->priv->mdev->device, buf->dma_addr,
-				PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
+	dma_sync_single_for_cpu(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE,
+				DMA_FROM_DEVICE);
 
 	ctx = buf->progress.ctx;
 	tracker_state = MLX5_GET(tls_progress_params, ctx, record_tracker_state);
@@ -411,6 +411,7 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
 	priv_rx->stats->tls_resync_req_end++;
 out:
 	refcount_dec(&resync->refcnt);
+	dma_unmap_single(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
 	kfree(buf);
 }
 
@@ -659,7 +660,7 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
 	priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_ctx);
 	set_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags);
 	mlx5e_set_ktls_rx_priv_ctx(tls_ctx, NULL);
-	napi_synchronize(&priv->channels.c[priv_rx->rxq]->napi);
+	synchronize_rcu(); /* Sync with NAPI */
 	if (!cancel_work_sync(&priv_rx->rule.work))
 		/* completion is needed, as the priv_rx in the add flow
 		 * is maintained on the wqe info (wi), not on the socket.
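The kTLS resync hunks above also complete the DMA lifecycle of the progress-params buffer: the mapping created by dma_map_single() is now released on every path. A minimal outline of that pairing, using kernel-style fragments taken from the changed functions (not a standalone unit):

buf->dma_addr = dma_map_single(pdev, &buf->progress,
			       PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(pdev, buf->dma_addr)))
	goto err_free;		/* nothing mapped yet, only kfree(buf) */

/* ... post the GET_PSV WQE; if the ICOSQ has no room: */
	goto err_dma_unmap;	/* new label: dma_unmap_single(), then free */

/* Completion handler: hand the buffer back to the CPU, parse it, then
 * release both the mapping and the buffer. */
dma_sync_single_for_cpu(dev, buf->dma_addr,
			PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
/* ... read the progress params ... */
dma_unmap_single(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
kfree(buf);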
@@ -35,7 +35,6 @@
 #include <net/sock.h>
 
 #include "en.h"
 #include "accel/tls.h"
-#include "fpga/sdk.h"
 #include "en_accel/tls.h"
 
@@ -51,9 +50,14 @@ static const struct counter_desc mlx5e_tls_sw_stats_desc[] = {
 
 #define NUM_TLS_SW_COUNTERS ARRAY_SIZE(mlx5e_tls_sw_stats_desc)
 
+static bool is_tls_atomic_stats(struct mlx5e_priv *priv)
+{
+	return priv->tls && !mlx5_accel_is_ktls_device(priv->mdev);
+}
+
 int mlx5e_tls_get_count(struct mlx5e_priv *priv)
 {
-	if (!priv->tls)
+	if (!is_tls_atomic_stats(priv))
 		return 0;
 
 	return NUM_TLS_SW_COUNTERS;
@@ -63,7 +67,7 @@ int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data)
 {
 	unsigned int i, idx = 0;
 
-	if (!priv->tls)
+	if (!is_tls_atomic_stats(priv))
 		return 0;
 
 	for (i = 0; i < NUM_TLS_SW_COUNTERS; i++)
@@ -77,7 +81,7 @@ int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data)
 {
 	int i, idx = 0;
 
-	if (!priv->tls)
+	if (!is_tls_atomic_stats(priv))
 		return 0;
 
 	for (i = 0; i < NUM_TLS_SW_COUNTERS; i++)
@@ -158,16 +158,6 @@ static void mlx5e_update_carrier_work(struct work_struct *work)
 	mutex_unlock(&priv->state_lock);
 }
 
-void mlx5e_update_ndo_stats(struct mlx5e_priv *priv)
-{
-	int i;
-
-	for (i = mlx5e_nic_stats_grps_num(priv) - 1; i >= 0; i--)
-		if (mlx5e_nic_stats_grps[i]->update_stats_mask &
-		    MLX5E_NDO_UPDATE_STATS)
-			mlx5e_nic_stats_grps[i]->update_stats(priv);
-}
-
 static void mlx5e_update_stats_work(struct work_struct *work)
 {
 	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
@@ -399,7 +389,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 
 	if (params->xdp_prog)
 		bpf_prog_inc(params->xdp_prog);
-	rq->xdp_prog = params->xdp_prog;
+	RCU_INIT_POINTER(rq->xdp_prog, params->xdp_prog);
 
 	rq_xdp_ix = rq->ix;
 	if (xsk)
@@ -408,7 +398,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 	if (err < 0)
 		goto err_rq_wq_destroy;
 
-	rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
+	rq->buff.map_dir = params->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
 	rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, xsk);
 	pool_size = 1 << params->log_rq_mtu_frames;
 
@@ -564,8 +554,8 @@ err_free:
 	}
 
 err_rq_wq_destroy:
-	if (rq->xdp_prog)
-		bpf_prog_put(rq->xdp_prog);
+	if (params->xdp_prog)
+		bpf_prog_put(params->xdp_prog);
 	xdp_rxq_info_unreg(&rq->xdp_rxq);
 	page_pool_destroy(rq->page_pool);
 	mlx5_wq_destroy(&rq->wq_ctrl);
@@ -575,10 +565,16 @@ err_rq_wq_destroy:
 
 static void mlx5e_free_rq(struct mlx5e_rq *rq)
 {
+	struct mlx5e_channel *c = rq->channel;
+	struct bpf_prog *old_prog = NULL;
 	int i;
 
-	if (rq->xdp_prog)
-		bpf_prog_put(rq->xdp_prog);
+	/* drop_rq has neither channel nor xdp_prog. */
+	if (c)
+		old_prog = rcu_dereference_protected(rq->xdp_prog,
						     lockdep_is_held(&c->priv->state_lock));
+	if (old_prog)
+		bpf_prog_put(old_prog);
 
 	switch (rq->wq_type) {
 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
@@ -867,7 +863,7 @@ void mlx5e_activate_rq(struct mlx5e_rq *rq)
 void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
 {
 	clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
-	napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
+	synchronize_rcu(); /* Sync with NAPI to prevent mlx5e_post_rx_wqes. */
 }
 
 void mlx5e_close_rq(struct mlx5e_rq *rq)
@@ -1312,12 +1308,10 @@ void mlx5e_tx_disable_queue(struct netdev_queue *txq)
 
 static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
 {
-	struct mlx5e_channel *c = sq->channel;
 	struct mlx5_wq_cyc *wq = &sq->wq;
 
 	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
-	/* prevent netif_tx_wake_queue */
-	napi_synchronize(&c->napi);
+	synchronize_rcu(); /* Sync with NAPI to prevent netif_tx_wake_queue. */
 
 	mlx5e_tx_disable_queue(sq->txq);
 
@@ -1392,10 +1386,8 @@ void mlx5e_activate_icosq(struct mlx5e_icosq *icosq)
 
 void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq)
 {
-	struct mlx5e_channel *c = icosq->channel;
-
 	clear_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state);
-	napi_synchronize(&c->napi);
+	synchronize_rcu(); /* Sync with NAPI. */
 }
 
 void mlx5e_close_icosq(struct mlx5e_icosq *sq)
@@ -1474,7 +1466,7 @@ void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
 	struct mlx5e_channel *c = sq->channel;
 
 	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
-	napi_synchronize(&c->napi);
+	synchronize_rcu(); /* Sync with NAPI. */
 
 	mlx5e_destroy_sq(c->mdev, sq->sqn);
 	mlx5e_free_xdpsq_descs(sq);
@@ -3567,6 +3559,7 @@ void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s)
 
 		s->rx_packets   += rq_stats->packets + xskrq_stats->packets;
 		s->rx_bytes     += rq_stats->bytes + xskrq_stats->bytes;
+		s->multicast    += rq_stats->mcast_packets + xskrq_stats->mcast_packets;
 
 		for (j = 0; j < priv->max_opened_tc; j++) {
 			struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];
@@ -3582,7 +3575,6 @@ void
 mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
-	struct mlx5e_vport_stats *vstats = &priv->stats.vport;
 	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
 
 	/* In switchdev mode, monitor counters doesn't monitor
@@ -3617,12 +3609,6 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
 	stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
 			   stats->rx_frame_errors;
 	stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;
-
-	/* vport multicast also counts packets that are dropped due to steering
-	 * or rx out of buffer
-	 */
-	stats->multicast =
-		VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
 }
 
 static void mlx5e_set_rx_mode(struct net_device *dev)
@@ -4330,6 +4316,16 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
 	return 0;
 }
 
+static void mlx5e_rq_replace_xdp_prog(struct mlx5e_rq *rq, struct bpf_prog *prog)
+{
+	struct bpf_prog *old_prog;
+
+	old_prog = rcu_replace_pointer(rq->xdp_prog, prog,
+				       lockdep_is_held(&rq->channel->priv->state_lock));
+	if (old_prog)
+		bpf_prog_put(old_prog);
+}
+
 static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -4388,29 +4384,10 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
 	 */
 	for (i = 0; i < priv->channels.num; i++) {
 		struct mlx5e_channel *c = priv->channels.c[i];
-		bool xsk_open = test_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
 
-		clear_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
-		if (xsk_open)
-			clear_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
-		napi_synchronize(&c->napi);
-		/* prevent mlx5e_poll_rx_cq from accessing rq->xdp_prog */
-
-		old_prog = xchg(&c->rq.xdp_prog, prog);
-		if (old_prog)
-			bpf_prog_put(old_prog);
-
-		if (xsk_open) {
-			old_prog = xchg(&c->xskrq.xdp_prog, prog);
-			if (old_prog)
-				bpf_prog_put(old_prog);
-		}
-
-		set_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
-		if (xsk_open)
-			set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
-		/* napi_schedule in case we have missed anything */
-		napi_schedule(&c->napi);
+		mlx5e_rq_replace_xdp_prog(&c->rq, prog);
+		if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
+			mlx5e_rq_replace_xdp_prog(&c->xskrq, prog);
 	}
 
 unlock:
@@ -5200,7 +5177,7 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
 	.enable            = mlx5e_nic_enable,
 	.disable           = mlx5e_nic_disable,
 	.update_rx         = mlx5e_update_nic_rx,
-	.update_stats      = mlx5e_update_ndo_stats,
+	.update_stats      = mlx5e_stats_update_ndo_stats,
 	.update_carrier    = mlx5e_update_carrier,
 	.rx_handlers       = &mlx5e_rx_handlers_nic,
 	.max_tc            = MLX5E_MAX_NUM_TC,
@@ -1171,7 +1171,7 @@ static const struct mlx5e_profile mlx5e_rep_profile = {
 	.cleanup_tx        = mlx5e_cleanup_rep_tx,
 	.enable            = mlx5e_rep_enable,
 	.update_rx         = mlx5e_update_rep_rx,
-	.update_stats      = mlx5e_update_ndo_stats,
+	.update_stats      = mlx5e_stats_update_ndo_stats,
 	.rx_handlers       = &mlx5e_rx_handlers_rep,
 	.max_tc            = 1,
 	.rq_groups         = MLX5E_NUM_RQ_GROUPS(REGULAR),
@@ -1189,7 +1189,7 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
 	.enable            = mlx5e_uplink_rep_enable,
 	.disable           = mlx5e_uplink_rep_disable,
 	.update_rx         = mlx5e_update_rep_rx,
-	.update_stats      = mlx5e_update_ndo_stats,
+	.update_stats      = mlx5e_stats_update_ndo_stats,
 	.update_carrier    = mlx5e_update_carrier,
 	.rx_handlers       = &mlx5e_rx_handlers_rep,
 	.max_tc            = MLX5E_MAX_NUM_TC,
@@ -53,6 +53,7 @@
 #include "en/xsk/rx.h"
 #include "en/health.h"
 #include "en/params.h"
+#include "en/txrx.h"
 
 static struct sk_buff *
 mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
@@ -1080,6 +1081,9 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
 		mlx5e_enable_ecn(rq, skb);
 
 	skb->protocol = eth_type_trans(skb, netdev);
+
+	if (unlikely(mlx5e_skb_is_multicast(skb)))
+		stats->mcast_packets++;
 }
 
 static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
@@ -1132,7 +1136,6 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
 	struct xdp_buff xdp;
 	struct sk_buff *skb;
 	void *va, *data;
-	bool consumed;
 	u32 frag_size;
 
 	va             = page_address(di->page) + wi->offset;
@@ -1144,11 +1147,8 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
 	prefetchw(va); /* xdp_frame data area */
 	prefetch(data);
 
-	rcu_read_lock();
 	mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
-	consumed = mlx5e_xdp_handle(rq, di, &cqe_bcnt, &xdp);
-	rcu_read_unlock();
-	if (consumed)
+	if (mlx5e_xdp_handle(rq, di, &cqe_bcnt, &xdp))
 		return NULL; /* page/packet was consumed by XDP */
 
 	rx_headroom = xdp.data - xdp.data_hard_start;
@@ -1438,7 +1438,6 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 	struct sk_buff *skb;
 	void *va, *data;
 	u32 frag_size;
-	bool consumed;
 
 	/* Check packet size. Note LRO doesn't use linear SKB */
 	if (unlikely(cqe_bcnt > rq->hw_mtu)) {
@@ -1455,11 +1454,8 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 	prefetchw(va); /* xdp_frame data area */
 	prefetch(data);
 
-	rcu_read_lock();
 	mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt32, &xdp);
-	consumed = mlx5e_xdp_handle(rq, di, &cqe_bcnt32, &xdp);
-	rcu_read_unlock();
-	if (consumed) {
+	if (mlx5e_xdp_handle(rq, di, &cqe_bcnt32, &xdp)) {
 		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
 			__set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
 		return NULL; /* page/packet was consumed by XDP */
@@ -54,6 +54,18 @@ unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv)
 	return total;
 }
 
+void mlx5e_stats_update_ndo_stats(struct mlx5e_priv *priv)
+{
+	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
+	const unsigned int num_stats_grps = stats_grps_num(priv);
+	int i;
+
+	for (i = num_stats_grps - 1; i >= 0; i--)
+		if (stats_grps[i]->update_stats &&
+		    stats_grps[i]->update_stats_mask & MLX5E_NDO_UPDATE_STATS)
+			stats_grps[i]->update_stats(priv);
+}
+
 void mlx5e_stats_update(struct mlx5e_priv *priv)
 {
 	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
@@ -103,6 +103,7 @@ unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv);
 void mlx5e_stats_update(struct mlx5e_priv *priv);
 void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx);
 void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data);
+void mlx5e_stats_update_ndo_stats(struct mlx5e_priv *priv);
 
 /* Concrete NIC Stats */
 
@@ -119,6 +120,7 @@ struct mlx5e_sw_stats {
 	u64 tx_nop;
 	u64 rx_lro_packets;
 	u64 rx_lro_bytes;
+	u64 rx_mcast_packets;
 	u64 rx_ecn_mark;
 	u64 rx_removed_vlan_packets;
 	u64 rx_csum_unnecessary;
@@ -298,6 +300,7 @@ struct mlx5e_rq_stats {
 	u64 csum_none;
 	u64 lro_packets;
 	u64 lro_bytes;
+	u64 mcast_packets;
 	u64 ecn_mark;
 	u64 removed_vlan_packets;
 	u64 xdp_drop;
|
@ -1290,11 +1290,8 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
|
|||
|
||||
mlx5e_put_flow_tunnel_id(flow);
|
||||
|
||||
if (flow_flag_test(flow, NOT_READY)) {
|
||||
if (flow_flag_test(flow, NOT_READY))
|
||||
remove_unready_flow(flow);
|
||||
kvfree(attr->parse_attr);
|
||||
return;
|
||||
}
|
||||
|
||||
if (mlx5e_is_offloaded_flow(flow)) {
|
||||
if (flow_flag_test(flow, SLOW))
|
||||
|
@ -1315,6 +1312,8 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
|
|||
}
|
||||
kvfree(attr->parse_attr);
|
||||
|
||||
mlx5_tc_ct_match_del(priv, &flow->esw_attr->ct_attr);
|
||||
|
||||
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
|
||||
mlx5e_detach_mod_hdr(priv, flow);
|
||||
|
||||
|
@@ -2625,6 +2624,22 @@ static struct mlx5_fields fields[] = {
 	OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest,   0, udp_dport),
 };
 
+static unsigned long mask_to_le(unsigned long mask, int size)
+{
+	__be32 mask_be32;
+	__be16 mask_be16;
+
+	if (size == 32) {
+		mask_be32 = (__force __be32)(mask);
+		mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
+	} else if (size == 16) {
+		mask_be32 = (__force __be32)(mask);
+		mask_be16 = *(__be16 *)&mask_be32;
+		mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
+	}
+
+	return mask;
+}
 static int offload_pedit_fields(struct mlx5e_priv *priv,
 				int namespace,
 				struct pedit_headers_action *hdrs,
@@ -2638,9 +2653,7 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
 	u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
 	struct mlx5e_tc_mod_hdr_acts *mod_acts;
 	struct mlx5_fields *f;
-	unsigned long mask;
-	__be32 mask_be32;
-	__be16 mask_be16;
+	unsigned long mask, field_mask;
 	int err;
 	u8 cmd;
 
@@ -2706,14 +2719,7 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
 		if (skip)
 			continue;
 
-		if (f->field_bsize == 32) {
-			mask_be32 = (__force __be32)(mask);
-			mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
-		} else if (f->field_bsize == 16) {
-			mask_be32 = (__force __be32)(mask);
-			mask_be16 = *(__be16 *)&mask_be32;
-			mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
-		}
+		mask = mask_to_le(mask, f->field_bsize);
 
 		first = find_first_bit(&mask, f->field_bsize);
 		next_z = find_next_zero_bit(&mask, f->field_bsize, first);
@@ -2744,9 +2750,10 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
 		if (cmd == MLX5_ACTION_TYPE_SET) {
 			int start;
 
+			field_mask = mask_to_le(f->field_mask, f->field_bsize);
+
 			/* if field is bit sized it can start not from first bit */
-			start = find_first_bit((unsigned long *)&f->field_mask,
-					       f->field_bsize);
+			start = find_first_bit(&field_mask, f->field_bsize);
 
 			MLX5_SET(set_action_in, action, offset, first - start);
 			/* length is num of bits to be written, zero means length of 32 */
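The two hunks above replace the open-coded byte swapping with mask_to_le() and apply the same conversion to f->field_mask before the first-bit search. A standalone user-space illustration of why the conversion matters (this program is not driver code, and the mask value is made up):

#include <stdint.h>
#include <stdio.h>
#include <strings.h>	/* ffs() */
#include <arpa/inet.h>	/* htons(), ntohs() */

int main(void)
{
	/* "Rewrite the upper byte of a 16-bit header field": mask 0xff00,
	 * stored in network (big-endian) byte order, as it is in the match
	 * and pedit structures. */
	uint16_t wire_mask = htons(0xff00);
	uint16_t cpu_mask  = ntohs(wire_mask);	/* what mask_to_le() yields */

	printf("raw layout : first set bit = %d\n", ffs(wire_mask));
	printf("converted  : first set bit = %d\n", ffs(cpu_mask));
	/* On a little-endian host the two differ (1 vs 9), so the offset and
	 * length programmed into set_action_in would be wrong without the
	 * conversion; on big-endian hosts they agree. */
	return 0;
}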
@@ -4402,8 +4409,8 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
 		goto err_free;
 
 	/* actions validation depends on parsing the ct matches first */
-	err = mlx5_tc_ct_parse_match(priv, &parse_attr->spec, f,
-				     &flow->esw_attr->ct_attr, extack);
+	err = mlx5_tc_ct_match_add(priv, &parse_attr->spec, f,
+				   &flow->esw_attr->ct_attr, extack);
 	if (err)
 		goto err_free;
 
@@ -121,13 +121,17 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
 	struct mlx5e_xdpsq *xsksq = &c->xsksq;
 	struct mlx5e_rq *xskrq = &c->xskrq;
 	struct mlx5e_rq *rq = &c->rq;
-	bool xsk_open = test_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
 	bool aff_change = false;
 	bool busy_xsk = false;
 	bool busy = false;
 	int work_done = 0;
+	bool xsk_open;
 	int i;
 
+	rcu_read_lock();
+
+	xsk_open = test_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
+
 	ch_stats->poll++;
 
 	for (i = 0; i < c->num_tc; i++)
@@ -167,8 +171,10 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
 	busy |= busy_xsk;
 
 	if (busy) {
-		if (likely(mlx5e_channel_no_affinity_change(c)))
-			return budget;
+		if (likely(mlx5e_channel_no_affinity_change(c))) {
+			work_done = budget;
+			goto out;
+		}
 		ch_stats->aff_change++;
 		aff_change = true;
 		if (budget && work_done == budget)
@@ -176,7 +182,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
 	}
 
 	if (unlikely(!napi_complete_done(napi, work_done)))
-		return work_done;
+		goto out;
 
 	ch_stats->arm++;
 
@@ -203,6 +209,9 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
 		ch_stats->force_irq++;
 	}
 
+out:
+	rcu_read_unlock();
+
 	return work_done;
 }
 
@@ -1219,36 +1219,38 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
 	}
 	esw->fdb_table.offloads.send_to_vport_grp = g;
 
-	/* create peer esw miss group */
-	memset(flow_group_in, 0, inlen);
+	if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
+		/* create peer esw miss group */
+		memset(flow_group_in, 0, inlen);
 
-	esw_set_flow_group_source_port(esw, flow_group_in);
+		esw_set_flow_group_source_port(esw, flow_group_in);
 
-	if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
-		match_criteria = MLX5_ADDR_OF(create_flow_group_in,
-					      flow_group_in,
-					      match_criteria);
+		if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
+			match_criteria = MLX5_ADDR_OF(create_flow_group_in,
+						      flow_group_in,
+						      match_criteria);
 
-		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
-				 misc_parameters.source_eswitch_owner_vhca_id);
+			MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+					 misc_parameters.source_eswitch_owner_vhca_id);
 
-		MLX5_SET(create_flow_group_in, flow_group_in,
-			 source_eswitch_owner_vhca_id_valid, 1);
-	}
+			MLX5_SET(create_flow_group_in, flow_group_in,
+				 source_eswitch_owner_vhca_id_valid, 1);
+		}
 
-	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
-	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
-		 ix + esw->total_vports - 1);
-	ix += esw->total_vports;
+		MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
+		MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
+			 ix + esw->total_vports - 1);
+		ix += esw->total_vports;
 
-	g = mlx5_create_flow_group(fdb, flow_group_in);
-	if (IS_ERR(g)) {
-		err = PTR_ERR(g);
-		esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
-		goto peer_miss_err;
-	}
-	esw->fdb_table.offloads.peer_miss_grp = g;
+		g = mlx5_create_flow_group(fdb, flow_group_in);
+		if (IS_ERR(g)) {
+			err = PTR_ERR(g);
+			esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
+			goto peer_miss_err;
+		}
+		esw->fdb_table.offloads.peer_miss_grp = g;
+	}
 
 	/* create miss group */
 	memset(flow_group_in, 0, inlen);
 	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
@@ -1281,7 +1283,8 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
 miss_rule_err:
 	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
 miss_err:
-	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
+	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
+		mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
 peer_miss_err:
 	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
 send_vport_err:
@@ -1305,7 +1308,8 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
 	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
 	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
 	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
-	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
+	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
+		mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
 	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
 
 	mlx5_esw_chains_destroy(esw);
@@ -654,7 +654,7 @@ static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
 	fte->action = *flow_act;
 	fte->flow_context = spec->flow_context;
 
-	tree_init_node(&fte->node, NULL, del_sw_fte);
+	tree_init_node(&fte->node, del_hw_fte, del_sw_fte);
 
 	return fte;
 }
@@ -1792,7 +1792,6 @@ skip_search:
 		up_write_ref_node(&g->node, false);
 		rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
-		up_write_ref_node(&fte->node, false);
 		tree_put_node(&fte->node, false);
 		return rule;
 	}
 	rule = ERR_PTR(-ENOENT);
@@ -1891,7 +1890,6 @@ search_again_locked:
 	up_write_ref_node(&g->node, false);
 	rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
-	up_write_ref_node(&fte->node, false);
 	tree_put_node(&fte->node, false);
 	tree_put_node(&g->node, false);
 	return rule;
 
@@ -2001,7 +1999,9 @@ void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
 		up_write_ref_node(&fte->node, false);
 	} else {
 		del_hw_fte(&fte->node);
-		up_write(&fte->node.lock);
+		/* Avoid double call to del_hw_fte */
+		fte->node.del_hw_func = NULL;
+		up_write_ref_node(&fte->node, false);
 		tree_put_node(&fte->node, false);
 	}
 	kfree(handle);