net/mlx5e: Fix broken SKB allocation in HW-GRO

When the HW does not perform header-data split, it writes the whole packet
into the data buffer in the WQ. In that case the SHAMPO CQE handler cannot
use the header entry to build the SKB; instead it should allocate new memory
and build the SKB using mlx5e_skb_from_cqe_mpwrq_nonlinear().

Fixes: f97d5c2a45 ("net/mlx5e: Add handle SHAMPO cqe support")
Signed-off-by: Khalid Manaa <khalidm@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
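
The gist of the change, condensed from the diff below into a short sketch (same
variable and function names as the driver; not standalone code): if the CQE
reports a header size, the SKB is built around the SHAMPO header entry as
before; if the header size is zero, the HW did not split the packet and the
whole frame sits in the data buffer, so the SKB comes from the nonlinear MPWQE
path and the later data-filling step is skipped.

	if (!*skb) {
		if (likely(head_size))
			/* header-data split done: build the SKB around the header entry */
			*skb = mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index);
		else
			/* no split: the whole packet is in the data buffer of the WQ */
			*skb = mlx5e_skb_from_cqe_mpwrq_nonlinear(rq, wi, cqe_bcnt,
								  data_offset, page_idx);
		if (unlikely(!*skb))
			goto free_hd_entry;

		/* GRO bookkeeping now lives in the caller, for both paths */
		NAPI_GRO_CB(*skb)->count = 1;
		skb_shinfo(*skb)->gso_size = cqe_bcnt - head_size;
	}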
commit 7957837b81
parent b8d91145ed
Author: Khalid Manaa, 2022-01-26 14:25:55 +02:00 (committed by Saeed Mahameed)
1 changed file with 17 additions and 9 deletions

@@ -1871,7 +1871,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 	return skb;
 }
 
-static void
+static struct sk_buff *
 mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 			  struct mlx5_cqe64 *cqe, u16 header_index)
 {
@@ -1895,7 +1895,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 		skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size);
 
 		if (unlikely(!skb))
-			return;
+			return NULL;
 
 		/* queue up for recycling/reuse */
 		page_ref_inc(head->page);
@@ -1907,7 +1907,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 				     ALIGN(head_size, sizeof(long)));
 		if (unlikely(!skb)) {
 			rq->stats->buff_alloc_err++;
-			return;
+			return NULL;
 		}
 
 		prefetchw(skb->data);
@@ -1918,9 +1918,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 		skb->tail += head_size;
 		skb->len += head_size;
 	}
-	rq->hw_gro_data->skb = skb;
-	NAPI_GRO_CB(skb)->count = 1;
-	skb_shinfo(skb)->gso_size = mpwrq_get_cqe_byte_cnt(cqe) - head_size;
+	return skb;
 }
 
 static void
@@ -1980,6 +1978,7 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
 	u32 cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
 	u16 wqe_id = be16_to_cpu(cqe->wqe_id);
 	u32 page_idx = wqe_offset >> PAGE_SHIFT;
+	u16 head_size = cqe->shampo.header_size;
 	struct sk_buff **skb = &rq->hw_gro_data->skb;
 	bool flush = cqe->shampo.flush;
 	bool match = cqe->shampo.match;
@@ -2011,9 +2010,16 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
 	}
 
 	if (!*skb) {
-		mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index);
+		if (likely(head_size))
+			*skb = mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index);
+		else
+			*skb = mlx5e_skb_from_cqe_mpwrq_nonlinear(rq, wi, cqe_bcnt, data_offset,
+								  page_idx);
 		if (unlikely(!*skb))
 			goto free_hd_entry;
+
+		NAPI_GRO_CB(*skb)->count = 1;
+		skb_shinfo(*skb)->gso_size = cqe_bcnt - head_size;
 	} else {
 		NAPI_GRO_CB(*skb)->count++;
 		if (NAPI_GRO_CB(*skb)->count == 2 &&
@@ -2027,8 +2033,10 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
 		}
 	}
 
-	di = &wi->umr.dma_info[page_idx];
-	mlx5e_fill_skb_data(*skb, rq, di, data_bcnt, data_offset);
+	if (likely(head_size)) {
+		di = &wi->umr.dma_info[page_idx];
+		mlx5e_fill_skb_data(*skb, rq, di, data_bcnt, data_offset);
+	}
 
 	mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb);
 	if (flush)