net/mlx5e: Remove the outer loop when allocating legacy RQ WQEs
Legacy RQ WQEs are allocated in a loop in small batches (8 WQEs). As partial batches are allowed, there is no point in having a loop within a loop, so the outer loop is removed, and the batch size is increased up to the total number of WQEs to allocate, while still being no smaller than 8.

Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 3f5fe0b2e6
commit 0b48223237
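As a quick illustration of the control-flow change, here is a standalone sketch (not the driver code): alloc_wqes() below is a hypothetical stand-in for mlx5e_alloc_rx_wqes(), which may stop early and reports how many WQEs it actually allocated.

#include <stdbool.h>
#include <stdio.h>

static int alloc_wqes(int head, int n)
{
	(void)head;
	return n; /* stub: pretend every requested WQE was allocated */
}

/* Before: an outer loop refilling in fixed batches of 8. */
static bool refill_old(int missing)
{
	while (missing >= 8) {
		int count = alloc_wqes(0, 8);

		missing -= count;
		if (count != 8)
			return true; /* busy: a batch failed partway */
	}
	return false;
}

/* After: one bulk sized to everything that is missing. Since a partial
 * batch was already a valid outcome, the outer retry loop added nothing.
 */
static bool refill_new(int missing)
{
	int count = alloc_wqes(0, missing);

	return count != missing; /* busy if the bulk stopped early */
}

int main(void)
{
	printf("old busy=%d, new busy=%d\n", refill_old(24), refill_new(24));
	return 0;
}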
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -424,7 +424,7 @@ static void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
 	mlx5e_free_rx_wqe(rq, wi, false);
 }

-static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, u8 wqe_bulk)
+static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
 {
 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
 	int i;
@@ -805,38 +805,33 @@ static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
 {
 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
+	int wqe_bulk, count;
 	bool busy = false;
-	u8 wqe_bulk;
+	u16 head;

 	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
 		return false;

-	wqe_bulk = rq->wqe.info.wqe_bulk;
-
-	if (mlx5_wq_cyc_missing(wq) < wqe_bulk)
+	if (mlx5_wq_cyc_missing(wq) < rq->wqe.info.wqe_bulk)
 		return false;

 	if (rq->page_pool)
 		page_pool_nid_changed(rq->page_pool, numa_mem_id());

-	do {
-		u16 head = mlx5_wq_cyc_get_head(wq);
-		int count;
-		u8 bulk;
+	wqe_bulk = mlx5_wq_cyc_missing(wq);
+	head = mlx5_wq_cyc_get_head(wq);

-		/* Don't allow any newly allocated WQEs to share the same page
-		 * with old WQEs that aren't completed yet. Stop earlier.
-		 */
-		bulk = wqe_bulk - ((head + wqe_bulk) & rq->wqe.info.wqe_index_mask);
+	/* Don't allow any newly allocated WQEs to share the same page with old
+	 * WQEs that aren't completed yet. Stop earlier.
+	 */
+	wqe_bulk -= (head + wqe_bulk) & rq->wqe.info.wqe_index_mask;

-		count = mlx5e_alloc_rx_wqes(rq, head, bulk);
-		mlx5_wq_cyc_push_n(wq, count);
-		if (unlikely(count != bulk)) {
-			rq->stats->buff_alloc_err++;
-			busy = true;
-			break;
-		}
-	} while (mlx5_wq_cyc_missing(wq) >= wqe_bulk);
+	count = mlx5e_alloc_rx_wqes(rq, head, wqe_bulk);
+	mlx5_wq_cyc_push_n(wq, count);
+	if (unlikely(count != wqe_bulk)) {
+		rq->stats->buff_alloc_err++;
+		busy = true;
+	}

 	/* ensure wqes are visible to device before updating doorbell record */
 	dma_wmb();
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -123,7 +123,7 @@ static inline void mlx5_wq_cyc_push(struct mlx5_wq_cyc *wq)
 	wq->cur_sz++;
 }

-static inline void mlx5_wq_cyc_push_n(struct mlx5_wq_cyc *wq, u8 n)
+static inline void mlx5_wq_cyc_push_n(struct mlx5_wq_cyc *wq, u16 n)
 {
 	wq->wqe_ctr += n;
 	wq->cur_sz += n;
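The page-boundary trim kept in the mlx5e_post_rx_wqes() hunk above can be sanity-checked with a small worked example. The numbers below are illustrative only, assuming 8 WQEs share one page (so wqe_index_mask = 7); they are not taken from the driver.

#include <stdio.h>

int main(void)
{
	unsigned int wqe_index_mask = 7; /* assume 8 WQEs share one page */
	unsigned int head = 13;          /* illustrative current head index */
	int wqe_bulk = 14;               /* illustrative number of missing WQEs */

	/* Trim the bulk so head + wqe_bulk lands on a page boundary. */
	wqe_bulk -= (head + wqe_bulk) & wqe_index_mask;

	/* (13 + 14) & 7 = 3, so wqe_bulk becomes 11, and 13 + 11 = 24 is
	 * page-aligned: no newly allocated WQE shares a page with old WQEs
	 * that aren't completed yet.
	 */
	printf("trimmed wqe_bulk = %d\n", wqe_bulk); /* prints 11 */
	return 0;
}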