RDMA/mlx5: Remove order from mlx5_ib_cont_pages()
Only alloc_mr_from_cache() needs order and can trivially compute it, so
lift it to the one call site and remove the NULL arguments.

Link: https://lore.kernel.org/r/20201026131936.1335664-6-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
commit 95741ee3f0
parent f0093fb1a7
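Before the hunks, a rough standalone sketch of the computation being lifted: the order the helper used to report is just the ceiling log2 of the number of page_shift-sized blocks covering the region, so the one caller that needs it can recompute it locally. This is userspace model code, not the kernel's: num_blocks() only loosely mirrors ib_umem_num_dma_blocks(), and the ceil-log2 helper mimics the kernel's order_base_2() macro.

/*
 * Standalone userspace sketch, not kernel code: order is the ceiling
 * log2 of the number of page_shift-sized blocks covering the region.
 */
#include <stdio.h>
#include <stdint.h>

/* ceil(log2(n)) for n >= 1, like the kernel's order_base_2() */
static unsigned int order_base_2(uint64_t n)
{
	unsigned int order = 0;

	while ((1ULL << order) < n)
		order++;
	return order;
}

/* blocks of size 1 << page_shift needed to cover [iova, iova + len);
 * loosely models ib_umem_num_dma_blocks()
 */
static uint64_t num_blocks(uint64_t iova, uint64_t len, unsigned int page_shift)
{
	uint64_t pgsz = 1ULL << page_shift;
	uint64_t start = iova & ~(pgsz - 1);
	uint64_t end = (iova + len + pgsz - 1) & ~(pgsz - 1);

	return (end - start) >> page_shift;
}

int main(void)
{
	/* A 5-page region of 4 KiB pages needs 5 blocks -> order 3. */
	uint64_t blocks = num_blocks(0x1000, 5 * 4096, 12);

	printf("blocks=%llu order=%u\n",
	       (unsigned long long)blocks, order_base_2(blocks));
	return 0;
}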
drivers/infiniband/hw/mlx5/cq.c
@@ -747,7 +747,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
 		goto err_umem;
 
 	mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, 0, &npages, &page_shift,
-			   &ncont, NULL);
+			   &ncont);
 	mlx5_ib_dbg(dev, "addr 0x%llx, size %u, npages %d, page_shift %d, ncont %d\n",
 		    ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont);
 
@@ -1155,8 +1155,7 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
 		return err;
 	}
 
-	mlx5_ib_cont_pages(umem, ucmd.buf_addr, 0, &npages, page_shift,
-			   npas, NULL);
+	mlx5_ib_cont_pages(umem, ucmd.buf_addr, 0, &npages, page_shift, npas);
 
 	cq->resize_umem = umem;
 	*cqe_size = ucmd.cqe_size;
drivers/infiniband/hw/mlx5/devx.c
@@ -2083,7 +2083,7 @@ static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
 
 	mlx5_ib_cont_pages(obj->umem, obj->umem->address,
 			   MLX5_MKEY_PAGE_SHIFT_MASK, &npages,
-			   &obj->page_shift, &obj->ncont, NULL);
+			   &obj->page_shift, &obj->ncont);
 
 	if (!npages) {
 		ib_umem_release(obj->umem);
drivers/infiniband/hw/mlx5/mem.c
@@ -42,12 +42,11 @@
  * @count: number of PAGE_SIZE pages covered by umem
  * @shift: page shift for the compound pages found in the region
  * @ncont: number of compund pages
- * @order: log2 of the number of compound pages
  */
 void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
 			unsigned long max_page_shift,
 			int *count, int *shift,
-			int *ncont, int *order)
+			int *ncont)
 {
 	unsigned long tmp;
 	unsigned long m;
@@ -63,8 +62,6 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
 		*shift = odp->page_shift;
 		*ncont = ib_umem_odp_num_pages(odp);
 		*count = *ncont << (*shift - PAGE_SHIFT);
-		if (order)
-			*order = ilog2(roundup_pow_of_two(*count));
 		return;
 	}
 
@@ -95,17 +92,9 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
 
 	if (i) {
 		m = min_t(unsigned long, ilog2(roundup_pow_of_two(i)), m);
-
-		if (order)
-			*order = ilog2(roundup_pow_of_two(i) >> m);
-
 		*ncont = DIV_ROUND_UP(i, (1 << m));
 	} else {
 		m = 0;
-
-		if (order)
-			*order = 0;
-
 		*ncont = 0;
 	}
 	*shift = PAGE_SHIFT + m;
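Why a call site can recompute order from the block count alone: the removed branch set *order = ilog2(roundup_pow_of_two(i) >> m), i.e. ceil(log2(i)) - m, while *ncont = DIV_ROUND_UP(i, 1 << m) is still reported, and the min_t() line above caps m at ceil(log2(i)); under that cap, ceil(log2(*ncont)) equals the old order. A small userspace check of that identity, as a sketch (not kernel code, with a local order_base_2() analog):

/* Userspace check, not kernel code: for every i, and every m capped the
 * way mlx5_ib_cont_pages() caps it, the removed order value equals the
 * ceiling log2 of the compound-page count the helper still reports.
 */
#include <assert.h>
#include <stdint.h>

static unsigned int ceil_log2(uint64_t n)	/* order_base_2() analog */
{
	unsigned int o = 0;

	while ((1ULL << o) < n)
		o++;
	return o;
}

int main(void)
{
	for (uint64_t i = 1; i < (1u << 16); i++)
		for (unsigned int m = 0; m <= ceil_log2(i); m++) {
			/* old: *order = ilog2(roundup_pow_of_two(i) >> m) */
			unsigned int old_order = ceil_log2(i) - m;
			/* kept: *ncont = DIV_ROUND_UP(i, 1 << m) */
			uint64_t ncont = (i + (1ULL << m) - 1) >> m;

			assert(old_order == ceil_log2(ncont));
		}
	return 0;
}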
drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -1232,7 +1232,7 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
 void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
 			unsigned long max_page_shift,
 			int *count, int *shift,
-			int *ncont, int *order);
+			int *ncont);
 void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
 			    int page_shift, size_t offset, size_t num_pages,
 			    __be64 *pas, int access_flags);
drivers/infiniband/hw/mlx5/mr.c
@@ -960,11 +960,11 @@ static struct mlx5_ib_mr *alloc_mr_from_cache(struct ib_pd *pd,
 	int npages;
 	int page_shift;
 	int ncont;
-	int order;
 
 	mlx5_ib_cont_pages(umem, iova, MLX5_MKEY_PAGE_SHIFT_MASK, &npages,
-			   &page_shift, &ncont, &order);
-	ent = mr_cache_ent_from_order(dev, order);
+			   &page_shift, &ncont);
+	ent = mr_cache_ent_from_order(dev, order_base_2(ib_umem_num_dma_blocks(
+						umem, 1UL << page_shift)));
 	if (!ent)
 		return ERR_PTR(-E2BIG);
 
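This is the one call site that consumed order: it selects an MR cache entry, and the order is now derived inline as order_base_2(ib_umem_num_dma_blocks(umem, 1UL << page_shift)), effectively the same ceiling log2 of the block count the helper used to compute. As a worked example with assumed numbers (not from the commit), using the num_blocks()/order_base_2() sketch above: a 4 MiB umem that resolves to page_shift = 21 (2 MiB compound pages) gives num_blocks(0, 4 << 20, 21) = 2 blocks, so order_base_2(2) = 1 and the MR would come from the order-1 cache entry.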
@@ -1165,7 +1165,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
 		return ERR_PTR(-ENOMEM);
 
 	mlx5_ib_cont_pages(umem, iova, MLX5_MKEY_PAGE_SHIFT_MASK, &npages,
-			   &page_shift, &ncont, NULL);
+			   &page_shift, &ncont);
 
 	mr->page_shift = page_shift;
 	mr->ibmr.pd = pd;
drivers/infiniband/hw/mlx5/qp.c
@@ -791,7 +791,7 @@ static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
 		return PTR_ERR(*umem);
 	}
 
-	mlx5_ib_cont_pages(*umem, addr, 0, npages, page_shift, ncont, NULL);
+	mlx5_ib_cont_pages(*umem, addr, 0, npages, page_shift, ncont);
 
 	err = mlx5_ib_get_buf_offset(addr, *page_shift, offset);
 	if (err) {
@@ -850,7 +850,7 @@ static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	}
 
 	mlx5_ib_cont_pages(rwq->umem, ucmd->buf_addr, 0, &npages, &page_shift,
-			   &ncont, NULL);
+			   &ncont);
 	err = mlx5_ib_get_buf_offset(ucmd->buf_addr, page_shift,
 				     &rwq->rq_page_offset);
 	if (err) {
drivers/infiniband/hw/mlx5/srq.c
@@ -88,7 +88,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
 	}
 
 	mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, 0, &npages,
-			   &page_shift, &ncont, NULL);
+			   &page_shift, &ncont);
 	err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift,
 				     &offset);
 	if (err) {