RDMA/mlx4: Use ib_umem_num_dma_blocks()
For the callers tied to mlx4_ib_umem_calc_optimal_mtt_size(), compute the value with ib_umem_num_dma_blocks() inside that function; the callers were only passing in a static default that the function can derive itself. All other places use the count with PAGE_SIZE, so switch them to ib_umem_num_dma_blocks(umem, PAGE_SIZE). As these were the last call sites, remove ib_umem_page_count().

Link: https://lore.kernel.org/r/15-v2-270386b7e60b+28f4-umem_1_jgg@nvidia.com
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
commit 81655d3c4a (parent 87aebd3f8c)
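For context, ib_umem_num_dma_blocks() derives the block count from the mapping's aligned IOVA span rather than by walking the scatterlist. A sketch of the helper, assuming the include/rdma/ib_umem.h definition introduced by this series (umem->iova and umem->length are the fields used there):

	/* Sketch, assuming the include/rdma/ib_umem.h definition from
	 * this series: number of pgsz-aligned blocks spanned by the
	 * mapping, independent of how the scatterlist is segmented.
	 */
	static inline size_t ib_umem_num_dma_blocks(struct ib_umem *umem,
						    unsigned long pgsz)
	{
		return (size_t)(ALIGN(umem->iova + umem->length, pgsz) -
				ALIGN_DOWN(umem->iova, pgsz)) / pgsz;
	}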
drivers/infiniband/core/umem.c
@@ -350,18 +350,6 @@ void ib_umem_release(struct ib_umem *umem)
 }
 EXPORT_SYMBOL(ib_umem_release);
 
-int ib_umem_page_count(struct ib_umem *umem)
-{
-	int i, n = 0;
-	struct scatterlist *sg;
-
-	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
-		n += sg_dma_len(sg) >> PAGE_SHIFT;
-
-	return n;
-}
-EXPORT_SYMBOL(ib_umem_page_count);
-
 /*
  * Copy from the given ib_umem's pages to the given buffer.
  *
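To see why the conversion is safe for the plain PAGE_SIZE users, here is a minimal standalone model (illustrative values only, not kernel code) comparing the removed scatterlist summation with the alignment-based count:

	#include <stdio.h>
	#include <stddef.h>

	#define PAGE_SHIFT 12
	#define PAGE_SIZE  (1UL << PAGE_SHIFT)
	#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))
	#define ALIGN(x, a)      ALIGN_DOWN((x) + (a) - 1, (a))

	int main(void)
	{
		/* Hypothetical mapping; IOVA and length are illustrative. */
		unsigned long iova = 0x10000;          /* page aligned */
		unsigned long length = 3 * PAGE_SIZE;

		/* Old scheme (ib_umem_page_count): sum sg_dma_len() over
		 * the scatterlist, here modeled as one 3-page segment. */
		unsigned long sg_dma_len = 3 * PAGE_SIZE;
		size_t old_count = sg_dma_len >> PAGE_SHIFT;

		/* New scheme (ib_umem_num_dma_blocks): count PAGE_SIZE
		 * blocks in the aligned span of the whole mapping. */
		size_t new_count = (ALIGN(iova + length, PAGE_SIZE) -
				    ALIGN_DOWN(iova, PAGE_SIZE)) / PAGE_SIZE;

		printf("old=%zu new=%zu\n", old_count, new_count); /* 3 3 */
		return 0;
	}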
drivers/infiniband/hw/mlx4/cq.c
@@ -149,7 +149,6 @@ static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_udata *udata,
 	if (IS_ERR(*umem))
 		return PTR_ERR(*umem);
 
-	n = ib_umem_page_count(*umem);
 	shift = mlx4_ib_umem_calc_optimal_mtt_size(*umem, 0, &n);
 	err = mlx4_mtt_init(dev->dev, n, shift, &buf->mtt);
 
drivers/infiniband/hw/mlx4/mr.c
@@ -271,6 +271,8 @@ int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
 	u64 total_len = 0;
 	int i;
 
+	*num_of_mtts = ib_umem_num_dma_blocks(umem, PAGE_SIZE);
+
 	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
 		/*
 		 * Initialization - save the first chunk start as the
@@ -421,7 +423,6 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 		goto err_free;
 	}
 
-	n = ib_umem_page_count(mr->umem);
 	shift = mlx4_ib_umem_calc_optimal_mtt_size(mr->umem, start, &n);
 
 	err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
@@ -511,7 +512,7 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
 			mmr->umem = NULL;
 			goto release_mpt_entry;
 		}
-		n = ib_umem_page_count(mmr->umem);
+		n = ib_umem_num_dma_blocks(mmr->umem, PAGE_SIZE);
 		shift = PAGE_SHIFT;
 
 		err = mlx4_mr_rereg_mem_write(dev->dev, &mmr->mmr,
drivers/infiniband/hw/mlx4/qp.c
@@ -922,7 +922,6 @@ static int create_rq(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
 		goto err;
 	}
 
-	n = ib_umem_page_count(qp->umem);
 	shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
 	err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
 
@@ -1117,7 +1116,6 @@ static int create_qp_common(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
 			goto err;
 		}
 
-		n = ib_umem_page_count(qp->umem);
 		shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
 		err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
 
drivers/infiniband/hw/mlx4/srq.c
@@ -115,8 +115,9 @@ int mlx4_ib_create_srq(struct ib_srq *ib_srq,
 		if (IS_ERR(srq->umem))
 			return PTR_ERR(srq->umem);
 
-		err = mlx4_mtt_init(dev->dev, ib_umem_page_count(srq->umem),
-				    PAGE_SHIFT, &srq->mtt);
+		err = mlx4_mtt_init(
+			dev->dev, ib_umem_num_dma_blocks(srq->umem, PAGE_SIZE),
+			PAGE_SHIFT, &srq->mtt);
 		if (err)
 			goto err_buf;
 
include/rdma/ib_umem.h
@@ -74,7 +74,6 @@ static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
 struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
 			    size_t size, int access);
 void ib_umem_release(struct ib_umem *umem);
-int ib_umem_page_count(struct ib_umem *umem);
 int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
 		      size_t length);
 unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
@@ -92,7 +91,6 @@ static inline struct ib_umem *ib_umem_get(struct ib_device *device,
 	return ERR_PTR(-EINVAL);
 }
 static inline void ib_umem_release(struct ib_umem *umem) { }
-static inline int ib_umem_page_count(struct ib_umem *umem) { return 0; }
 static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
 				    size_t length) {
 	return -EINVAL;