RDMA/mlx5: Fix MR npages calculation for IB_ACCESS_HUGETLB
When ODP is enabled with IB_ACCESS_HUGETLB, the required pages should be
calculated based on the extent of the MR, which is rounded to the nearest
huge page alignment.
Fixes: d2183c6f19 ("RDMA/umem: Move page_shift from ib_umem to ib_odp_umem")
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Link: https://lore.kernel.org/r/20190815083834.9245-5-leon@kernel.org
Signed-off-by: Doug Ledford <dledford@redhat.com>
commit 27b7fb1ab7
parent d1abaeb3be
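To illustrate the miscalculation: for an ODP MR registered with
IB_ACCESS_HUGETLB, the old path counted PAGE_SIZE pages over the raw MR
extent, while the ODP interval is huge-page aligned and must be counted in
huge pages. A minimal userspace sketch, assuming 4 KiB base pages and 2 MiB
huge pages (align_down()/align_up() are local stand-ins, not kernel APIs):

#include <stdio.h>

#define BASE_SHIFT	12	/* PAGE_SHIFT: 4 KiB pages (assumption) */
#define HUGE_SHIFT	21	/* ODP page_shift: 2 MiB huge pages */

/* local stand-ins for the kernel's ALIGN_DOWN()/ALIGN() */
static unsigned long align_down(unsigned long x, unsigned int shift)
{
	return x & ~((1UL << shift) - 1);
}

static unsigned long align_up(unsigned long x, unsigned int shift)
{
	return align_down(x + (1UL << shift) - 1, shift);
}

int main(void)
{
	/* a 2 MiB MR whose start is not huge-page aligned */
	unsigned long addr = 0x300000, len = 0x200000;

	/* old: ib_umem_num_pages() counted 4 KiB pages of the raw extent */
	unsigned long buggy = (align_up(addr + len, BASE_SHIFT) -
			       align_down(addr, BASE_SHIFT)) >> BASE_SHIFT;

	/* new: count huge pages over the huge-page-aligned interval */
	unsigned long fixed = (align_up(addr + len, HUGE_SHIFT) -
			       align_down(addr, HUGE_SHIFT)) >> HUGE_SHIFT;

	printf("buggy npages = %lu, fixed npages = %lu\n", buggy, fixed);
	/* prints: buggy npages = 512, fixed npages = 2 */
	return 0;
}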
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -379,14 +379,9 @@ EXPORT_SYMBOL(ib_umem_release);
 
 int ib_umem_page_count(struct ib_umem *umem)
 {
-	int i;
-	int n;
+	int i, n = 0;
 	struct scatterlist *sg;
 
-	if (umem->is_odp)
-		return ib_umem_num_pages(umem);
-
-	n = 0;
 	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
 		n += sg_dma_len(sg) >> PAGE_SHIFT;
 
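With the ODP special case removed, ib_umem_page_count() only handles
DMA-mapped scatterlists; the one ODP caller switches to
ib_umem_odp_num_pages(), which divides the huge-page-aligned ODP interval by
the ODP page size. A sketch of that helper, modeled on
include/rdma/ib_umem_odp.h of this era (the body is an approximation, not
verbatim source):

static inline size_t ib_umem_odp_num_pages(struct ib_umem_odp *umem_odp)
{
	/* ib_umem_start()/ib_umem_end() return the interval bounds, which
	 * are already aligned to 1 << page_shift for HUGETLB MRs */
	return (ib_umem_end(umem_odp) - ib_umem_start(umem_odp)) >>
	       umem_odp->page_shift;
}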
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -57,9 +57,10 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
 	int entry;
 
 	if (umem->is_odp) {
-		unsigned int page_shift = to_ib_umem_odp(umem)->page_shift;
+		struct ib_umem_odp *odp = to_ib_umem_odp(umem);
+		unsigned int page_shift = odp->page_shift;
 
-		*ncont = ib_umem_page_count(umem);
+		*ncont = ib_umem_odp_num_pages(odp);
 		*count = *ncont << (page_shift - PAGE_SHIFT);
 		*shift = page_shift;
 		if (order)
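The *count computation then converts huge-page entries back to base pages. A
worked example, assuming PAGE_SHIFT = 12 (4 KiB base pages) and an ODP
page_shift of 21 (2 MiB huge pages):

#include <stdio.h>

int main(void)
{
	unsigned int base_shift = 12;	/* PAGE_SHIFT, 4 KiB (assumption) */
	unsigned int page_shift = 21;	/* ODP huge page, 2 MiB */
	int ncont = 2;			/* huge pages spanned by the MR */

	/* same conversion as the fixed mlx5_ib_cont_pages() */
	int count = ncont << (page_shift - base_shift);

	printf("ncont = %d huge pages -> count = %d base pages\n",
	       ncont, count);		/* prints: count = 1024 */
	return 0;
}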