RDMA/pvrdma: Use ib_umem_num_dma_blocks() instead of ib_umem_page_count()
This driver always uses PAGE_SIZE.

Link: https://lore.kernel.org/r/14-v2-270386b7e60b+28f4-umem_1_jgg@nvidia.com
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
This commit is contained in:
parent
b8387f8189
commit
87aebd3f8c
|
@@ -142,7 +142,7 @@ int pvrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 			goto err_cq;
 		}
 
-		npages = ib_umem_page_count(cq->umem);
+		npages = ib_umem_num_dma_blocks(cq->umem, PAGE_SIZE);
 	} else {
 		/* One extra page for shared ring state */
 		npages = 1 + (entries * sizeof(struct pvrdma_cqe) +

@@ -298,9 +298,11 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
 				goto err_qp;
 			}
 
-			qp->npages_send = ib_umem_page_count(qp->sumem);
+			qp->npages_send =
+				ib_umem_num_dma_blocks(qp->sumem, PAGE_SIZE);
 			if (!is_srq)
-				qp->npages_recv = ib_umem_page_count(qp->rumem);
+				qp->npages_recv = ib_umem_num_dma_blocks(
+					qp->rumem, PAGE_SIZE);
 			else
 				qp->npages_recv = 0;
 			qp->npages = qp->npages_send + qp->npages_recv;

@@ -152,7 +152,7 @@ int pvrdma_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
 		goto err_srq;
 	}
 
-	srq->npages = ib_umem_page_count(srq->umem);
+	srq->npages = ib_umem_num_dma_blocks(srq->umem, PAGE_SIZE);
 
 	if (srq->npages < 0 || srq->npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
 		dev_warn(&dev->pdev->dev,

Loading…
Reference in New Issue