IB: Allow calls to ib_umem_get from kernel ULPs

So far, the assumption was that ib_umem_get() and ib_umem_odp_get()
are called from flows that start in UVERBS and therefore have a user
context. This assumption restricts flows that are initiated by ULPs
and need the service that ib_umem_get() provides.

This patch changes ib_umem_get() and ib_umem_odp_get() to take the IB
device directly, relying on the fact that both UVERBS and ULPs set
that field correctly.

Reviewed-by: Guy Levi <guyle@mellanox.com>
Signed-off-by: Moni Shoua <monis@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Moni Shoua 2020-01-15 14:43:31 +02:00 committed by Leon Romanovsky
parent b3a987b026
commit c320e527e1
34 changed files with 85 additions and 103 deletions
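With the new signature, a kernel ULP that already holds a struct ib_device pointer can create a umem without going through a uverbs context. The sketch below illustrates a caller of the post-patch API; the helper name, the way the device pointer reaches the caller, and the chosen access flag are illustrative assumptions, not part of this commit.

    #include <linux/err.h>
    #include <linux/printk.h>
    #include <rdma/ib_verbs.h>
    #include <rdma/ib_umem.h>

    /*
     * Illustrative only: pin a userspace buffer on behalf of a ULP using
     * the new ib_umem_get() signature. The device pointer comes straight
     * from the caller; no struct ib_udata or uverbs context is needed.
     */
    static struct ib_umem *ulp_pin_user_buf(struct ib_device *ibdev,
    					unsigned long addr, size_t len)
    {
    	struct ib_umem *umem;

    	umem = ib_umem_get(ibdev, addr, len, IB_ACCESS_LOCAL_WRITE);
    	if (IS_ERR(umem))
    		pr_err("ib_umem_get failed: %ld\n", PTR_ERR(umem));

    	return umem;	/* valid umem or ERR_PTR() */
    }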

View File

@@ -181,15 +181,14 @@ EXPORT_SYMBOL(ib_umem_find_best_pgsz);
 /**
  * ib_umem_get - Pin and DMA map userspace memory.
  *
- * @udata: userspace context to pin memory for
+ * @device: IB device to connect UMEM
  * @addr: userspace virtual address to start at
  * @size: length of region to pin
  * @access: IB_ACCESS_xxx flags for memory being pinned
  */
-struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
+struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
 			    size_t size, int access)
 {
-	struct ib_ucontext *context;
 	struct ib_umem *umem;
 	struct page **page_list;
 	unsigned long lock_limit;
@@ -201,14 +200,6 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
 	struct scatterlist *sg;
 	unsigned int gup_flags = FOLL_WRITE;
 
-	if (!udata)
-		return ERR_PTR(-EIO);
-
-	context = container_of(udata, struct uverbs_attr_bundle, driver_udata)
-			  ->context;
-	if (!context)
-		return ERR_PTR(-EIO);
-
 	/*
 	 * If the combination of the addr and size requested for this memory
 	 * region causes an integer overflow, return error.
@@ -226,7 +217,7 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
 	umem = kzalloc(sizeof(*umem), GFP_KERNEL);
 	if (!umem)
 		return ERR_PTR(-ENOMEM);
-	umem->ibdev = context->device;
+	umem->ibdev = device;
 	umem->length = size;
 	umem->address = addr;
 	umem->writable = ib_access_writable(access);
@@ -281,7 +272,7 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
 		npages -= ret;
 
 		sg = ib_umem_add_sg_table(sg, page_list, ret,
-			dma_get_max_seg_size(context->device->dma_device),
+			dma_get_max_seg_size(device->dma_device),
 			&umem->sg_nents);
 
 		up_read(&mm->mmap_sem);
@@ -289,7 +280,7 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
 
 	sg_mark_end(sg);
 
-	umem->nmap = ib_dma_map_sg(context->device,
+	umem->nmap = ib_dma_map_sg(device,
 				   umem->sg_head.sgl,
 				   umem->sg_nents,
 				   DMA_BIDIRECTIONAL);
@@ -303,7 +294,7 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
 		goto out;
 
 umem_release:
-	__ib_umem_release(context->device, umem, 0);
+	__ib_umem_release(device, umem, 0);
 vma:
 	atomic64_sub(ib_umem_num_pages(umem), &mm->pinned_vm);
 out:

View File

@@ -110,15 +110,12 @@ out_page_list:
  * They exist only to hold the per_mm reference to help the driver create
  * children umems.
  *
- * @udata: udata from the syscall being used to create the umem
+ * @device: IB device to create UMEM
  * @access: ib_reg_mr access flags
  */
-struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_udata *udata,
+struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device,
 					       int access)
 {
-	struct ib_ucontext *context =
-		container_of(udata, struct uverbs_attr_bundle, driver_udata)
-			->context;
 	struct ib_umem *umem;
 	struct ib_umem_odp *umem_odp;
 	int ret;
@@ -126,14 +123,11 @@ struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_udata *udata,
 	if (access & IB_ACCESS_HUGETLB)
 		return ERR_PTR(-EINVAL);
 
-	if (!context)
-		return ERR_PTR(-EIO);
-
 	umem_odp = kzalloc(sizeof(*umem_odp), GFP_KERNEL);
 	if (!umem_odp)
 		return ERR_PTR(-ENOMEM);
 	umem = &umem_odp->umem;
-	umem->ibdev = context->device;
+	umem->ibdev = device;
 	umem->writable = ib_access_writable(access);
 	umem->owning_mm = current->mm;
 	umem_odp->is_implicit_odp = 1;
@@ -201,7 +195,7 @@ EXPORT_SYMBOL(ib_umem_odp_alloc_child);
 /**
  * ib_umem_odp_get - Create a umem_odp for a userspace va
  *
- * @udata: userspace context to pin memory for
+ * @device: IB device struct to get UMEM
 * @addr: userspace virtual address to start at
 * @size: length of region to pin
 * @access: IB_ACCESS_xxx flags for memory being pinned
@@ -210,23 +204,14 @@ EXPORT_SYMBOL(ib_umem_odp_alloc_child);
 * pinning, instead, stores the mm for future page fault handling in
 * conjunction with MMU notifiers.
 */
-struct ib_umem_odp *ib_umem_odp_get(struct ib_udata *udata, unsigned long addr,
-				    size_t size, int access,
+struct ib_umem_odp *ib_umem_odp_get(struct ib_device *device,
+				    unsigned long addr, size_t size, int access,
 				    const struct mmu_interval_notifier_ops *ops)
 {
 	struct ib_umem_odp *umem_odp;
-	struct ib_ucontext *context;
 	struct mm_struct *mm;
 	int ret;
 
-	if (!udata)
-		return ERR_PTR(-EIO);
-
-	context = container_of(udata, struct uverbs_attr_bundle, driver_udata)
-			  ->context;
-	if (!context)
-		return ERR_PTR(-EIO);
-
 	if (WARN_ON_ONCE(!(access & IB_ACCESS_ON_DEMAND)))
 		return ERR_PTR(-EINVAL);
@@ -234,7 +219,7 @@ struct ib_umem_odp *ib_umem_odp_get(struct ib_udata *udata, unsigned long addr,
 	if (!umem_odp)
 		return ERR_PTR(-ENOMEM);
 
-	umem_odp->umem.ibdev = context->device;
+	umem_odp->umem.ibdev = device;
 	umem_odp->umem.length = size;
 	umem_odp->umem.address = addr;
 	umem_odp->umem.writable = ib_access_writable(access);

View File

@@ -837,7 +837,8 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
 		bytes += (qplib_qp->sq.max_wqe * psn_sz);
 	}
 	bytes = PAGE_ALIGN(bytes);
-	umem = ib_umem_get(udata, ureq.qpsva, bytes, IB_ACCESS_LOCAL_WRITE);
+	umem = ib_umem_get(&rdev->ibdev, ureq.qpsva, bytes,
+			   IB_ACCESS_LOCAL_WRITE);
 	if (IS_ERR(umem))
 		return PTR_ERR(umem);
 
@@ -850,7 +851,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
 	if (!qp->qplib_qp.srq) {
 		bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
 		bytes = PAGE_ALIGN(bytes);
-		umem = ib_umem_get(udata, ureq.qprva, bytes,
+		umem = ib_umem_get(&rdev->ibdev, ureq.qprva, bytes,
 				   IB_ACCESS_LOCAL_WRITE);
 		if (IS_ERR(umem))
 			goto rqfail;
@@ -1304,7 +1305,8 @@ static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
 	bytes = (qplib_srq->max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
 	bytes = PAGE_ALIGN(bytes);
-	umem = ib_umem_get(udata, ureq.srqva, bytes, IB_ACCESS_LOCAL_WRITE);
+	umem = ib_umem_get(&rdev->ibdev, ureq.srqva, bytes,
+			   IB_ACCESS_LOCAL_WRITE);
 	if (IS_ERR(umem))
 		return PTR_ERR(umem);
@@ -2545,7 +2547,7 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 			goto fail;
 		}
 
-		cq->umem = ib_umem_get(udata, req.cq_va,
+		cq->umem = ib_umem_get(&rdev->ibdev, req.cq_va,
 				       entries * sizeof(struct cq_base),
 				       IB_ACCESS_LOCAL_WRITE);
 		if (IS_ERR(cq->umem)) {
@@ -3514,7 +3516,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
 	/* The fixed portion of the rkey is the same as the lkey */
 	mr->ib_mr.rkey = mr->qplib_mr.rkey;
 
-	umem = ib_umem_get(udata, start, length, mr_access_flags);
+	umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags);
 	if (IS_ERR(umem)) {
 		dev_err(rdev_to_dev(rdev), "Failed to get umem");
 		rc = -EFAULT;

View File

@@ -543,7 +543,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	mhp->rhp = rhp;
 
-	mhp->umem = ib_umem_get(udata, start, length, acc);
+	mhp->umem = ib_umem_get(pd->device, start, length, acc);
 	if (IS_ERR(mhp->umem))
 		goto err_free_skb;

View File

@@ -1384,7 +1384,7 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
 		goto err_out;
 	}
 
-	mr->umem = ib_umem_get(udata, start, length, access_flags);
+	mr->umem = ib_umem_get(ibpd->device, start, length, access_flags);
 	if (IS_ERR(mr->umem)) {
 		err = PTR_ERR(mr->umem);
 		ibdev_dbg(&dev->ibdev,

View File

@@ -163,7 +163,7 @@ static int get_cq_umem(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
 	u32 npages;
 	int ret;
 
-	*umem = ib_umem_get(udata, ucmd.buf_addr, buf->size,
+	*umem = ib_umem_get(&hr_dev->ib_dev, ucmd.buf_addr, buf->size,
 			    IB_ACCESS_LOCAL_WRITE);
 	if (IS_ERR(*umem))
 		return PTR_ERR(*umem);

View File

@@ -31,7 +31,8 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
 	refcount_set(&page->refcount, 1);
 	page->user_virt = page_addr;
-	page->umem = ib_umem_get(udata, page_addr, PAGE_SIZE, 0);
+	page->umem = ib_umem_get(context->ibucontext.device, page_addr,
+				 PAGE_SIZE, 0);
 	if (IS_ERR(page->umem)) {
 		ret = PTR_ERR(page->umem);
 		kfree(page);

View File

@@ -1145,7 +1145,7 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	if (!mr)
 		return ERR_PTR(-ENOMEM);
 
-	mr->umem = ib_umem_get(udata, start, length, access_flags);
+	mr->umem = ib_umem_get(pd->device, start, length, access_flags);
 	if (IS_ERR(mr->umem)) {
 		ret = PTR_ERR(mr->umem);
 		goto err_free;
@@ -1230,7 +1230,7 @@ static int rereg_mr_trans(struct ib_mr *ibmr, int flags,
 	}
 
 	ib_umem_release(mr->umem);
-	mr->umem = ib_umem_get(udata, start, length, mr_access_flags);
+	mr->umem = ib_umem_get(ibmr->device, start, length, mr_access_flags);
 	if (IS_ERR(mr->umem)) {
 		ret = PTR_ERR(mr->umem);
 		mr->umem = NULL;

View File

@@ -744,7 +744,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 			goto err_alloc_rq_inline_buf;
 		}
 
-		hr_qp->umem = ib_umem_get(udata, ucmd.buf_addr,
+		hr_qp->umem = ib_umem_get(ib_pd->device, ucmd.buf_addr,
 					  hr_qp->buff_size, 0);
 		if (IS_ERR(hr_qp->umem)) {
 			dev_err(dev, "ib_umem_get error for create qp\n");

View File

@@ -186,7 +186,8 @@ static int create_user_srq(struct hns_roce_srq *srq, struct ib_udata *udata,
 	if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
 		return -EFAULT;
 
-	srq->umem = ib_umem_get(udata, ucmd.buf_addr, srq_buf_size, 0);
+	srq->umem =
+		ib_umem_get(srq->ibsrq.device, ucmd.buf_addr, srq_buf_size, 0);
 	if (IS_ERR(srq->umem))
 		return PTR_ERR(srq->umem);
 
@@ -205,7 +206,7 @@ static int create_user_srq(struct hns_roce_srq *srq, struct ib_udata *udata,
 		goto err_user_srq_mtt;
 
 	/* config index queue BA */
-	srq->idx_que.umem = ib_umem_get(udata, ucmd.que_addr,
+	srq->idx_que.umem = ib_umem_get(srq->ibsrq.device, ucmd.que_addr,
 					srq->idx_que.buf_size, 0);
 	if (IS_ERR(srq->idx_que.umem)) {
 		dev_err(hr_dev->dev, "ib_umem_get error for index queue\n");

View File

@@ -1761,7 +1761,7 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
 	if (length > I40IW_MAX_MR_SIZE)
 		return ERR_PTR(-EINVAL);
 
-	region = ib_umem_get(udata, start, length, acc);
+	region = ib_umem_get(pd->device, start, length, acc);
 	if (IS_ERR(region))
 		return (struct ib_mr *)region;

View File

@@ -144,7 +144,7 @@ static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_udata *udata,
 	int shift;
 	int n;
 
-	*umem = ib_umem_get(udata, buf_addr, cqe * cqe_size,
+	*umem = ib_umem_get(&dev->ib_dev, buf_addr, cqe * cqe_size,
 			    IB_ACCESS_LOCAL_WRITE);
 	if (IS_ERR(*umem))
 		return PTR_ERR(*umem);

View File

@@ -64,7 +64,8 @@ int mlx4_ib_db_map_user(struct ib_udata *udata, unsigned long virt,
 	page->user_virt = (virt & PAGE_MASK);
 	page->refcnt = 0;
-	page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0);
+	page->umem = ib_umem_get(context->ibucontext.device, virt & PAGE_MASK,
+				 PAGE_SIZE, 0);
 	if (IS_ERR(page->umem)) {
 		err = PTR_ERR(page->umem);
 		kfree(page);

View File

@@ -367,7 +367,7 @@ end:
 	return block_shift;
 }
 
-static struct ib_umem *mlx4_get_umem_mr(struct ib_udata *udata, u64 start,
+static struct ib_umem *mlx4_get_umem_mr(struct ib_device *device, u64 start,
 					u64 length, int access_flags)
 {
 	/*
@@ -398,7 +398,7 @@ static struct ib_umem *mlx4_get_umem_mr(struct ib_udata *udata, u64 start,
 		up_read(&current->mm->mmap_sem);
 	}
 
-	return ib_umem_get(udata, start, length, access_flags);
+	return ib_umem_get(device, start, length, access_flags);
 }
 
 struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
@@ -415,7 +415,7 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	if (!mr)
 		return ERR_PTR(-ENOMEM);
 
-	mr->umem = mlx4_get_umem_mr(udata, start, length, access_flags);
+	mr->umem = mlx4_get_umem_mr(pd->device, start, length, access_flags);
 	if (IS_ERR(mr->umem)) {
 		err = PTR_ERR(mr->umem);
 		goto err_free;
@@ -504,7 +504,7 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
 		mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
 		ib_umem_release(mmr->umem);
-		mmr->umem = mlx4_get_umem_mr(udata, start, length,
+		mmr->umem = mlx4_get_umem_mr(mr->device, start, length,
 					     mr_access_flags);
 		if (IS_ERR(mmr->umem)) {
 			err = PTR_ERR(mmr->umem);

View File

@@ -916,7 +916,7 @@ static int create_rq(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
 	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
 		       (qp->sq.wqe_cnt << qp->sq.wqe_shift);
 
-	qp->umem = ib_umem_get(udata, wq.buf_addr, qp->buf_size, 0);
+	qp->umem = ib_umem_get(pd->device, wq.buf_addr, qp->buf_size, 0);
 	if (IS_ERR(qp->umem)) {
 		err = PTR_ERR(qp->umem);
 		goto err;
@@ -1110,7 +1110,8 @@ static int create_qp_common(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
 		if (err)
 			goto err;
 
-		qp->umem = ib_umem_get(udata, ucmd.buf_addr, qp->buf_size, 0);
+		qp->umem =
+			ib_umem_get(pd->device, ucmd.buf_addr, qp->buf_size, 0);
 		if (IS_ERR(qp->umem)) {
 			err = PTR_ERR(qp->umem);
 			goto err;

View File

@@ -110,7 +110,8 @@ int mlx4_ib_create_srq(struct ib_srq *ib_srq,
 		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
 			return -EFAULT;
 
-		srq->umem = ib_umem_get(udata, ucmd.buf_addr, buf_size, 0);
+		srq->umem =
+			ib_umem_get(ib_srq->device, ucmd.buf_addr, buf_size, 0);
 		if (IS_ERR(srq->umem))
 			return PTR_ERR(srq->umem);

View File

@@ -708,8 +708,8 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
 	*cqe_size = ucmd.cqe_size;
 
 	cq->buf.umem =
-		ib_umem_get(udata, ucmd.buf_addr, entries * ucmd.cqe_size,
-			    IB_ACCESS_LOCAL_WRITE);
+		ib_umem_get(&dev->ib_dev, ucmd.buf_addr,
+			    entries * ucmd.cqe_size, IB_ACCESS_LOCAL_WRITE);
 	if (IS_ERR(cq->buf.umem)) {
 		err = PTR_ERR(cq->buf.umem);
 		return err;
@@ -1108,7 +1108,7 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
 	if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1)
 		return -EINVAL;
 
-	umem = ib_umem_get(udata, ucmd.buf_addr,
+	umem = ib_umem_get(&dev->ib_dev, ucmd.buf_addr,
 			   (size_t)ucmd.cqe_size * entries,
 			   IB_ACCESS_LOCAL_WRITE);
 	if (IS_ERR(umem)) {

View File

@@ -2134,7 +2134,7 @@ static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
 	if (err)
 		return err;
 
-	obj->umem = ib_umem_get(&attrs->driver_udata, addr, size, access);
+	obj->umem = ib_umem_get(&dev->ib_dev, addr, size, access);
 	if (IS_ERR(obj->umem))
 		return PTR_ERR(obj->umem);

View File

@@ -64,7 +64,8 @@ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
 	page->user_virt = (virt & PAGE_MASK);
 	page->refcnt = 0;
-	page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0);
+	page->umem = ib_umem_get(context->ibucontext.device, virt & PAGE_MASK,
+				 PAGE_SIZE, 0);
 	if (IS_ERR(page->umem)) {
 		err = PTR_ERR(page->umem);
 		kfree(page);

View File

@@ -737,10 +737,9 @@ static int mr_cache_max_order(struct mlx5_ib_dev *dev)
 	return MLX5_MAX_UMR_SHIFT;
 }
 
-static int mr_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
-		       u64 start, u64 length, int access_flags,
-		       struct ib_umem **umem, int *npages, int *page_shift,
-		       int *ncont, int *order)
+static int mr_umem_get(struct mlx5_ib_dev *dev, u64 start, u64 length,
+		       int access_flags, struct ib_umem **umem, int *npages,
+		       int *page_shift, int *ncont, int *order)
 {
 	struct ib_umem *u;
@@ -749,7 +748,7 @@ static int mr_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
 	if (access_flags & IB_ACCESS_ON_DEMAND) {
 		struct ib_umem_odp *odp;
 
-		odp = ib_umem_odp_get(udata, start, length, access_flags,
+		odp = ib_umem_odp_get(&dev->ib_dev, start, length, access_flags,
 				      &mlx5_mn_ops);
 		if (IS_ERR(odp)) {
 			mlx5_ib_dbg(dev, "umem get failed (%ld)\n",
@@ -765,7 +764,7 @@ static int mr_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
 		if (order)
 			*order = ilog2(roundup_pow_of_two(*ncont));
 	} else {
-		u = ib_umem_get(udata, start, length, access_flags);
+		u = ib_umem_get(&dev->ib_dev, start, length, access_flags);
 		if (IS_ERR(u)) {
 			mlx5_ib_dbg(dev, "umem get failed (%ld)\n", PTR_ERR(u));
 			return PTR_ERR(u);
@@ -1257,7 +1256,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 		return &mr->ibmr;
 	}
 
-	err = mr_umem_get(dev, udata, start, length, access_flags, &umem,
+	err = mr_umem_get(dev, start, length, access_flags, &umem,
 			  &npages, &page_shift, &ncont, &order);
 
 	if (err < 0)
@@ -1424,9 +1423,8 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
 			flags |= IB_MR_REREG_TRANS;
 		ib_umem_release(mr->umem);
 		mr->umem = NULL;
-		err = mr_umem_get(dev, udata, addr, len, access_flags,
-				  &mr->umem, &npages, &page_shift, &ncont,
-				  &order);
+		err = mr_umem_get(dev, addr, len, access_flags, &mr->umem,
+				  &npages, &page_shift, &ncont, &order);
 		if (err)
 			goto err;
 	}

View File

@@ -497,7 +497,7 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
 	struct mlx5_ib_mr *imr;
 	int err;
 
-	umem_odp = ib_umem_odp_alloc_implicit(udata, access_flags);
+	umem_odp = ib_umem_odp_alloc_implicit(&dev->ib_dev, access_flags);
 	if (IS_ERR(umem_odp))
 		return ERR_CAST(umem_odp);

View File

@@ -749,7 +749,7 @@ static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
 {
 	int err;
 
-	*umem = ib_umem_get(udata, addr, size, 0);
+	*umem = ib_umem_get(&dev->ib_dev, addr, size, 0);
 	if (IS_ERR(*umem)) {
 		mlx5_ib_dbg(dev, "umem_get failed\n");
 		return PTR_ERR(*umem);
@@ -806,7 +806,7 @@ static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	if (!ucmd->buf_addr)
 		return -EINVAL;
 
-	rwq->umem = ib_umem_get(udata, ucmd->buf_addr, rwq->buf_size, 0);
+	rwq->umem = ib_umem_get(&dev->ib_dev, ucmd->buf_addr, rwq->buf_size, 0);
 	if (IS_ERR(rwq->umem)) {
 		mlx5_ib_dbg(dev, "umem_get failed\n");
 		err = PTR_ERR(rwq->umem);

View File

@@ -80,7 +80,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
 	srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);
 
-	srq->umem = ib_umem_get(udata, ucmd.buf_addr, buf_size, 0);
+	srq->umem = ib_umem_get(pd->device, ucmd.buf_addr, buf_size, 0);
 	if (IS_ERR(srq->umem)) {
 		mlx5_ib_dbg(dev, "failed umem get, size %d\n", buf_size);
 		err = PTR_ERR(srq->umem);

View File

@@ -880,7 +880,7 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	if (!mr)
 		return ERR_PTR(-ENOMEM);
 
-	mr->umem = ib_umem_get(udata, start, length, acc);
+	mr->umem = ib_umem_get(pd->device, start, length, acc);
 	if (IS_ERR(mr->umem)) {
 		err = PTR_ERR(mr->umem);
 		goto err;

View File

@@ -869,7 +869,7 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
 	if (!mr)
 		return ERR_PTR(status);
-	mr->umem = ib_umem_get(udata, start, len, acc);
+	mr->umem = ib_umem_get(ibpd->device, start, len, acc);
 	if (IS_ERR(mr->umem)) {
 		status = -EFAULT;
 		goto umem_err;

View File

@@ -772,7 +772,7 @@ static inline int qedr_init_user_queue(struct ib_udata *udata,
 	q->buf_addr = buf_addr;
 	q->buf_len = buf_len;
 
-	q->umem = ib_umem_get(udata, q->buf_addr, q->buf_len, access);
+	q->umem = ib_umem_get(&dev->ibdev, q->buf_addr, q->buf_len, access);
 	if (IS_ERR(q->umem)) {
 		DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
 		       PTR_ERR(q->umem));
@@ -1415,8 +1415,7 @@ static int qedr_init_srq_user_params(struct ib_udata *udata,
 	if (rc)
 		return rc;
 
-	srq->prod_umem =
-		ib_umem_get(udata, ureq->prod_pair_addr,
+	srq->prod_umem = ib_umem_get(srq->ibsrq.device, ureq->prod_pair_addr,
 			    sizeof(struct rdma_srq_producers), access);
 	if (IS_ERR(srq->prod_umem)) {
 		qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
@@ -2839,7 +2838,7 @@ struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
 	mr->type = QEDR_MR_USER;
 
-	mr->umem = ib_umem_get(udata, start, len, acc);
+	mr->umem = ib_umem_get(ibpd->device, start, len, acc);
 	if (IS_ERR(mr->umem)) {
 		rc = -EFAULT;
 		goto err0;

View File

@@ -135,7 +135,7 @@ int pvrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 			goto err_cq;
 		}
 
-		cq->umem = ib_umem_get(udata, ucmd.buf_addr, ucmd.buf_size,
+		cq->umem = ib_umem_get(ibdev, ucmd.buf_addr, ucmd.buf_size,
 				       IB_ACCESS_LOCAL_WRITE);
 		if (IS_ERR(cq->umem)) {
 			ret = PTR_ERR(cq->umem);

View File

@@ -126,7 +126,7 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 		return ERR_PTR(-EINVAL);
 	}
 
-	umem = ib_umem_get(udata, start, length, access_flags);
+	umem = ib_umem_get(pd->device, start, length, access_flags);
 	if (IS_ERR(umem)) {
 		dev_warn(&dev->pdev->dev,
 			 "could not get umem for mem region\n");

View File

@@ -276,7 +276,8 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
 
 			if (!is_srq) {
 				/* set qp->sq.wqe_cnt, shift, buf_size.. */
-				qp->rumem = ib_umem_get(udata, ucmd.rbuf_addr,
+				qp->rumem =
+					ib_umem_get(pd->device, ucmd.rbuf_addr,
 						    ucmd.rbuf_size, 0);
 				if (IS_ERR(qp->rumem)) {
 					ret = PTR_ERR(qp->rumem);
@@ -288,7 +289,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
 				qp->srq = to_vsrq(init_attr->srq);
 			}
 
-			qp->sumem = ib_umem_get(udata, ucmd.sbuf_addr,
+			qp->sumem = ib_umem_get(pd->device, ucmd.sbuf_addr,
 						ucmd.sbuf_size, 0);
 			if (IS_ERR(qp->sumem)) {
 				if (!is_srq)

View File

@@ -146,7 +146,7 @@ int pvrdma_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
 		goto err_srq;
 	}
 
-	srq->umem = ib_umem_get(udata, ucmd.buf_addr, ucmd.buf_size, 0);
+	srq->umem = ib_umem_get(ibsrq->device, ucmd.buf_addr, ucmd.buf_size, 0);
 	if (IS_ERR(srq->umem)) {
 		ret = PTR_ERR(srq->umem);
 		goto err_srq;

View File

@@ -390,7 +390,7 @@ struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	if (length == 0)
 		return ERR_PTR(-EINVAL);
 
-	umem = ib_umem_get(udata, start, length, mr_access_flags);
+	umem = ib_umem_get(pd->device, start, length, mr_access_flags);
 	if (IS_ERR(umem))
 		return (void *)umem;

View File

@@ -169,7 +169,7 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
 	void *vaddr;
 	int err;
 
-	umem = ib_umem_get(udata, start, length, access);
+	umem = ib_umem_get(pd->ibpd.device, start, length, access);
 	if (IS_ERR(umem)) {
 		pr_warn("err %d from rxe_umem_get\n",
 			(int)PTR_ERR(umem));

View File

@@ -69,7 +69,7 @@ static inline size_t ib_umem_num_pages(struct ib_umem *umem)
 
 #ifdef CONFIG_INFINIBAND_USER_MEM
 
-struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
+struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
 			    size_t size, int access);
 void ib_umem_release(struct ib_umem *umem);
 int ib_umem_page_count(struct ib_umem *umem);
@@ -83,7 +83,7 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
 
 #include <linux/err.h>
 
-static inline struct ib_umem *ib_umem_get(struct ib_udata *udata,
+static inline struct ib_umem *ib_umem_get(struct ib_device *device,
 					  unsigned long addr, size_t size,
 					  int access)
 {

View File

@@ -114,9 +114,9 @@ static inline size_t ib_umem_odp_num_pages(struct ib_umem_odp *umem_odp)
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
 
 struct ib_umem_odp *
-ib_umem_odp_get(struct ib_udata *udata, unsigned long addr, size_t size,
+ib_umem_odp_get(struct ib_device *device, unsigned long addr, size_t size,
 		int access, const struct mmu_interval_notifier_ops *ops);
-struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_udata *udata,
+struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device,
 					       int access);
 struct ib_umem_odp *
 ib_umem_odp_alloc_child(struct ib_umem_odp *root_umem, unsigned long addr,
@@ -134,7 +134,7 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
 #else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
 
 static inline struct ib_umem_odp *
-ib_umem_odp_get(struct ib_udata *udata, unsigned long addr, size_t size,
+ib_umem_odp_get(struct ib_device *device, unsigned long addr, size_t size,
 		int access, const struct mmu_interval_notifier_ops *ops)
 {
 	return ERR_PTR(-EINVAL);