RDMA/umem: Refactor exit paths in ib_umem_get
Simplify exit paths in ib_umem_get to use the standard goto unwind pattern.

Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 40ddacf2dd
commit 1215cb7c88
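For readers unfamiliar with the idiom, the goto unwind pattern centralizes error handling: each acquisition gets a cleanup label, the labels are laid out in reverse order of acquisition, and every failure site jumps to the label that undoes exactly what has been set up so far. A minimal userspace sketch of the idea (illustrative names only, not the kernel code touched by this commit):

```c
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx {
	void *buf_a;
	void *buf_b;
};

/*
 * Minimal sketch of the goto unwind pattern: each failure jumps to the
 * label that releases only what was already acquired; the labels run in
 * reverse order of acquisition so cleanup falls through naturally.
 */
int ctx_init(struct ctx *c, size_t len)
{
	int ret = 0;

	c->buf_a = malloc(len);
	if (!c->buf_a) {
		ret = -ENOMEM;
		goto out;		/* nothing acquired yet */
	}

	c->buf_b = malloc(len);
	if (!c->buf_b) {
		ret = -ENOMEM;
		goto free_a;		/* undo the first allocation only */
	}

	return 0;

free_a:
	free(c->buf_a);
out:
	return ret;
}

int main(void)
{
	struct ctx c;

	if (ctx_init(&c, 64) == 0) {
		puts("ctx_init succeeded");
		free(c.buf_b);
		free(c.buf_a);
	}
	return 0;
}
```

The same structure appears in the refactored ib_umem_get below, with labels umem_release, vma, out, and umem_kfree replacing the scattered early returns and the ret < 0 cleanup block.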
@@ -91,7 +91,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 	int i;
 	unsigned long dma_attrs = 0;
 	struct scatterlist *sg, *sg_list_start;
-	int need_release = 0;
 	unsigned int gup_flags = FOLL_WRITE;
 
 	if (dmasync)
@@ -120,10 +119,8 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 
 	if (access & IB_ACCESS_ON_DEMAND) {
 		ret = ib_umem_odp_get(context, umem, access);
-		if (ret) {
-			kfree(umem);
-			return ERR_PTR(ret);
-		}
+		if (ret)
+			goto umem_kfree;
 		return umem;
 	}
 
@@ -134,8 +131,8 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 
 	page_list = (struct page **) __get_free_page(GFP_KERNEL);
 	if (!page_list) {
-		kfree(umem);
-		return ERR_PTR(-ENOMEM);
+		ret = -ENOMEM;
+		goto umem_kfree;
 	}
 
 	/*
@@ -155,7 +152,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 	if ((current->mm->pinned_vm > lock_limit) && !capable(CAP_IPC_LOCK)) {
 		up_write(&current->mm->mmap_sem);
 		ret = -ENOMEM;
-		goto out;
+		goto vma;
 	}
 	up_write(&current->mm->mmap_sem);
 
@@ -163,17 +160,16 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 
 	if (npages == 0 || npages > UINT_MAX) {
 		ret = -EINVAL;
-		goto out;
+		goto vma;
 	}
 
 	ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL);
 	if (ret)
-		goto out;
+		goto vma;
 
 	if (!umem->writable)
 		gup_flags |= FOLL_FORCE;
 
-	need_release = 1;
 	sg_list_start = umem->sg_head.sgl;
 
 	down_read(&current->mm->mmap_sem);
@@ -184,7 +180,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 				     gup_flags, page_list, vma_list);
 		if (ret < 0) {
 			up_read(&current->mm->mmap_sem);
-			goto out;
+			goto umem_release;
 		}
 
 		umem->npages += ret;
@@ -211,26 +207,26 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 
 	if (!umem->nmap) {
 		ret = -ENOMEM;
-		goto out;
+		goto umem_release;
 	}
 
 	ret = 0;
+	goto out;
 
+umem_release:
+	__ib_umem_release(context->device, umem, 0);
+vma:
+	down_write(&current->mm->mmap_sem);
+	current->mm->pinned_vm -= ib_umem_num_pages(umem);
+	up_write(&current->mm->mmap_sem);
 out:
-	if (ret < 0) {
-		down_write(&current->mm->mmap_sem);
-		current->mm->pinned_vm -= ib_umem_num_pages(umem);
-		up_write(&current->mm->mmap_sem);
-		if (need_release)
-			__ib_umem_release(context->device, umem, 0);
-		kfree(umem);
-	}
-
 	if (vma_list)
 		free_page((unsigned long) vma_list);
 	free_page((unsigned long) page_list);
-
-	return ret < 0 ? ERR_PTR(ret) : umem;
+umem_kfree:
+	if (ret)
+		kfree(umem);
+	return ret ? ERR_PTR(ret) : umem;
 }
 EXPORT_SYMBOL(ib_umem_get);
 
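The refactor does not change the function's externally visible contract: ib_umem_get() still returns either a valid struct ib_umem * or an ERR_PTR()-encoded errno, never NULL. A hedged caller-side sketch under that assumption, using a hypothetical helper name and an access flag chosen only for illustration (the signature shown matches the kernel of this era):

```c
#include <linux/err.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>

/*
 * Hypothetical caller, not an in-tree function: the refactored exit
 * paths still hand back either a valid umem or an ERR_PTR-encoded
 * errno, so the usual IS_ERR() check is all a caller needs.
 */
static struct ib_umem *pin_user_buf(struct ib_ucontext *ucontext,
				    unsigned long addr, size_t len)
{
	struct ib_umem *umem;

	umem = ib_umem_get(ucontext, addr, len, IB_ACCESS_LOCAL_WRITE, 0);
	if (IS_ERR(umem))
		return umem;	/* propagate the encoded error */

	return umem;
}
```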