rapidio: convert get_user_pages() --> pin_user_pages()
This code was using get_user_pages_fast(), in a "Case 2" scenario
(DMA/RDMA), using the categorization from [1].  That means that it's time
to convert the get_user_pages_fast() + put_page() calls to
pin_user_pages_fast() + unpin_user_pages() calls.

There is some helpful background in [2]: basically, this is a small part
of fixing a long-standing disconnect between pinning pages, and file
systems' use of those pages.

[1] Documentation/core-api/pin_user_pages.rst

[2] "Explicit pinning of user-space pages": https://lwn.net/Articles/807108/

Signed-off-by: John Hubbard <jhubbard@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Matt Porter <mporter@kernel.crashing.org>
Cc: Alexandre Bounine <alex.bou9@gmail.com>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: Dan Carpenter <dan.carpenter@oracle.com>
Link: http://lkml.kernel.org/r/20200517235620.205225-3-jhubbard@nvidia.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 67446283d8
parent e1c3cdb26a
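The pairing rule behind the conversion: pages obtained via pin_user_pages_fast() must be released with unpin_user_pages(), not with a put_page() loop. A minimal sketch of that pattern (not the driver's actual code; pin_buffer() and release_buffer() are hypothetical helpers):

/*
 * Minimal sketch of the get_user_pages_fast() -> pin_user_pages_fast()
 * pairing; pin_buffer() and release_buffer() are hypothetical helpers.
 */
#include <linux/mm.h>
#include <linux/types.h>

static int pin_buffer(unsigned long uaddr, int nr_pages, bool write,
		      struct page **pages)
{
	/* FOLL_WRITE only when the device will write into the pages. */
	return pin_user_pages_fast(uaddr & PAGE_MASK, nr_pages,
				   write ? FOLL_WRITE : 0, pages);
}

static void release_buffer(struct page **pages, unsigned long nr_pages)
{
	/* Was: for (i = 0; i < nr_pages; i++) put_page(pages[i]); */
	unpin_user_pages(pages, nr_pages);
}

pin_user_pages_fast() takes the same arguments as get_user_pages_fast() and returns the same "number of pages pinned, or negative errno" result, which is why the call sites in the hunks below change only in name; the bookkeeping difference is entirely on the release side.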
@@ -572,14 +572,12 @@ static void dma_req_free(struct kref *ref)
 	struct mport_dma_req *req = container_of(ref, struct mport_dma_req,
 			refcount);
 	struct mport_cdev_priv *priv = req->priv;
-	unsigned int i;
 
 	dma_unmap_sg(req->dmach->device->dev,
 		     req->sgt.sgl, req->sgt.nents, req->dir);
 	sg_free_table(&req->sgt);
 	if (req->page_list) {
-		for (i = 0; i < req->nr_pages; i++)
-			put_page(req->page_list[i]);
+		unpin_user_pages(req->page_list, req->nr_pages);
 		kfree(req->page_list);
 	}
 
@@ -815,7 +813,7 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
 	struct mport_dma_req *req;
 	struct mport_dev *md = priv->md;
 	struct dma_chan *chan;
-	int i, ret;
+	int ret;
 	int nents;
 
 	if (xfer->length == 0)
@@ -862,7 +860,7 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
 			goto err_req;
 		}
 
-		pinned = get_user_pages_fast(
+		pinned = pin_user_pages_fast(
 				(unsigned long)xfer->loc_addr & PAGE_MASK,
 				nr_pages,
 				dir == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
@@ -870,7 +868,7 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
 
 		if (pinned != nr_pages) {
 			if (pinned < 0) {
-				rmcd_error("get_user_pages_unlocked err=%ld",
+				rmcd_error("pin_user_pages_fast err=%ld",
					   pinned);
 				nr_pages = 0;
 			} else
@@ -951,8 +949,7 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
 
 err_pg:
 	if (!req->page_list) {
-		for (i = 0; i < nr_pages; i++)
-			put_page(page_list[i]);
+		unpin_user_pages(page_list, nr_pages);
 		kfree(page_list);
 	}
 err_req:
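One caveat the conversion does not change: pin_user_pages_fast(), like get_user_pages_fast(), returns either a negative errno or the number of pages actually pinned, so a short pin still has to be unwound by the caller (this driver does that via the err_pg label in the last hunk). A simplified, hypothetical sketch of that handling:

/*
 * Hypothetical helper (not in the driver) showing the short-pin case:
 * pin_user_pages_fast() can pin fewer pages than requested, and the
 * already-pinned pages must still be released with unpin_user_pages().
 */
#include <linux/errno.h>
#include <linux/mm.h>

static int pin_all_or_bail(unsigned long uaddr, int nr_pages,
			   unsigned int gup_flags, struct page **pages)
{
	int pinned = pin_user_pages_fast(uaddr, nr_pages, gup_flags, pages);

	if (pinned == nr_pages)
		return 0;
	if (pinned > 0)
		unpin_user_pages(pages, pinned);	/* partial pin */
	return pinned < 0 ? pinned : -EFAULT;
}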