genwqe: convert get_user_pages() --> pin_user_pages()

This code was using get_user_pages*() in a "Case 2" scenario
(DMA/RDMA), per the categorization in [1]. That means it is time to
convert the get_user_pages*() + put_page() calls to
pin_user_pages*() + unpin_user_pages() calls.
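
In other words, the conversion swaps one acquire/release API pair for
another. A minimal sketch of the general pattern (illustrative only;
addr, nr_pages, and pages are placeholder locals, not the genwqe code):

	/* Before: pages acquired with get_user_pages_fast() are
	 * released one at a time, dirtying each page first if it
	 * was written to:
	 */
	rc = get_user_pages_fast(addr, nr_pages, FOLL_WRITE, pages);
	/* ... DMA to/from the pages happens here ... */
	for (i = 0; i < rc; i++) {
		set_page_dirty_lock(pages[i]);
		put_page(pages[i]);
	}

	/* After: pages acquired with pin_user_pages_fast() are
	 * released in a single call, which also handles the dirty
	 * marking:
	 */
	rc = pin_user_pages_fast(addr, nr_pages, FOLL_WRITE, pages);
	/* ... DMA to/from the pages happens here ... */
	unpin_user_pages_dirty_lock(pages, rc, true);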

There is some helpful background in [2]: basically, this is a small
part of fixing a long-standing disconnect between pinning pages and
file systems' use of those pages.

[1] Documentation/core-api/pin_user_pages.rst

[2] "Explicit pinning of user-space pages":
    https://lwn.net/Articles/807108/

Cc: Frank Haverkamp <haver@linux.ibm.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
Link: https://lore.kernel.org/r/20200518015237.1568940-1-jhubbard@nvidia.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -514,30 +514,6 @@ int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl)
 	return rc;
 }
 
-/**
- * genwqe_free_user_pages() - Give pinned pages back
- *
- * Documentation of get_user_pages is in mm/gup.c:
- *
- * If the page is written to, set_page_dirty (or set_page_dirty_lock,
- * as appropriate) must be called after the page is finished with, and
- * before put_page is called.
- */
-static int genwqe_free_user_pages(struct page **page_list,
-				  unsigned int nr_pages, int dirty)
-{
-	unsigned int i;
-
-	for (i = 0; i < nr_pages; i++) {
-		if (page_list[i] != NULL) {
-			if (dirty)
-				set_page_dirty_lock(page_list[i]);
-			put_page(page_list[i]);
-		}
-	}
-	return 0;
-}
-
 /**
  * genwqe_user_vmap() - Map user-space memory to virtual kernel memory
  * @cd:        pointer to genwqe device
@@ -597,18 +573,18 @@ int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
 	m->dma_list = (dma_addr_t *)(m->page_list + m->nr_pages);
 
 	/* pin user pages in memory */
-	rc = get_user_pages_fast(data & PAGE_MASK, /* page aligned addr */
+	rc = pin_user_pages_fast(data & PAGE_MASK, /* page aligned addr */
				 m->nr_pages,
				 m->write ? FOLL_WRITE : 0, /* readable/writable */
				 m->page_list);	/* ptrs to pages */
 	if (rc < 0)
-		goto fail_get_user_pages;
+		goto fail_pin_user_pages;
 
-	/* assumption: get_user_pages can be killed by signals. */
+	/* assumption: pin_user_pages can be killed by signals. */
 	if (rc < m->nr_pages) {
-		genwqe_free_user_pages(m->page_list, rc, m->write);
+		unpin_user_pages_dirty_lock(m->page_list, rc, m->write);
 		rc = -EFAULT;
-		goto fail_get_user_pages;
+		goto fail_pin_user_pages;
 	}
 
 	rc = genwqe_map_pages(cd, m->page_list, m->nr_pages, m->dma_list);
@@ -618,9 +594,9 @@ int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
 	return 0;
 
  fail_free_user_pages:
-	genwqe_free_user_pages(m->page_list, m->nr_pages, m->write);
+	unpin_user_pages_dirty_lock(m->page_list, m->nr_pages, m->write);
 
- fail_get_user_pages:
+ fail_pin_user_pages:
 	kfree(m->page_list);
 	m->page_list = NULL;
 	m->dma_list = NULL;
@@ -650,8 +626,8 @@ int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m)
 		genwqe_unmap_pages(cd, m->dma_list, m->nr_pages);
 
 	if (m->page_list) {
-		genwqe_free_user_pages(m->page_list, m->nr_pages, m->write);
-
+		unpin_user_pages_dirty_lock(m->page_list, m->nr_pages,
+					    m->write);
 		kfree(m->page_list);
 		m->page_list = NULL;
 		m->dma_list = NULL;
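
Note that unpin_user_pages_dirty_lock() lets the driver delete its
genwqe_free_user_pages() helper outright, because the core API performs
the same per-page work. Conceptually it behaves like the following
simplified sketch (not the actual mm/gup.c implementation, which also
handles compound pages and pin accounting):

	void unpin_user_pages_dirty_lock(struct page **pages,
					 unsigned long npages,
					 bool make_dirty)
	{
		unsigned long i;

		for (i = 0; i < npages; i++) {
			if (make_dirty)
				set_page_dirty_lock(pages[i]);
			unpin_user_page(pages[i]); /* pin-aware put_page() */
		}
	}

One nuance: the deleted helper skipped NULL entries in page_list, while
the new calls do not need that check, since the driver only ever passes
the first rc entries that pin_user_pages_fast() actually filled.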