drivers/IB,qib: optimize mmap_sem usage

The driver uses mmap_sem for both pinned_vm accounting and
get_user_pages(). Because rdma drivers might want to use gup_longterm() in
the future, we still need some form of mmap_sem serialization (as opposed
to dropping it entirely in favor of gup_fast()). Now that pinned_vm is an
atomic64 counter, the writer lock can be converted to a reader lock.

This also fixes a bug where __qib_get_user_pages did not take the current
value of pinned_vm into account when checking against RLIMIT_MEMLOCK.
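
In short, the pin path now follows this pattern (condensed from the diff
below; the pinning loop and error unwinding are omitted):

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	locked = atomic64_add_return(num_pages, &current->mm->pinned_vm);
	if (locked > lock_limit && !capable(CAP_IPC_LOCK))
		goto bail;	/* bail: drops the pinned_vm bump again */

	down_read(&current->mm->mmap_sem);	/* reader is now sufficient */
	ret = get_user_pages_longterm(start_page, num_pages,
				      FOLL_WRITE | FOLL_FORCE, p, NULL);
	up_read(&current->mm->mmap_sem);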

Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Authored by Davidlohr Bueso on 2019-02-06 09:59:17 -08:00; committed by Jason Gunthorpe
parent 4f564ff3d4
commit 3a2a1e9056
1 changed file with 27 additions and 46 deletions

drivers/infiniband/hw/qib/qib_user_pages.c

@@ -49,43 +49,6 @@ static void __qib_release_user_pages(struct page **p, size_t num_pages,
 	}
 }
 
-/*
- * Call with current->mm->mmap_sem held.
- */
-static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
-				struct page **p)
-{
-	unsigned long lock_limit;
-	size_t got;
-	int ret;
-
-	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
-
-	if (num_pages > lock_limit && !capable(CAP_IPC_LOCK)) {
-		ret = -ENOMEM;
-		goto bail;
-	}
-
-	for (got = 0; got < num_pages; got += ret) {
-		ret = get_user_pages_longterm(start_page + got * PAGE_SIZE,
-					      num_pages - got,
-					      FOLL_WRITE | FOLL_FORCE,
-					      p + got, NULL);
-		if (ret < 0)
-			goto bail_release;
-	}
-
-	atomic64_add(num_pages, &current->mm->pinned_vm);
-
-	ret = 0;
-	goto bail;
-
-bail_release:
-	__qib_release_user_pages(p, got, 0);
-bail:
-	return ret;
-}
-
 /**
  * qib_map_page - a safety wrapper around pci_map_page()
  *
@@ -137,26 +100,44 @@ int qib_map_page(struct pci_dev *hwdev, struct page *page, dma_addr_t *daddr)
 int qib_get_user_pages(unsigned long start_page, size_t num_pages,
 		       struct page **p)
 {
+	unsigned long locked, lock_limit;
+	size_t got;
 	int ret;
 
-	down_write(&current->mm->mmap_sem);
+	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+	locked = atomic64_add_return(num_pages, &current->mm->pinned_vm);
 
-	ret = __qib_get_user_pages(start_page, num_pages, p);
+	if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
+		ret = -ENOMEM;
+		goto bail;
+	}
 
-	up_write(&current->mm->mmap_sem);
+	down_read(&current->mm->mmap_sem);
+	for (got = 0; got < num_pages; got += ret) {
+		ret = get_user_pages_longterm(start_page + got * PAGE_SIZE,
+					      num_pages - got,
+					      FOLL_WRITE | FOLL_FORCE,
+					      p + got, NULL);
+		if (ret < 0) {
+			up_read(&current->mm->mmap_sem);
+			goto bail_release;
+		}
+	}
+	up_read(&current->mm->mmap_sem);
 
+	return 0;
+bail_release:
+	__qib_release_user_pages(p, got, 0);
+bail:
+	atomic64_sub(num_pages, &current->mm->pinned_vm);
 	return ret;
 }
 
 void qib_release_user_pages(struct page **p, size_t num_pages)
 {
-	if (current->mm) /* during close after signal, mm can be NULL */
-		down_write(&current->mm->mmap_sem);
-
 	__qib_release_user_pages(p, num_pages, 1);
 
-	if (current->mm) {
+	/* during close after signal, mm can be NULL */
+	if (current->mm)
 		atomic64_sub(num_pages, &current->mm->pinned_vm);
-		up_write(&current->mm->mmap_sem);
-	}
 }