mm: gup: use get_user_pages_unlocked within get_user_pages_fast

This allows the get_user_pages_fast slow path to release the mmap_sem
before blocking.

Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Reviewed-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andres Lagar-Cavilla <andreslc@google.com>
Cc: Peter Feiner <pfeiner@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Author:    Andrea Arcangeli, 2015-02-11 15:27:23 -08:00
Committer: Linus Torvalds
Commit:    a7b780750e (parent 0fd71a56f4)
7 changed files with 16 additions and 33 deletions
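
All seven hunks below make the same change to the get_user_pages_fast() slow path. As a rough caller-side sketch (illustrative only, not part of the patch; nr_pages stands in for the per-arch count expressions such as (end - start) >> PAGE_SHIFT), the conversion is:

        /*
         * Before: mmap_sem is read-locked across the whole slow-path call,
         * so a page fault that blocks (e.g. waiting on I/O) keeps the
         * semaphore held for the entire wait.
         */
        down_read(&mm->mmap_sem);
        ret = get_user_pages(current, mm, start, nr_pages, write, 0, pages, NULL);
        up_read(&mm->mmap_sem);

        /*
         * After: get_user_pages_unlocked() takes mmap_sem itself and can
         * release it (via FAULT_FLAG_ALLOW_RETRY / VM_FAULT_RETRY) before a
         * fault blocks, re-acquiring it to finish the walk.  The caller must
         * not hold mmap_sem when calling it.
         */
        ret = get_user_pages_unlocked(current, mm, start, nr_pages, write, 0, pages);

The net effect is that the fast-GUP fallback no longer holds mmap_sem while it sleeps in the fault path, so mmap_sem writers are not stuck behind a long fault.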

@@ -301,11 +301,9 @@ slow_irqon:
         start += nr << PAGE_SHIFT;
         pages += nr;
-        down_read(&mm->mmap_sem);
-        ret = get_user_pages(current, mm, start,
-                        (end - start) >> PAGE_SHIFT,
-                        write, 0, pages, NULL);
-        up_read(&mm->mmap_sem);
+        ret = get_user_pages_unlocked(current, mm, start,
+                        (end - start) >> PAGE_SHIFT,
+                        write, 0, pages);
         /* Have to be a bit careful with return values */
         if (nr > 0) {

@@ -235,10 +235,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
         /* Try to get the remaining pages with get_user_pages */
         start += nr << PAGE_SHIFT;
         pages += nr;
-        down_read(&mm->mmap_sem);
-        ret = get_user_pages(current, mm, start,
-                        nr_pages - nr, write, 0, pages, NULL);
-        up_read(&mm->mmap_sem);
+        ret = get_user_pages_unlocked(current, mm, start,
+                        nr_pages - nr, write, 0, pages);
         /* Have to be a bit careful with return values */
         if (nr > 0)
                 ret = (ret < 0) ? nr : ret + nr;

@@ -257,10 +257,8 @@ slow_irqon:
         start += nr << PAGE_SHIFT;
         pages += nr;
-        down_read(&mm->mmap_sem);
-        ret = get_user_pages(current, mm, start,
-                        (end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
-        up_read(&mm->mmap_sem);
+        ret = get_user_pages_unlocked(current, mm, start,
+                        (end - start) >> PAGE_SHIFT, write, 0, pages);
         /* Have to be a bit careful with return values */
         if (nr > 0) {

@@ -249,10 +249,8 @@ slow:
         start += nr << PAGE_SHIFT;
         pages += nr;
-        down_read(&mm->mmap_sem);
-        ret = get_user_pages(current, mm, start,
-                        (end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
-        up_read(&mm->mmap_sem);
+        ret = get_user_pages_unlocked(current, mm, start,
+                        (end - start) >> PAGE_SHIFT, write, 0, pages);
         /* Have to be a bit careful with return values */
         if (nr > 0) {

@@ -388,10 +388,9 @@ slow_irqon:
         start += nr << PAGE_SHIFT;
         pages += nr;
-        down_read(&mm->mmap_sem);
-        ret = get_user_pages(current, mm, start,
-                        (end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
-        up_read(&mm->mmap_sem);
+        ret = get_user_pages_unlocked(current, mm, start,
+                        (end - start) >> PAGE_SHIFT,
+                        write, 0, pages);
         /* Have to be a bit careful with return values */
         if (nr > 0) {

@@ -1243,10 +1243,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
         start += nr << PAGE_SHIFT;
         pages += nr;
-        down_read(&mm->mmap_sem);
-        ret = get_user_pages(current, mm, start,
-                        nr_pages - nr, write, 0, pages, NULL);
-        up_read(&mm->mmap_sem);
+        ret = get_user_pages_unlocked(current, mm, start,
+                        nr_pages - nr, write, 0, pages);
         /* Have to be a bit careful with return values */
         if (nr > 0) {

@@ -240,14 +240,8 @@ int __weak get_user_pages_fast(unsigned long start,
                         int nr_pages, int write, struct page **pages)
 {
         struct mm_struct *mm = current->mm;
-        int ret;
-        down_read(&mm->mmap_sem);
-        ret = get_user_pages(current, mm, start, nr_pages,
-                        write, 0, pages, NULL);
-        up_read(&mm->mmap_sem);
-        return ret;
+        return get_user_pages_unlocked(current, mm, start, nr_pages,
+                        write, 0, pages);
 }
 EXPORT_SYMBOL_GPL(get_user_pages_fast);