mm/gup: longterm pin migration cleanup
When pages are longterm pinned, we must migrate them out of the movable
zone. The function that migrates them contains a hidden loop built
around a goto: it retries both when isolation fails and after a
successful migration. Make this code clearer by moving the loop into
the caller.

Link: https://lkml.kernel.org/r/20210215161349.246722-13-pasha.tatashin@soleen.com
Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: James Morris <jmorris@namei.org>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sasha Levin <sashal@kernel.org>
Cc: Steven Rostedt (VMware) <rostedt@goodmis.org>
Cc: Tyler Hicks <tyhicks@linux.microsoft.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit f68749ec34
parent 24dc20c75f

 mm/gup.c | 93
 1 file changed, 37 insertions(+), 56 deletions(-)
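To see the shape of the change in isolation before reading the diff, here is a
minimal, self-contained C sketch of the caller-driven retry pattern the patch
introduces. Every name in it (pin_longterm, check_and_migrate,
migrations_needed) is an illustrative stand-in rather than the kernel's own
code, but the return contract mirrors the one the patch documents: a positive
count means all pages are pinnable, zero means pages were unpinned and
migrated so the caller must re-pin and retry, and a negative value is a hard
error.

#include <stdio.h>

static int migrations_needed = 2;	/* pretend two rounds of migration */

/* One pass of the (stand-in) callee; no hidden goto-retry loop inside. */
static long check_and_migrate(long nr)
{
	if (migrations_needed > 0) {
		migrations_needed--;
		return 0;	/* pages were unpinned and migrated: retry */
	}
	return nr;		/* every page is pinnable: done */
}

/* The caller now owns the retry loop that used to hide behind a goto. */
static long pin_longterm(long nr)
{
	long rc;

	do {
		rc = nr;	/* stand-in for (re)pinning the pages */
		if (rc <= 0)
			break;
		rc = check_and_migrate(rc);
	} while (!rc);		/* rc == 0 means "unpinned, go again" */

	return rc;
}

int main(void)
{
	/* Succeeds on the third pass: two migration rounds, then a pin. */
	printf("pinned %ld pages\n", pin_longterm(4));
	return 0;
}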
diff --git a/mm/gup.c b/mm/gup.c
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1602,27 +1602,28 @@ struct page *get_dump_page(unsigned long addr)
 #endif /* CONFIG_ELF_CORE */
 
 #ifdef CONFIG_MIGRATION
-static long check_and_migrate_movable_pages(struct mm_struct *mm,
-					    unsigned long start,
-					    unsigned long nr_pages,
+/*
+ * Check whether all pages are pinnable, if so return number of pages. If some
+ * pages are not pinnable, migrate them, and unpin all pages. Return zero if
+ * pages were migrated, or if some pages were not successfully isolated.
+ * Return negative error if migration fails.
+ */
+static long check_and_migrate_movable_pages(unsigned long nr_pages,
 					    struct page **pages,
-					    struct vm_area_struct **vmas,
 					    unsigned int gup_flags)
 {
-	unsigned long i, isolation_error_count;
-	bool drain_allow;
+	unsigned long i;
+	unsigned long isolation_error_count = 0;
+	bool drain_allow = true;
 	LIST_HEAD(movable_page_list);
-	long ret = nr_pages;
-	struct page *prev_head, *head;
+	long ret = 0;
+	struct page *prev_head = NULL;
+	struct page *head;
 	struct migration_target_control mtc = {
 		.nid = NUMA_NO_NODE,
 		.gfp_mask = GFP_USER | __GFP_NOWARN,
 	};
 
-check_again:
-	prev_head = NULL;
-	isolation_error_count = 0;
-	drain_allow = true;
 	for (i = 0; i < nr_pages; i++) {
 		head = compound_head(pages[i]);
 		if (head == prev_head)
@@ -1660,47 +1661,27 @@ check_again:
 	 * in the correct zone.
 	 */
 	if (list_empty(&movable_page_list) && !isolation_error_count)
-		return ret;
+		return nr_pages;
 
+	if (gup_flags & FOLL_PIN) {
+		unpin_user_pages(pages, nr_pages);
+	} else {
+		for (i = 0; i < nr_pages; i++)
+			put_page(pages[i]);
+	}
 	if (!list_empty(&movable_page_list)) {
-		/*
-		 * drop the above get_user_pages reference.
-		 */
-		if (gup_flags & FOLL_PIN)
-			unpin_user_pages(pages, nr_pages);
-		else
-			for (i = 0; i < nr_pages; i++)
-				put_page(pages[i]);
-
 		ret = migrate_pages(&movable_page_list, alloc_migration_target,
 				    NULL, (unsigned long)&mtc, MIGRATE_SYNC,
 				    MR_LONGTERM_PIN);
-		if (ret) {
-			if (!list_empty(&movable_page_list))
-				putback_movable_pages(&movable_page_list);
-			return ret > 0 ? -ENOMEM : ret;
-		}
-
-		/* We unpinned pages before migration, pin them again */
-		ret = __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
-					      NULL, gup_flags);
-		if (ret <= 0)
-			return ret;
-		nr_pages = ret;
+		if (ret && !list_empty(&movable_page_list))
+			putback_movable_pages(&movable_page_list);
 	}
 
-	/*
-	 * check again because pages were unpinned, and we also might have
-	 * had isolation errors and need more pages to migrate.
-	 */
-	goto check_again;
+	return ret > 0 ? -ENOMEM : ret;
 }
 #else
-static long check_and_migrate_movable_pages(struct mm_struct *mm,
-					    unsigned long start,
-					    unsigned long nr_pages,
+static long check_and_migrate_movable_pages(unsigned long nr_pages,
 					    struct page **pages,
-					    struct vm_area_struct **vmas,
 					    unsigned int gup_flags)
 {
 	return nr_pages;
@@ -1718,22 +1699,22 @@ static long __gup_longterm_locked(struct mm_struct *mm,
 				  struct vm_area_struct **vmas,
 				  unsigned int gup_flags)
 {
-	unsigned long flags = 0;
+	unsigned int flags;
 	long rc;
 
-	if (gup_flags & FOLL_LONGTERM)
-		flags = memalloc_pin_save();
+	if (!(gup_flags & FOLL_LONGTERM))
+		return __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
+					       NULL, gup_flags);
+	flags = memalloc_pin_save();
+	do {
+		rc = __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
+					     NULL, gup_flags);
+		if (rc <= 0)
+			break;
+		rc = check_and_migrate_movable_pages(rc, pages, gup_flags);
+	} while (!rc);
+	memalloc_pin_restore(flags);
 
-	rc = __get_user_pages_locked(mm, start, nr_pages, pages, vmas, NULL,
-				     gup_flags);
-
-	if (gup_flags & FOLL_LONGTERM) {
-		if (rc > 0)
-			rc = check_and_migrate_movable_pages(mm, start, rc,
-							     pages, vmas,
-							     gup_flags);
-		memalloc_pin_restore(flags);
-	}
 	return rc;
 }
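A few consequences of the rewrite are visible in the hunks above. Because
re-pinning now happens in the caller's do/while loop,
check_and_migrate_movable_pages() no longer needs the mm, start, and vmas
arguments, so both the CONFIG_MIGRATION variant and the stub lose them. The
memalloc_pin_save()/memalloc_pin_restore() pair in __gup_longterm_locked()
now brackets the entire retry loop rather than a single attempt. And since
migrate_pages() returns either a negative errno or the count of pages it
could not migrate, the single exit point folds any positive residue into
-ENOMEM after putting still-isolated pages back with putback_movable_pages().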