mm: migrate: clean up migrate_prep{_local}

The migrate_prep{_local} helpers never fail, so it is pointless for them to have a return value and for callers to check it. Make them return void and drop the dead error handling in the callers.

Link: https://lkml.kernel.org/r/20201113205359.556831-5-shy828301@gmail.com
Signed-off-by: Yang Shi <shy828301@gmail.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Song Liu <songliubraving@fb.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 236c32eb10
parent c77c5cbafe
Yang Shi, 2020-12-14 19:13:13 -08:00, committed by Linus Torvalds
3 changed files with 6 additions and 14 deletions
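For illustration, a minimal standalone sketch of the cleanup pattern this patch applies (the names prep_always_succeeds, prep and the caller below are hypothetical, not kernel code): a helper that cannot fail should return void, so callers stop carrying dead error-handling paths.

/* Hypothetical example, not kernel code: drop an always-zero return value. */

/* Before: the return value is always 0, so every caller's check is dead code. */
static int prep_always_succeeds(void)
{
	/* ... unconditional preparation work ... */
	return 0;
}

/* After: the signature itself states that failure is impossible. */
static void prep(void)
{
	/* ... the same unconditional preparation work ... */
}

int main(void)
{
	int err;

	/* Old caller pattern: err can never be non-zero. */
	err = prep_always_succeeds();
	if (err)
		return err;

	/* New caller pattern: just call it. */
	prep();
	return 0;
}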

include/linux/migrate.h
@@ -45,8 +45,8 @@ extern struct page *alloc_migration_target(struct page *page, unsigned long priv
 extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
 extern void putback_movable_page(struct page *page);
 
-extern int migrate_prep(void);
-extern int migrate_prep_local(void);
+extern void migrate_prep(void);
+extern void migrate_prep_local(void);
 extern void migrate_page_states(struct page *newpage, struct page *page);
 extern void migrate_page_copy(struct page *newpage, struct page *page);
 extern int migrate_huge_page_move_mapping(struct address_space *mapping,

mm/mempolicy.c
@@ -1114,9 +1114,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
 	int err;
 	nodemask_t tmp;
 
-	err = migrate_prep();
-	if (err)
-		return err;
+	migrate_prep();
 
 	mmap_read_lock(mm);
 
@@ -1315,9 +1313,7 @@ static long do_mbind(unsigned long start, unsigned long len,
 
 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
 
-		err = migrate_prep();
-		if (err)
-			goto mpol_out;
+		migrate_prep();
 	}
 	{
 		NODEMASK_SCRATCH(scratch);

mm/migrate.c
@@ -62,7 +62,7 @@
  * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
  * undesirable, use migrate_prep_local()
  */
-int migrate_prep(void)
+void migrate_prep(void)
 {
 	/*
 	 * Clear the LRU lists so pages can be isolated.
@@ -71,16 +71,12 @@ int migrate_prep(void)
 	 * pages that may be busy.
 	 */
 	lru_add_drain_all();
-
-	return 0;
 }
 
 /* Do the necessary work of migrate_prep but not if it involves other CPUs */
-int migrate_prep_local(void)
+void migrate_prep_local(void)
 {
 	lru_add_drain();
-
-	return 0;
 }
 
 int isolate_movable_page(struct page *page, isolate_mode_t mode)