mm: migrate: remove unused mode argument
migrate_page_move_mapping() doesn't use the mode argument. Remove it
and update callers accordingly.

Link: http://lkml.kernel.org/r/20190508210301.8472-1-keith.busch@intel.com
Signed-off-by: Keith Busch <keith.busch@intel.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 371096949f
parent 9a84503042
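For any other ->migratepage implementation that still forwards its mode
argument, the conversion is mechanical: drop mode from the
migrate_page_move_mapping() call; the callback itself keeps receiving
enum migrate_mode from the VFS. A minimal sketch of such a caller against
the new prototype, mirroring the generic migrate_page() updated below
(the example_migrate_page name and its filesystem are hypothetical, not
part of this patch):

#include <linux/fs.h>
#include <linux/migrate.h>
#include <linux/mm.h>

/*
 * Hypothetical ->migratepage callback, shown only to illustrate the new
 * migrate_page_move_mapping() prototype; it follows the same pattern as
 * the callers updated by this patch.
 */
static int example_migrate_page(struct address_space *mapping,
				struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	int rc;

	/* mode is no longer passed down; only extra_count remains. */
	rc = migrate_page_move_mapping(mapping, newpage, page, 0);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	/* Transfer contents and page state to the new page. */
	migrate_page_copy(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}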
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -425,7 +425,7 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
 	BUG_ON(PageWriteback(old));
 	get_page(new);
 
-	rc = migrate_page_move_mapping(mapping, new, old, mode, 1);
+	rc = migrate_page_move_mapping(mapping, new, old, 1);
 	if (rc != MIGRATEPAGE_SUCCESS) {
 		put_page(new);
 		goto out_unlock;
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -2919,7 +2919,7 @@ int f2fs_migrate_page(struct address_space *mapping,
 	/* one extra reference was held for atomic_write page */
 	extra_count = atomic_written ? 1 : 0;
 	rc = migrate_page_move_mapping(mapping, newpage,
-				page, mode, extra_count);
+				page, extra_count);
 	if (rc != MIGRATEPAGE_SUCCESS) {
 		if (atomic_written)
 			mutex_unlock(&fi->inmem_lock);
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -566,7 +566,7 @@ iomap_migrate_page(struct address_space *mapping, struct page *newpage,
 {
 	int ret;
 
-	ret = migrate_page_move_mapping(mapping, newpage, page, mode, 0);
+	ret = migrate_page_move_mapping(mapping, newpage, page, 0);
 	if (ret != MIGRATEPAGE_SUCCESS)
 		return ret;
 
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -1470,7 +1470,7 @@ static int ubifs_migrate_page(struct address_space *mapping,
 {
 	int rc;
 
-	rc = migrate_page_move_mapping(mapping, newpage, page, mode, 0);
+	rc = migrate_page_move_mapping(mapping, newpage, page, 0);
 	if (rc != MIGRATEPAGE_SUCCESS)
 		return rc;
 
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -77,8 +77,7 @@ extern void migrate_page_copy(struct page *newpage, struct page *page);
 extern int migrate_huge_page_move_mapping(struct address_space *mapping,
 				  struct page *newpage, struct page *page);
 extern int migrate_page_move_mapping(struct address_space *mapping,
-		struct page *newpage, struct page *page, enum migrate_mode mode,
-		int extra_count);
+		struct page *newpage, struct page *page, int extra_count);
 #else
 
 static inline void putback_movable_pages(struct list_head *l) {}
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -394,8 +394,7 @@ static int expected_page_refs(struct address_space *mapping, struct page *page)
  * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
  */
 int migrate_page_move_mapping(struct address_space *mapping,
-		struct page *newpage, struct page *page, enum migrate_mode mode,
-		int extra_count)
+		struct page *newpage, struct page *page, int extra_count)
 {
 	XA_STATE(xas, &mapping->i_pages, page_index(page));
 	struct zone *oldzone, *newzone;
@@ -681,7 +680,7 @@ int migrate_page(struct address_space *mapping,
 
 	BUG_ON(PageWriteback(page));	/* Writeback must be complete */
 
-	rc = migrate_page_move_mapping(mapping, newpage, page, mode, 0);
+	rc = migrate_page_move_mapping(mapping, newpage, page, 0);
 
 	if (rc != MIGRATEPAGE_SUCCESS)
 		return rc;
@@ -780,7 +779,7 @@ recheck_buffers:
 		}
 	}
 
-	rc = migrate_page_move_mapping(mapping, newpage, page, mode, 0);
+	rc = migrate_page_move_mapping(mapping, newpage, page, 0);
 	if (rc != MIGRATEPAGE_SUCCESS)
 		goto unlock_buffers;
 