mm/page_isolation: fix potential warning from user

It makes sense to call the WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE)
from start_isolate_page_range(), but we should avoid triggering it from
userspace, i.e., from is_mem_section_removable(), because a non-root user
could crash the system if panic_on_warn is set.

While at it, simplify the code a bit by removing the now-unnecessary
"unmovable" jump label.

Link: http://lkml.kernel.org/r/20200120163915.1469-1-cai@lca.pw
Signed-off-by: Qian Cai <cai@lca.pw>
Suggested-by: Michal Hocko <mhocko@kernel.org>
Acked-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 4a55c0474a
commit 3d680bdf60
2 changed files with 15 additions and 14 deletions
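
For context: at the time of this commit, an unprivileged read of
/sys/devices/system/memory/memoryN/removable ends up in has_unmovable_pages()
via is_mem_section_removable(), while the isolation path
(start_isolate_page_range() -> set_migratetype_isolate()) is the only caller
that should complain. The stand-alone C sketch below models the resulting
split of responsibilities; it is illustrative only, not kernel code: struct
page, the sample pageblock array, the simplified WARN_ON_ONCE() and main()
are invented stand-ins, and only the function names mirror mm/page_alloc.c
and mm/page_isolation.c.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Invented stand-ins for kernel types and macros -- sketch only. */
struct page { bool reserved; };

#define WARN_ON_ONCE(cond) do {					\
	static bool __warned;					\
	if ((cond) && !__warned) {				\
		__warned = true;				\
		fprintf(stderr, "WARNING: %s\n", #cond);	\
	}							\
} while (0)

/*
 * Query helper: return the first unmovable page (or NULL) and let the
 * caller decide whether that deserves a warning -- mirrors the new
 * has_unmovable_pages() contract.
 */
static struct page *has_unmovable_pages(struct page *block, size_t n)
{
	for (size_t i = 0; i < n; i++)
		if (block[i].reserved)
			return &block[i];
	return NULL;
}

/* Isolation path (memory offlining, CMA): warning here is acceptable. */
static int set_migratetype_isolate(struct page *block, size_t n)
{
	struct page *unmovable = has_unmovable_pages(block, n);

	if (unmovable) {
		/* In the kernel: WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE) */
		WARN_ON_ONCE(unmovable != NULL);
		return -1;	/* stands in for -EBUSY */
	}
	return 0;
}

/* Userspace-triggerable query (sysfs "removable"): must stay silent. */
static bool is_mem_section_removable(struct page *block, size_t n)
{
	return has_unmovable_pages(block, n) == NULL;
}

int main(void)
{
	struct page block[4] = { {false}, {true}, {false}, {false} };

	printf("removable: %d\n", is_mem_section_removable(block, 4));
	printf("isolate:   %d\n", set_migratetype_isolate(block, 4));
	return 0;
}

With the sample block containing one reserved page, the query path simply
reports "not removable", while only the isolation path fails and emits the
one-time warning.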

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -8214,7 +8214,7 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page,
 		if (is_migrate_cma(migratetype))
 			return NULL;
 
-		goto unmovable;
+		return page;
 	}
 
 	for (; iter < pageblock_nr_pages; iter++) {
@@ -8224,7 +8224,7 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page,
 
 		page = pfn_to_page(pfn + iter);
 		if (PageReserved(page))
-			goto unmovable;
+			return page;
 
 		/*
 		 * If the zone is movable and we have ruled out all reserved
@@ -8244,7 +8244,7 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page,
 			unsigned int skip_pages;
 
 			if (!hugepage_migration_supported(page_hstate(head)))
-				goto unmovable;
+				return page;
 
 			skip_pages = compound_nr(head) - (page - head);
 			iter += skip_pages - 1;
@@ -8286,12 +8286,9 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page,
 		 * is set to both of a memory hole page and a _used_ kernel
 		 * page at boot.
 		 */
-		goto unmovable;
+		return page;
 	}
 	return NULL;
-unmovable:
-	WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE);
-	return pfn_to_page(pfn + iter);
 }
 
 #ifdef CONFIG_CONTIG_ALLOC

diff --git a/mm/page_isolation.c b/mm/page_isolation.c
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -54,14 +54,18 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_
 
 out:
 	spin_unlock_irqrestore(&zone->lock, flags);
-	if (!ret)
+	if (!ret) {
 		drain_all_pages(zone);
-	else if ((isol_flags & REPORT_FAILURE) && unmovable)
-		/*
-		 * printk() with zone->lock held will guarantee to trigger a
-		 * lockdep splat, so defer it here.
-		 */
-		dump_page(unmovable, "unmovable page");
+	} else {
+		WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE);
+
+		if ((isol_flags & REPORT_FAILURE) && unmovable)
+			/*
+			 * printk() with zone->lock held will likely trigger a
+			 * lockdep splat, so defer it here.
+			 */
+			dump_page(unmovable, "unmovable page");
+	}
 
 	return ret;
 }