mm/memory_hotplug: simplify page offlining
We make sure that we cannot have any memory holes right at the beginning
of offline_pages(). We no longer need walk_system_ram_range() and can
call test_pages_isolated() and __offline_isolated_pages() directly.

offlined_pages always corresponds to nr_pages, so we can simplify that.

[akpm@linux-foundation.org: patch conflict resolution]

Signed-off-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Wei Yang <richard.weiyang@linux.alibaba.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Pankaj Gupta <pankaj.gupta.linux@gmail.com>
Cc: Charan Teja Reddy <charante@codeaurora.org>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Logan Gunthorpe <logang@deltatee.com>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Michel Lespinasse <walken@google.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Tony Luck <tony.luck@intel.com>
Link: https://lkml.kernel.org/r/20200819175957.28465-4-david@redhat.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 0a1a9a0008
parent 4986fac160
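To make the shape of the change easier to see before reading the hunks, here
is a condensed before/after sketch of the relevant offline_pages() steps
(an illustrative excerpt only, not a standalone program; the surrounding
loop and error handling are elided):

	/*
	 * Before: the range could contain memory holes, so each step had
	 * to walk only the System RAM pieces via callbacks, and the
	 * offline callback accumulated the number of pages it offlined.
	 */
	ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,
				    NULL, check_pages_isolated_cb);
	walk_system_ram_range(start_pfn, end_pfn - start_pfn,
			      &offlined_pages, offline_isolated_pages_cb);

	/*
	 * After: offline_pages() rejects ranges with holes up front, so
	 * [start_pfn, end_pfn) is known to be plain System RAM and the
	 * helpers can be called directly; the offlined count is then
	 * always nr_pages, so the separate offlined_pages counter goes away.
	 */
	ret = test_pages_isolated(start_pfn, end_pfn, MEMORY_OFFLINE);
	__offline_isolated_pages(start_pfn, end_pfn);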
mm/memory_hotplug.c:
@@ -1384,28 +1384,6 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 	return ret;
 }
 
-/* Mark all sections offline and remove all free pages from the buddy. */
-static int
-offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
-			  void *data)
-{
-	unsigned long *offlined_pages = (unsigned long *)data;
-
-	*offlined_pages += __offline_isolated_pages(start, start + nr_pages);
-	return 0;
-}
-
-/*
- * Check all pages in range, recorded as memory resource, are isolated.
- */
-static int
-check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
-			void *data)
-{
-	return test_pages_isolated(start_pfn, start_pfn + nr_pages,
-				   MEMORY_OFFLINE);
-}
-
 static int __init cmdline_parse_movable_node(char *p)
 {
 	movable_node_enabled = true;
@@ -1492,7 +1470,7 @@ static int count_system_ram_pages_cb(unsigned long start_pfn,
 int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages)
 {
 	const unsigned long end_pfn = start_pfn + nr_pages;
-	unsigned long pfn, system_ram_pages = 0, offlined_pages = 0;
+	unsigned long pfn, system_ram_pages = 0;
 	int ret, node, nr_isolate_pageblock;
 	unsigned long flags;
 	struct zone *zone;
@@ -1590,9 +1568,7 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages)
 			reason = "failure to dissolve huge pages";
 			goto failed_removal_isolated;
 		}
-		/* check again */
-		ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,
-					    NULL, check_pages_isolated_cb);
+
 		/*
 		 * per-cpu pages are drained in start_isolate_page_range, but if
 		 * there are still pages that are not free, make sure that we
@@ -1605,15 +1581,15 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages)
 		 * because has_unmovable_pages explicitly checks for
 		 * PageBuddy on freed pages on other zones.
 		 */
+		ret = test_pages_isolated(start_pfn, end_pfn, MEMORY_OFFLINE);
 		if (ret)
 			drain_all_pages(zone);
 	} while (ret);
 
-	/* Ok, all of our target is isolated.
-	   We cannot do rollback at this point. */
-	walk_system_ram_range(start_pfn, end_pfn - start_pfn,
-			      &offlined_pages, offline_isolated_pages_cb);
-	pr_info("Offlined Pages %ld\n", offlined_pages);
+	/* Mark all sections offline and remove free pages from the buddy. */
+	__offline_isolated_pages(start_pfn, end_pfn);
+	pr_info("Offlined Pages %ld\n", nr_pages);
+
 	/*
 	 * Onlining will reset pagetype flags and makes migrate type
 	 * MOVABLE, so just need to decrease the number of isolated
@@ -1624,11 +1600,11 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages)
 	spin_unlock_irqrestore(&zone->lock, flags);
 
 	/* removal success */
-	adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages);
-	zone->present_pages -= offlined_pages;
+	adjust_managed_page_count(pfn_to_page(start_pfn), -nr_pages);
+	zone->present_pages -= nr_pages;
 
 	pgdat_resize_lock(zone->zone_pgdat, &flags);
-	zone->zone_pgdat->node_present_pages -= offlined_pages;
+	zone->zone_pgdat->node_present_pages -= nr_pages;
 	pgdat_resize_unlock(zone->zone_pgdat, &flags);
 
 	init_per_zone_wmark_min();