mm/sparse.c: introduce a new function clear_subsection_map()

Factor out the code that clears the subsection map of one memory region
from section_deactivate() into a new function, clear_subsection_map().

Also add a helper, is_subsection_map_empty(), to check whether the
current subsection map is empty.

Signed-off-by: Baoquan He <bhe@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Acked-by: Pankaj Gupta <pankaj.gupta.linux@gmail.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Wei Yang <richard.weiyang@gmail.com>
Link: http://lkml.kernel.org/r/20200312124414.439-3-bhe@redhat.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 37bc15020a (parent 5d87255cad)
Author:    Baoquan He, 2020-04-06 20:07:03 -07:00
Committer: Linus Torvalds
1 file changed, 23 insertions(+), 8 deletions(-)

--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -705,15 +705,11 @@ static void free_map_bootmem(struct page *memmap)
 }
 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
 
-static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
-		struct vmem_altmap *altmap)
+static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
 {
 	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
 	DECLARE_BITMAP(tmp, SUBSECTIONS_PER_SECTION) = { 0 };
 	struct mem_section *ms = __pfn_to_section(pfn);
-	bool section_is_early = early_section(ms);
-	struct page *memmap = NULL;
-	bool empty;
 	unsigned long *subsection_map = ms->usage
 		? &ms->usage->subsection_map[0] : NULL;
 
@@ -724,8 +720,28 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
 	if (WARN(!subsection_map || !bitmap_equal(tmp, map, SUBSECTIONS_PER_SECTION),
 			"section already deactivated (%#lx + %ld)\n",
 			pfn, nr_pages))
-		return;
+		return -EINVAL;
+	bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);
+	return 0;
+}
+
+static bool is_subsection_map_empty(struct mem_section *ms)
+{
+	return bitmap_empty(&ms->usage->subsection_map[0],
+			    SUBSECTIONS_PER_SECTION);
+}
+
+static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
+		struct vmem_altmap *altmap)
+{
+	struct mem_section *ms = __pfn_to_section(pfn);
+	bool section_is_early = early_section(ms);
+	struct page *memmap = NULL;
+	bool empty;
+
+	if (clear_subsection_map(pfn, nr_pages))
+		return;
 
 	/*
 	 * There are 3 cases to handle across two configurations
 	 * (SPARSEMEM_VMEMMAP={y,n}):
@@ -743,8 +759,7 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
 	 *
 	 * For 2/ and 3/ the SPARSEMEM_VMEMMAP={y,n} cases are unified
 	 */
-	bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);
-	empty = bitmap_empty(subsection_map, SUBSECTIONS_PER_SECTION);
+	empty = is_subsection_map_empty(ms);
 	if (empty) {
 		unsigned long section_nr = pfn_to_section_nr(pfn);
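
As an aside, the effect of the two new helpers is easy to model outside the kernel: clear a contiguous run of subsection bits, refuse if any bit in that range is already clear (the WARN/-EINVAL path above), then test whether the whole map has become empty. The sketch below is a self-contained user-space analogy, not kernel code: the value 64 for SUBSECTIONS_PER_SECTION, the names clear_subsection_bits()/subsection_map_is_empty(), and the single 64-bit word standing in for the real bitmap are illustrative assumptions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SUBSECTIONS_PER_SECTION 64      /* assumed width for this sketch */

/*
 * Clear the bits covering [start, start + nr). Refuse if any of them is
 * already clear, mirroring the WARN + -EINVAL path in clear_subsection_map().
 */
static int clear_subsection_bits(uint64_t *map, unsigned int start, unsigned int nr)
{
        uint64_t mask;

        if (nr == 0 || start + nr > SUBSECTIONS_PER_SECTION)
                return -1;
        mask = (nr == 64 ? ~0ULL : ((1ULL << nr) - 1)) << start;

        if ((*map & mask) != mask)      /* a subsection in the range is not populated */
                return -1;

        *map ^= mask;                   /* like bitmap_xor(): flips (clears) exactly those bits */
        return 0;
}

/* Counterpart of is_subsection_map_empty(): no populated subsections left. */
static bool subsection_map_is_empty(uint64_t map)
{
        return map == 0;
}

int main(void)
{
        uint64_t map = ~0ULL;           /* pretend every subsection of the section is populated */

        if (clear_subsection_bits(&map, 0, 32) == 0)
                printf("first half cleared,  empty=%d\n", subsection_map_is_empty(map));
        if (clear_subsection_bits(&map, 32, 32) == 0)
                printf("second half cleared, empty=%d\n", subsection_map_is_empty(map));
        return 0;
}

Built with any C99 compiler, this prints empty=0 after the first half is cleared and empty=1 after the second, mirroring how section_deactivate() only tears down the section-wide resources once is_subsection_map_empty() reports true.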