xen: use same main loop for counting and remapping pages

Instead of having two functions for cycling through the E820 map in
order to count the pages to be remapped and to remap them later, just
use one function with a caller-supplied sub-function called for each
region to be processed. This eliminates the possibility of a mismatch
between the two loops, which showed up in certain configurations.

Suggested-by: Ed Swierk <eswierk@skyportsystems.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Commit: dd14be92fb (parent: f0f393877c)
Author: Juergen Gross, 2016-05-18 16:44:54 +02:00, committed by David Vrabel
1 changed file with 26 additions and 39 deletions
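The diff below is easiest to read with the new control flow in mind: one walker over the memory map calls a caller-supplied function for each non-RAM region, once with a counting callback and once with the remapping callback. The following standalone sketch illustrates that pattern outside the kernel; the toy map and the names foreach_non_ram, count_pages and remap_pages are invented for illustration and are not the kernel's E820 code.

/*
 * Minimal sketch of the pattern introduced by this patch: a single
 * walker over a (fake) memory map calls a caller-supplied function per
 * non-RAM region, so the "count" pass and the "remap" pass can never
 * disagree about which regions they visit.  Unlike the kernel code,
 * this toy does not merge adjacent non-RAM regions and gaps.
 */
#include <stdio.h>

struct toy_region {
	unsigned long start_pfn;
	unsigned long end_pfn;
	int is_ram;
};

/* A made-up memory map standing in for xen_e820_map. */
static const struct toy_region toy_map[] = {
	{ 0x000, 0x0a0, 1 },	/* RAM */
	{ 0x0a0, 0x100, 0 },	/* hole/reserved */
	{ 0x100, 0x200, 1 },	/* RAM */
};

/*
 * The single main loop: calls func for every non-RAM region, threading
 * the previous return value through, analogous to how
 * xen_foreach_remap_area() threads last_val/ret_val.
 */
static unsigned long foreach_non_ram(unsigned long nr_pages,
	unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn,
			      unsigned long nr_pages, unsigned long last_val))
{
	unsigned long ret_val = 0;
	size_t i;

	for (i = 0; i < sizeof(toy_map) / sizeof(toy_map[0]); i++)
		if (!toy_map[i].is_ram)
			ret_val = func(toy_map[i].start_pfn,
				       toy_map[i].end_pfn, nr_pages, ret_val);
	return ret_val;
}

/* Counting callback: only pages below nr_pages contribute to the sum. */
static unsigned long count_pages(unsigned long start_pfn, unsigned long end_pfn,
				 unsigned long nr_pages, unsigned long count)
{
	if (start_pfn >= nr_pages)
		return count;
	return count + (end_pfn < nr_pages ? end_pfn : nr_pages) - start_pfn;
}

/* "Remap" callback: here it just reports what it would do. */
static unsigned long remap_pages(unsigned long start_pfn, unsigned long end_pfn,
				 unsigned long nr_pages, unsigned long last)
{
	(void)nr_pages;
	printf("remapping pfns [%#lx, %#lx)\n", start_pfn, end_pfn);
	return last + (end_pfn - start_pfn);
}

int main(void)
{
	unsigned long nr_pages = 0x200;

	/* Both passes walk the map through the very same loop. */
	printf("pages to remap: %lu\n", foreach_non_ram(nr_pages, count_pages));
	foreach_non_ram(nr_pages, remap_pages);
	return 0;
}

Because both passes share the one loop, any change to how regions are selected automatically applies to counting and remapping alike, which is the property the patch relies on.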


@@ -393,6 +393,9 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
 	unsigned long i = 0;
 	unsigned long n = end_pfn - start_pfn;
 
+	if (remap_pfn == 0)
+		remap_pfn = nr_pages;
+
 	while (i < n) {
 		unsigned long cur_pfn = start_pfn + i;
 		unsigned long left = n - i;
@@ -438,17 +441,29 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
 	return remap_pfn;
 }
 
-static void __init xen_set_identity_and_remap(unsigned long nr_pages)
+static unsigned long __init xen_count_remap_pages(
+	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
+	unsigned long remap_pages)
+{
+	if (start_pfn >= nr_pages)
+		return remap_pages;
+
+	return remap_pages + min(end_pfn, nr_pages) - start_pfn;
+}
+
+static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
+	unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn,
+			      unsigned long nr_pages, unsigned long last_val))
 {
 	phys_addr_t start = 0;
-	unsigned long last_pfn = nr_pages;
+	unsigned long ret_val = 0;
 	const struct e820entry *entry = xen_e820_map;
 	int i;
 
 	/*
 	 * Combine non-RAM regions and gaps until a RAM region (or the
-	 * end of the map) is reached, then set the 1:1 map and
-	 * remap the memory in those non-RAM regions.
+	 * end of the map) is reached, then call the provided function
+	 * to perform its duty on the non-RAM region.
 	 *
 	 * The combined non-RAM regions are rounded to a whole number
 	 * of pages so any partial pages are accessible via the 1:1
@@ -466,14 +481,13 @@ static void __init xen_set_identity_and_remap(unsigned long nr_pages)
 				end_pfn = PFN_UP(entry->addr);
 
 			if (start_pfn < end_pfn)
-				last_pfn = xen_set_identity_and_remap_chunk(
-						start_pfn, end_pfn, nr_pages,
-						last_pfn);
+				ret_val = func(start_pfn, end_pfn, nr_pages,
+					       ret_val);
 			start = end;
 		}
 	}
 
-	pr_info("Released %ld page(s)\n", xen_released_pages);
+	return ret_val;
 }
 
 /*
@@ -596,35 +610,6 @@ static void __init xen_ignore_unusable(void)
 	}
 }
 
-static unsigned long __init xen_count_remap_pages(unsigned long max_pfn)
-{
-	unsigned long extra = 0;
-	unsigned long start_pfn, end_pfn;
-	const struct e820entry *entry = xen_e820_map;
-	int i;
-
-	end_pfn = 0;
-	for (i = 0; i < xen_e820_map_entries; i++, entry++) {
-		start_pfn = PFN_DOWN(entry->addr);
-		/* Adjacent regions on non-page boundaries handling! */
-		end_pfn = min(end_pfn, start_pfn);
-
-		if (start_pfn >= max_pfn)
-			return extra + max_pfn - end_pfn;
-
-		/* Add any holes in map to result. */
-		extra += start_pfn - end_pfn;
-
-		end_pfn = PFN_UP(entry->addr + entry->size);
-		end_pfn = min(end_pfn, max_pfn);
-
-		if (entry->type != E820_RAM)
-			extra += end_pfn - start_pfn;
-	}
-
-	return extra;
-}
-
 bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
 {
 	struct e820entry *entry;
@@ -804,7 +789,7 @@ char * __init xen_memory_setup(void)
 	max_pages = xen_get_max_pages();
 
 	/* How many extra pages do we need due to remapping? */
-	max_pages += xen_count_remap_pages(max_pfn);
+	max_pages += xen_foreach_remap_area(max_pfn, xen_count_remap_pages);
 
 	if (max_pages > max_pfn)
 		extra_pages += max_pages - max_pfn;
@@ -922,7 +907,9 @@ char * __init xen_memory_setup(void)
 	 * Set identity map on non-RAM pages and prepare remapping the
 	 * underlying RAM.
 	 */
-	xen_set_identity_and_remap(max_pfn);
+	xen_foreach_remap_area(max_pfn, xen_set_identity_and_remap_chunk);
+
+	pr_info("Released %ld page(s)\n", xen_released_pages);
 
 	return "Xen";
 }