xen: implement "extra" memory to reserve space for pages not present at boot

When using the e820 map to get the initial pseudo-physical address space,
look for either Xen-provided memory which doesn't lie within an E820
region, or an E820 RAM region which extends beyond the Xen-provided
memory range.

Count these pages, and add them to a new "extra memory" range.  This range
has an E820 RAM range to describe it - so the kernel will allocate page
structures for it - but it is also marked reserved so that the kernel
will not attempt to use it.

The balloon driver can then add this range as a set of currently
ballooned-out pages, which can be used to extend the domain beyond its
original size.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
This commit is contained in:
Jeremy Fitzhardinge 2010-08-30 16:41:02 -07:00
parent 35ae11fd14
commit 42ee1471e9
1 changed file with 27 additions and 2 deletions

View File

@ -34,6 +34,26 @@ extern void xen_sysenter_target(void);
extern void xen_syscall_target(void); extern void xen_syscall_target(void);
extern void xen_syscall32_target(void); extern void xen_syscall32_target(void);
/* Amount of extra memory space we add to the e820 ranges */
phys_addr_t xen_extra_mem_start, xen_extra_mem_size;
static __init void xen_add_extra_mem(unsigned long pages)
{
u64 size = (u64)pages * PAGE_SIZE;
if (!pages)
return;
e820_add_region(xen_extra_mem_start + xen_extra_mem_size, size, E820_RAM);
sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
reserve_early(xen_extra_mem_start + xen_extra_mem_size,
xen_extra_mem_start + xen_extra_mem_size + size,
"XEN EXTRA");
xen_extra_mem_size += size;
}
static unsigned long __init xen_release_chunk(phys_addr_t start_addr, static unsigned long __init xen_release_chunk(phys_addr_t start_addr,
phys_addr_t end_addr) phys_addr_t end_addr)
{ {
@ -105,7 +125,6 @@ static unsigned long __init xen_return_unused_memory(unsigned long max_pfn,
/** /**
* machine_specific_memory_setup - Hook for machine specific memory setup. * machine_specific_memory_setup - Hook for machine specific memory setup.
**/ **/
char * __init xen_memory_setup(void) char * __init xen_memory_setup(void)
{ {
static struct e820entry map[E820MAX] __initdata; static struct e820entry map[E820MAX] __initdata;
@ -114,6 +133,7 @@ char * __init xen_memory_setup(void)
unsigned long long mem_end; unsigned long long mem_end;
int rc; int rc;
struct xen_memory_map memmap; struct xen_memory_map memmap;
unsigned long extra_pages = 0;
int i; int i;
max_pfn = min(MAX_DOMAIN_PAGES, max_pfn); max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
@ -135,6 +155,7 @@ char * __init xen_memory_setup(void)
BUG_ON(rc); BUG_ON(rc);
e820.nr_map = 0; e820.nr_map = 0;
xen_extra_mem_start = mem_end;
for (i = 0; i < memmap.nr_entries; i++) { for (i = 0; i < memmap.nr_entries; i++) {
unsigned long long end = map[i].addr + map[i].size; unsigned long long end = map[i].addr + map[i].size;
if (map[i].type == E820_RAM) { if (map[i].type == E820_RAM) {
@ -143,6 +164,8 @@ char * __init xen_memory_setup(void)
if (end > mem_end) { if (end > mem_end) {
/* Truncate region to max_mem. */ /* Truncate region to max_mem. */
map[i].size -= end - mem_end; map[i].size -= end - mem_end;
extra_pages += PFN_DOWN(end - mem_end);
} }
} }
if (map[i].size > 0) if (map[i].size > 0)
@ -169,7 +192,9 @@ char * __init xen_memory_setup(void)
sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
xen_return_unused_memory(xen_start_info->nr_pages, &e820); extra_pages += xen_return_unused_memory(xen_start_info->nr_pages, &e820);
xen_add_extra_mem(extra_pages);
return "Xen"; return "Xen";
} }