Merge branch 'stable/autoballoon.v5.2' into stable/for-linus-3.5

* stable/autoballoon.v5.2:
  xen/setup: update VA mapping when releasing memory during setup
  xen/setup: Combine the two hypercall functions - since they are quite similar.
  xen/setup: Populate freed MFNs from non-RAM E820 entries and gaps to E820 RAM
  xen/setup: Only print "Freeing XXX-YYY pfn range: Z pages freed" if Z > 0
  xen/p2m: An early bootup variant of set_phys_to_machine
  xen/p2m: Collapse early_alloc_p2m_middle redundant checks.
  xen/p2m: Allow alloc_p2m_middle to call reserve_brk depending on argument
  xen/p2m: Move code around to allow for better re-usage.
commit 4b3451ad13
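Taken together, the series changes how xen_memory_setup() accounts for the memory it hands back to the hypervisor: PFNs released from non-RAM E820 regions and gaps are now populated back as RAM above xen_start_info->nr_pages instead of only being counted as ballooned-out "extra" memory. Condensed from the setup hunks further below (declarations and unrelated lines dropped), the new flow is roughly:

/* Condensed sketch of the post-merge xen_memory_setup() flow; see the
 * full hunks below for the real code. */
xen_released_pages = xen_set_identity_and_release(map, memmap.nr_entries, max_pfn);

/* Put the freed MFNs back as RAM in E820_RAM regions above nr_pages. */
populated = xen_populate_chunk(map, memmap.nr_entries,
                               max_pfn, &last_pfn, xen_released_pages);

/* Only the pages that could not be placed back remain "extra" (ballooned). */
extra_pages += (xen_released_pages - populated);

/* Populating may have pushed usable memory past the original max_pfn. */
if (last_pfn > max_pfn) {
    max_pfn = min(MAX_DOMAIN_PAGES, last_pfn);
    mem_end = PFN_PHYS(max_pfn);
}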
@@ -44,6 +44,7 @@ extern unsigned long machine_to_phys_nr;
 extern unsigned long get_phys_to_machine(unsigned long pfn);
 extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
+extern bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn);
 extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
 extern unsigned long set_phys_range_identity(unsigned long pfn_s,
                                              unsigned long pfn_e);

@@ -1316,7 +1316,6 @@ asmlinkage void __init xen_start_kernel(void)
     xen_raw_console_write("mapping kernel into physical memory\n");
     pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);
-    xen_ident_map_ISA();

     /* Allocate and initialize top and mid mfn levels for p2m structure */
     xen_build_mfn_list_list();

@@ -1929,29 +1929,6 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
 #endif
 }

-void __init xen_ident_map_ISA(void)
-{
-    unsigned long pa;
-
-    /*
-     * If we're dom0, then linear map the ISA machine addresses into
-     * the kernel's address space.
-     */
-    if (!xen_initial_domain())
-        return;
-
-    xen_raw_printk("Xen: setup ISA identity maps\n");
-
-    for (pa = ISA_START_ADDRESS; pa < ISA_END_ADDRESS; pa += PAGE_SIZE) {
-        pte_t pte = mfn_pte(PFN_DOWN(pa), PAGE_KERNEL_IO);
-
-        if (HYPERVISOR_update_va_mapping(PAGE_OFFSET + pa, pte, 0))
-            BUG();
-    }
-
-    xen_flush_tlb();
-}
-
 static void __init xen_post_allocator_init(void)
 {
     pv_mmu_ops.set_pte = xen_set_pte;

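The removal of xen_ident_map_ISA() appears to tie in with the "update VA mapping when releasing memory during setup" change: the new xen_set_identity_and_release_chunk() (in the setup hunks further below) rewrites the kernel VA mapping to 1:1 for every identity-mapped PFN that is still mapped, which takes care of the ISA range as well, so a dedicated dom0-only ISA pass is no longer needed. The relevant loop from that later hunk:

    for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
        (void)HYPERVISOR_update_va_mapping(
            (unsigned long)__va(pfn << PAGE_SHIFT),
            mfn_pte(pfn, PAGE_KERNEL_IO), 0);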
@@ -499,16 +499,18 @@ static bool alloc_p2m(unsigned long pfn)
     return true;
 }

-static bool __init __early_alloc_p2m(unsigned long pfn)
+static bool __init early_alloc_p2m_middle(unsigned long pfn, bool check_boundary)
 {
     unsigned topidx, mididx, idx;
+    unsigned long *p2m;
+    unsigned long *mid_mfn_p;

     topidx = p2m_top_index(pfn);
     mididx = p2m_mid_index(pfn);
     idx = p2m_index(pfn);

     /* Pfff.. No boundary cross-over, lets get out. */
-    if (!idx)
+    if (!idx && check_boundary)
         return false;

     WARN(p2m_top[topidx][mididx] == p2m_identity,
@@ -522,24 +524,66 @@ static bool __init __early_alloc_p2m(unsigned long pfn)
         return false;

     /* Boundary cross-over for the edges: */
-    if (idx) {
-        unsigned long *p2m = extend_brk(PAGE_SIZE, PAGE_SIZE);
-        unsigned long *mid_mfn_p;
+    p2m = extend_brk(PAGE_SIZE, PAGE_SIZE);

-        p2m_init(p2m);
+    p2m_init(p2m);

-        p2m_top[topidx][mididx] = p2m;
+    p2m_top[topidx][mididx] = p2m;

-        /* For save/restore we need to MFN of the P2M saved */
+    /* For save/restore we need to MFN of the P2M saved */

-        mid_mfn_p = p2m_top_mfn_p[topidx];
-        WARN(mid_mfn_p[mididx] != virt_to_mfn(p2m_missing),
-            "P2M_TOP_P[%d][%d] != MFN of p2m_missing!\n",
-            topidx, mididx);
-        mid_mfn_p[mididx] = virt_to_mfn(p2m);
-    }
-    return idx != 0;
+    mid_mfn_p = p2m_top_mfn_p[topidx];
+    WARN(mid_mfn_p[mididx] != virt_to_mfn(p2m_missing),
+        "P2M_TOP_P[%d][%d] != MFN of p2m_missing!\n",
+        topidx, mididx);
+    mid_mfn_p[mididx] = virt_to_mfn(p2m);
+
+    return true;
+}
+
+static bool __init early_alloc_p2m(unsigned long pfn)
+{
+    unsigned topidx = p2m_top_index(pfn);
+    unsigned long *mid_mfn_p;
+    unsigned long **mid;
+
+    mid = p2m_top[topidx];
+    mid_mfn_p = p2m_top_mfn_p[topidx];
+    if (mid == p2m_mid_missing) {
+        mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
+
+        p2m_mid_init(mid);
+
+        p2m_top[topidx] = mid;
+
+        BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
+    }
+    /* And the save/restore P2M tables.. */
+    if (mid_mfn_p == p2m_mid_missing_mfn) {
+        mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
+        p2m_mid_mfn_init(mid_mfn_p);
+
+        p2m_top_mfn_p[topidx] = mid_mfn_p;
+        p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
+        /* Note: we don't set mid_mfn_p[midix] here,
+         * look in early_alloc_p2m_middle */
+    }
+    return true;
+}
+bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn)
+{
+    if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
+        if (!early_alloc_p2m(pfn))
+            return false;
+
+        if (!early_alloc_p2m_middle(pfn, false /* boundary crossover OK!*/))
+            return false;
+
+        if (!__set_phys_to_machine(pfn, mfn))
+            return false;
+    }
+
+    return true;
 }
 unsigned long __init set_phys_range_identity(unsigned long pfn_s,
                                              unsigned long pfn_e)

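For reference, the topidx/mididx/idx triple used by the two allocators above splits a PFN across the three levels of the p2m tree (p2m_top -> mid page -> leaf page). A minimal standalone sketch of that decomposition, assuming the usual x86-64 sizes of 512 entries per level (4 KiB pages, 8-byte entries); it mirrors what p2m_top_index(), p2m_mid_index() and p2m_index() compute rather than reproducing the kernel headers:

#include <stdio.h>

/* Illustration only: assumed sizes, not copied from the kernel headers. */
#define P2M_PER_PAGE      512UL   /* entries in a leaf p2m page    */
#define P2M_MID_PER_PAGE  512UL   /* leaf pointers in a mid page   */

int main(void)
{
    unsigned long pfn = 0x12345UL;   /* arbitrary example PFN */

    unsigned long topidx = pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
    unsigned long mididx = (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
    unsigned long idx    = pfn % P2M_PER_PAGE;

    /* Conceptually, p2m_top[topidx][mididx][idx] holds the MFN for pfn.
     * idx == 0 means pfn sits exactly on a leaf-page boundary, the
     * "no boundary cross-over" case that early_alloc_p2m_middle() can
     * skip when check_boundary is true. */
    printf("pfn %#lx -> topidx %lu, mididx %lu, idx %lu\n",
           pfn, topidx, mididx, idx);
    return 0;
}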
@@ -559,35 +603,11 @@ unsigned long __init set_phys_range_identity(unsigned long pfn_s,
         pfn < ALIGN(pfn_e, (P2M_MID_PER_PAGE * P2M_PER_PAGE));
         pfn += P2M_MID_PER_PAGE * P2M_PER_PAGE)
     {
-        unsigned topidx = p2m_top_index(pfn);
-        unsigned long *mid_mfn_p;
-        unsigned long **mid;
-
-        mid = p2m_top[topidx];
-        mid_mfn_p = p2m_top_mfn_p[topidx];
-        if (mid == p2m_mid_missing) {
-            mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
-
-            p2m_mid_init(mid);
-
-            p2m_top[topidx] = mid;
-
-            BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
-        }
-        /* And the save/restore P2M tables.. */
-        if (mid_mfn_p == p2m_mid_missing_mfn) {
-            mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
-            p2m_mid_mfn_init(mid_mfn_p);
-
-            p2m_top_mfn_p[topidx] = mid_mfn_p;
-            p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
-            /* Note: we don't set mid_mfn_p[midix] here,
-             * look in __early_alloc_p2m */
-        }
+        WARN_ON(!early_alloc_p2m(pfn));
     }

-    __early_alloc_p2m(pfn_s);
-    __early_alloc_p2m(pfn_e);
+    early_alloc_p2m_middle(pfn_s, true);
+    early_alloc_p2m_middle(pfn_e, true);

     for (pfn = pfn_s; pfn < pfn_e; pfn++)
         if (!__set_phys_to_machine(pfn, IDENTITY_FRAME(pfn)))

@@ -26,7 +26,6 @@
 #include <xen/interface/memory.h>
 #include <xen/interface/physdev.h>
 #include <xen/features.h>
-
 #include "xen-ops.h"
 #include "vdso.h"

@@ -84,8 +83,8 @@ static void __init xen_add_extra_mem(u64 start, u64 size)
         __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
 }

-static unsigned long __init xen_release_chunk(unsigned long start,
-                                              unsigned long end)
+static unsigned long __init xen_do_chunk(unsigned long start,
+                                         unsigned long end, bool release)
 {
     struct xen_memory_reservation reservation = {
         .address_bits = 0,
@@ -96,30 +95,138 @@ static unsigned long __init xen_release_chunk(unsigned long start,
     unsigned long pfn;
     int ret;

-    for(pfn = start; pfn < end; pfn++) {
+    for (pfn = start; pfn < end; pfn++) {
+        unsigned long frame;
         unsigned long mfn = pfn_to_mfn(pfn);

-        /* Make sure pfn exists to start with */
-        if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
-            continue;
-
-        set_xen_guest_handle(reservation.extent_start, &mfn);
+        if (release) {
+            /* Make sure pfn exists to start with */
+            if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
+                continue;
+            frame = mfn;
+        } else {
+            if (mfn != INVALID_P2M_ENTRY)
+                continue;
+            frame = pfn;
+        }
+        set_xen_guest_handle(reservation.extent_start, &frame);
         reservation.nr_extents = 1;

-        ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
+        ret = HYPERVISOR_memory_op(release ? XENMEM_decrease_reservation : XENMEM_populate_physmap,
                                    &reservation);
-        WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);
+        WARN(ret != 1, "Failed to %s pfn %lx err=%d\n",
+             release ? "release" : "populate", pfn, ret);

         if (ret == 1) {
-            __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
+            if (!early_set_phys_to_machine(pfn, release ? INVALID_P2M_ENTRY : frame)) {
+                if (release)
+                    break;
+                set_xen_guest_handle(reservation.extent_start, &frame);
+                reservation.nr_extents = 1;
+                ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
+                                           &reservation);
+                break;
+            }
             len++;
-        }
+        } else
+            break;
     }
-    printk(KERN_INFO "Freeing %lx-%lx pfn range: %lu pages freed\n",
-           start, end, len);
+    if (len)
+        printk(KERN_INFO "%s %lx-%lx pfn range: %lu pages %s\n",
+               release ? "Freeing" : "Populating",
+               start, end, len,
+               release ? "freed" : "added");

     return len;
 }

+static unsigned long __init xen_release_chunk(unsigned long start,
+                                              unsigned long end)
+{
+    return xen_do_chunk(start, end, true);
+}
+
+static unsigned long __init xen_populate_chunk(
+    const struct e820entry *list, size_t map_size,
+    unsigned long max_pfn, unsigned long *last_pfn,
+    unsigned long credits_left)
+{
+    const struct e820entry *entry;
+    unsigned int i;
+    unsigned long done = 0;
+    unsigned long dest_pfn;
+
+    for (i = 0, entry = list; i < map_size; i++, entry++) {
+        unsigned long credits = credits_left;
+        unsigned long s_pfn;
+        unsigned long e_pfn;
+        unsigned long pfns;
+        long capacity;
+
+        if (credits <= 0)
+            break;
+
+        if (entry->type != E820_RAM)
+            continue;
+
+        e_pfn = PFN_UP(entry->addr + entry->size);
+
+        /* We only care about E820 after the xen_start_info->nr_pages */
+        if (e_pfn <= max_pfn)
+            continue;
+
+        s_pfn = PFN_DOWN(entry->addr);
+        /* If the E820 falls within the nr_pages, we want to start
+         * at the nr_pages PFN.
+         * If that would mean going past the E820 entry, skip it
+         */
+        if (s_pfn <= max_pfn) {
+            capacity = e_pfn - max_pfn;
+            dest_pfn = max_pfn;
+        } else {
+            /* last_pfn MUST be within E820_RAM regions */
+            if (*last_pfn && e_pfn >= *last_pfn)
+                s_pfn = *last_pfn;
+            capacity = e_pfn - s_pfn;
+            dest_pfn = s_pfn;
+        }
+        /* If we had filled this E820_RAM entry, go to the next one. */
+        if (capacity <= 0)
+            continue;
+
+        if (credits > capacity)
+            credits = capacity;
+
+        pfns = xen_do_chunk(dest_pfn, dest_pfn + credits, false);
+        done += pfns;
+        credits_left -= pfns;
+        *last_pfn = (dest_pfn + pfns);
+    }
+    return done;
+}
+
+static void __init xen_set_identity_and_release_chunk(
+    unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
+    unsigned long *released, unsigned long *identity)
+{
+    unsigned long pfn;
+
+    /*
+     * If the PFNs are currently mapped, the VA mapping also needs
+     * to be updated to be 1:1.
+     */
+    for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
+        (void)HYPERVISOR_update_va_mapping(
+            (unsigned long)__va(pfn << PAGE_SHIFT),
+            mfn_pte(pfn, PAGE_KERNEL_IO), 0);
+
+    if (start_pfn < nr_pages)
+        *released += xen_release_chunk(
+            start_pfn, min(end_pfn, nr_pages));
+
+    *identity += set_phys_range_identity(start_pfn, end_pfn);
+}
+
 static unsigned long __init xen_set_identity_and_release(
     const struct e820entry *list, size_t map_size, unsigned long nr_pages)
 {

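The capacity/credits clamping in xen_populate_chunk() above is easier to follow with concrete numbers. A self-contained sketch of the per-entry arithmetic, using made-up PFN values (none of these numbers come from the patch):

#include <stdio.h>

int main(void)
{
    /* Toy E820_RAM entry straddling the domain's initial allocation. */
    unsigned long max_pfn = 0x40000;   /* nr_pages at boot (1 GiB)       */
    unsigned long s_pfn   = 0x30000;   /* PFN_DOWN(entry->addr)          */
    unsigned long e_pfn   = 0x80000;   /* PFN_UP(entry->addr + size)     */
    unsigned long credits = 0x5000;    /* released pages left to place   */
    unsigned long dest_pfn;
    long capacity;

    if (s_pfn <= max_pfn) {
        /* Entry starts below nr_pages: only the part above it has room. */
        capacity = e_pfn - max_pfn;
        dest_pfn = max_pfn;
    } else {
        capacity = e_pfn - s_pfn;
        dest_pfn = s_pfn;
    }

    /* Never place more pages than were released. */
    if (credits > (unsigned long)capacity)
        credits = capacity;

    /* Here capacity is 0x40000 but only 0x5000 credits remain, so the
     * populate call would cover pfns 0x40000..0x45000. */
    printf("populate %#lx pages starting at pfn %#lx\n", credits, dest_pfn);
    return 0;
}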
@@ -142,7 +249,6 @@ static unsigned long __init xen_set_identity_and_release(
     for (i = 0, entry = list; i < map_size; i++, entry++) {
         phys_addr_t end = entry->addr + entry->size;
-
         if (entry->type == E820_RAM || i == map_size - 1) {
             unsigned long start_pfn = PFN_DOWN(start);
             unsigned long end_pfn = PFN_UP(end);

@@ -150,20 +256,19 @@
             if (entry->type == E820_RAM)
                 end_pfn = PFN_UP(entry->addr);

-            if (start_pfn < end_pfn) {
-                if (start_pfn < nr_pages)
-                    released += xen_release_chunk(
-                        start_pfn, min(end_pfn, nr_pages));
+            if (start_pfn < end_pfn)
+                xen_set_identity_and_release_chunk(
+                    start_pfn, end_pfn, nr_pages,
+                    &released, &identity);

-                identity += set_phys_range_identity(
-                    start_pfn, end_pfn);
-            }
             start = end;
         }
     }

-    printk(KERN_INFO "Released %lu pages of unused memory\n", released);
-    printk(KERN_INFO "Set %ld page(s) to 1-1 mapping\n", identity);
+    if (released)
+        printk(KERN_INFO "Released %lu pages of unused memory\n", released);
+    if (identity)
+        printk(KERN_INFO "Set %ld page(s) to 1-1 mapping\n", identity);

     return released;
 }

@@ -217,7 +322,9 @@ char * __init xen_memory_setup(void)
     int rc;
     struct xen_memory_map memmap;
     unsigned long max_pages;
+    unsigned long last_pfn = 0;
     unsigned long extra_pages = 0;
+    unsigned long populated;
     int i;
     int op;

|
@ -257,8 +364,19 @@ char * __init xen_memory_setup(void)
|
|||
*/
|
||||
xen_released_pages = xen_set_identity_and_release(
|
||||
map, memmap.nr_entries, max_pfn);
|
||||
extra_pages += xen_released_pages;
|
||||
|
||||
/*
|
||||
* Populate back the non-RAM pages and E820 gaps that had been
|
||||
* released. */
|
||||
populated = xen_populate_chunk(map, memmap.nr_entries,
|
||||
max_pfn, &last_pfn, xen_released_pages);
|
||||
|
||||
extra_pages += (xen_released_pages - populated);
|
||||
|
||||
if (last_pfn > max_pfn) {
|
||||
max_pfn = min(MAX_DOMAIN_PAGES, last_pfn);
|
||||
mem_end = PFN_PHYS(max_pfn);
|
||||
}
|
||||
/*
|
||||
* Clamp the amount of extra memory to a EXTRA_MEM_RATIO
|
||||
* factor the base size. On non-highmem systems, the base
|
||||
|
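This accounting is the heart of the "Populate freed MFNs" change: extra_pages only grows by the part of xen_released_pages that could not be placed back. With illustrative numbers (not taken from the patch): if 20,000 PFNs are released from non-RAM regions and gaps but 18,000 of them are repopulated into E820_RAM above nr_pages, extra_pages increases by just 2,000, and last_pfn may push max_pfn (and mem_end) past the domain's original nr_pages.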
@@ -272,7 +390,6 @@
      */
     extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
                       extra_pages);
-
     i = 0;
     while (i < memmap.nr_entries) {
         u64 addr = map[i].addr;

@@ -28,7 +28,6 @@ void xen_setup_shared_info(void);
 void xen_build_mfn_list_list(void);
 void xen_setup_machphys_mapping(void);
 pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn);
-void xen_ident_map_ISA(void);
 void xen_reserve_top(void);
 extern unsigned long xen_max_p2m_pfn;
