Merge branch 'upstream/core' into upstream/xen

* upstream/core:
  xen/panic: use xen_reboot and fix smp_send_stop
  Xen: register panic notifier to take crashes of xen guests on panic
  xen: support large numbers of CPUs with vcpu info placement
  xen: drop xen_sched_clock in favour of using plain wallclock time
  pvops: do not notify callers from register_xenstore_notifier
  xen: make sure pages are really part of domain before freeing
  xen: release unused free memory
Jeremy Fitzhardinge 2010-08-04 14:49:05 -07:00
commit a70ce4b606
6 changed files with 112 additions and 49 deletions

@@ -97,6 +97,14 @@ struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;
 */
static int have_vcpu_info_placement = 1;

static void clamp_max_cpus(void)
{
#ifdef CONFIG_SMP
        if (setup_max_cpus > MAX_VIRT_CPUS)
                setup_max_cpus = MAX_VIRT_CPUS;
#endif
}

static void xen_vcpu_setup(int cpu)
{
        struct vcpu_register_vcpu_info info;
@@ -104,13 +112,17 @@ static void xen_vcpu_setup(int cpu)
        struct vcpu_info *vcpup;

        BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

        if (cpu < MAX_VIRT_CPUS)
                per_cpu(xen_vcpu,cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];

        if (!have_vcpu_info_placement)
                return; /* already tested, not available */
        if (!have_vcpu_info_placement) {
                if (cpu >= MAX_VIRT_CPUS)
                        clamp_max_cpus();
                return;
        }

        vcpup = &per_cpu(xen_vcpu_info, cpu);
        info.mfn = arbitrary_virt_to_mfn(vcpup);
        info.offset = offset_in_page(vcpup);
@@ -125,6 +137,7 @@ static void xen_vcpu_setup(int cpu)
        if (err) {
                printk(KERN_DEBUG "register_vcpu_info failed: err=%d\n", err);
                have_vcpu_info_placement = 0;
                clamp_max_cpus();
        } else {
                /* This cpu is using the registered vcpu info, even if
                   later ones fail to. */
@@ -731,7 +744,6 @@ static void set_xen_basic_apic_ops(void)
#endif

static void xen_clts(void)
{
        struct multicall_space mcs;
@@ -927,7 +939,7 @@ static const struct pv_init_ops xen_init_ops __initdata = {
};

static const struct pv_time_ops xen_time_ops __initdata = {
        .sched_clock = xen_sched_clock,
        .sched_clock = xen_clocksource_read,
};
static const struct pv_cpu_ops xen_cpu_ops __initdata = {
@@ -1028,6 +1040,23 @@ static void xen_crash_shutdown(struct pt_regs *regs)
        xen_reboot(SHUTDOWN_crash);
}

static int
xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
{
        xen_reboot(SHUTDOWN_crash);
        return NOTIFY_DONE;
}

static struct notifier_block xen_panic_block = {
        .notifier_call= xen_panic_event,
};

int xen_panic_handler_init(void)
{
        atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);
        return 0;
}

static const struct machine_ops __initdata xen_machine_ops = {
        .restart = xen_restart,
        .halt = xen_machine_halt,

@@ -20,6 +20,7 @@
#include <xen/page.h>
#include <xen/interface/callback.h>
#include <xen/interface/physdev.h>
#include <xen/interface/memory.h>
#include <xen/features.h>
#include "xen-ops.h"
@@ -32,6 +33,73 @@ extern void xen_sysenter_target(void);
extern void xen_syscall_target(void);
extern void xen_syscall32_target(void);
static unsigned long __init xen_release_chunk(phys_addr_t start_addr,
                                              phys_addr_t end_addr)
{
        struct xen_memory_reservation reservation = {
                .address_bits = 0,
                .extent_order = 0,
                .domid = DOMID_SELF
        };
        unsigned long start, end;
        unsigned long len = 0;
        unsigned long pfn;
        int ret;

        start = PFN_UP(start_addr);
        end = PFN_DOWN(end_addr);

        if (end <= start)
                return 0;

        printk(KERN_INFO "xen_release_chunk: looking at area pfn %lx-%lx: ",
               start, end);
        for(pfn = start; pfn < end; pfn++) {
                unsigned long mfn = pfn_to_mfn(pfn);

                /* Make sure pfn exists to start with */
                if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
                        continue;

                set_xen_guest_handle(reservation.extent_start, &mfn);
                reservation.nr_extents = 1;

                ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
                                           &reservation);
                WARN(ret != 1, "Failed to release memory %lx-%lx err=%d\n",
                     start, end, ret);
                if (ret == 1) {
                        set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
                        len++;
                }
        }
        printk(KERN_CONT "%ld pages freed\n", len);

        return len;
}
static unsigned long __init xen_return_unused_memory(unsigned long max_pfn,
                                                     const struct e820map *e820)
{
        phys_addr_t max_addr = PFN_PHYS(max_pfn);
        phys_addr_t last_end = 0;
        unsigned long released = 0;
        int i;

        for (i = 0; i < e820->nr_map && last_end < max_addr; i++) {
                phys_addr_t end = e820->map[i].addr;
                end = min(max_addr, end);

                released += xen_release_chunk(last_end, end);
                last_end = e820->map[i].addr + e820->map[i].size;
        }

        if (last_end < max_addr)
                released += xen_release_chunk(last_end, max_addr);

        printk(KERN_INFO "released %ld pages of unused memory\n", released);

        return released;
}
/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
@@ -67,6 +135,8 @@ char * __init xen_memory_setup(void)
        sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

        xen_return_unused_memory(xen_start_info->nr_pages, &e820);

        return "Xen";
}
@@ -156,6 +226,8 @@ void __init xen_arch_setup(void)
        struct physdev_set_iopl set_iopl;
        int rc;

        xen_panic_handler_init();

        HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
        HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

@@ -394,6 +394,8 @@ static void stop_self(void *v)
        load_cr3(swapper_pg_dir);
        /* should set up a minimal gdt */

        set_cpu_online(cpu, false);

        HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
        BUG();
}

@@ -155,45 +155,6 @@ static void do_stolen_accounting(void)
        account_idle_ticks(ticks);
}

/*
 * Xen sched_clock implementation. Returns the number of unstolen
 * nanoseconds, which is nanoseconds the VCPU spent in RUNNING+BLOCKED
 * states.
 */
unsigned long long xen_sched_clock(void)
{
        struct vcpu_runstate_info state;
        cycle_t now;
        u64 ret;
        s64 offset;

        /*
         * Ideally sched_clock should be called on a per-cpu basis
         * anyway, so preempt should already be disabled, but that's
         * not current practice at the moment.
         */
        preempt_disable();

        now = xen_clocksource_read();

        get_runstate_snapshot(&state);

        WARN_ON(state.state != RUNSTATE_running);

        offset = now - state.state_entry_time;
        if (offset < 0)
                offset = 0;

        ret = state.time[RUNSTATE_blocked] +
                state.time[RUNSTATE_running] +
                offset;

        preempt_enable();

        return ret;
}

/* Get the TSC speed from Xen */
unsigned long xen_tsc_khz(void)
{

@@ -101,4 +101,6 @@ void xen_sysret32(void);
void xen_sysret64(void);
void xen_adjust_exception_frame(void);

extern int xen_panic_handler_init(void);

#endif /* XEN_OPS_H */

@@ -752,9 +752,6 @@ int register_xenstore_notifier(struct notifier_block *nb)
{
        int ret = 0;

        if (xenstored_ready > 0)
                ret = nb->notifier_call(nb, 0, NULL);
        else
                blocking_notifier_chain_register(&xenstore_chain, nb);

        return ret;