Features:
 - Xen ACPI memory and CPU hotplug drivers - allowing Xen hypervisor
   to be aware of new CPU and new DIMMs
 - Cleanups

Bug-fixes:
 - Fixes a long-standing bug in the PV spinlock wherein we did not
   kick VCPUs that were in a tight loop.
 - Fixes in the error paths for the event channel machinery.
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.13 (GNU/Linux)

iQEcBAABAgAGBQJRJS1kAAoJEFjIrFwIi8fJj2YIAMO3/LVUZyojX/d8U9pqrCly
lFfEF2UVjcxHJSj0ZFNXt1o3fnYP1SLRlT9u7ZLDjXf6Lmxmw6/C3Haw2wp3DfGq
yUR0G/X9CPTBEgMYDdX7bjeTjyURvZcUaFwr+qodaaeL3uXx2pW6621Sc6jRKuia
yAFVZMAKeaRrvUUIXjKHtlpRp9LKFdSztShMtYqmFvxEwrJPq2b37caKruoUCa6o
X/YO0fvE9QtYD/pG0jsghFmLh/mcr+n9IFMCUXo1Yc9FdQBExtKzABDS5jdpuFND
4aMDE3dqUmHmpbaQhRE7SdblvpyrGdQXL6FSTjvwBgISfLo847CrnRKRgPp0YeA=
=LQeU
-----END PGP SIGNATURE-----

Merge tag 'stable/for-linus-3.9-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen

Pull Xen update from Konrad Rzeszutek Wilk:
 "This has two new ACPI drivers for Xen - a physical CPU offline/online
  and a memory hotplug one.  The way this works is that ACPI kicks the
  drivers and they make the appropriate hypercall to the hypervisor to
  tell it that there is a new CPU or memory.  There are also some
  changes to the Xen ARM ABIs and a couple of fixes.  One particularly
  nasty bug in the Xen PV spinlock code was fixed by Stefan Bader - and
  it has been there since 2.6.32!

  Features:
   - Xen ACPI memory and CPU hotplug drivers - allowing Xen hypervisor
     to be aware of new CPU and new DIMMs
   - Cleanups

  Bug-fixes:
   - Fixes a long-standing bug in the PV spinlock wherein we did not
     kick VCPUs that were in a tight loop.
   - Fixes in the error paths for the event channel machinery"

Fix up a few semantic conflicts with the ACPI interface changes in
drivers/xen/xen-acpi-{cpu,mem}hotplug.c.

* tag 'stable/for-linus-3.9-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen:
  xen: event channel arrays are xen_ulong_t and not unsigned long
  xen: Send spinlock IPI to all waiters
  xen: introduce xen_remap, use it instead of ioremap
  xen: close evtchn port if binding to irq fails
  xen-evtchn: correct comment and error output
  xen/tmem: Add missing %s in the printk statement.
  xen/acpi: move xen_acpi_get_pxm under CONFIG_XEN_DOM0
  xen/acpi: ACPI cpu hotplug
  xen/acpi: Move xen_acpi_get_pxm to Xen's acpi.h
  xen/stub: driver for CPU hotplug
  xen/acpi: ACPI memory hotplug
  xen/stub: driver for memory hotplug
  xen: implement updated XENMEM_add_to_physmap_range ABI
  xen/smp: Move the common CPU init code a bit to prep for PVH patch.
commit 77be36de8b
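A note on the recurring theme in the event-channel hunks below: the shared_info event-channel bit arrays are xen_ulong_t, which is 64-bit even on 32-bit ARM, while unsigned long is only 32-bit there. The patches therefore size all bit arithmetic with BITS_PER_EVTCHN_WORD and cast through BM() before calling the generic bitops. A minimal illustrative sketch of that convention (the two macros are taken from the diff; the helper function below is not part of the patch set, it only shows the idea):

	#define BITS_PER_EVTCHN_WORD	(sizeof(xen_ulong_t) * 8)
	#define BM(w)			((unsigned long *)(w))

	/* Locate the word holding an event-channel port, sized by the
	 * Xen ABI word rather than by the native unsigned long. */
	static inline int evtchn_word_idx(int port)
	{
		return port / BITS_PER_EVTCHN_WORD;	/* not BITS_PER_LONG */
	}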
@@ -15,4 +15,26 @@ static inline int xen_irqs_disabled(struct pt_regs *regs)
 	return raw_irqs_disabled_flags(regs->ARM_cpsr);
 }
 
+/*
+ * We cannot use xchg because it does not support 8-byte
+ * values. However it is safe to use {ldr,str}exd directly because all
+ * platforms which Xen can run on support those instructions.
+ */
+static inline xen_ulong_t xchg_xen_ulong(xen_ulong_t *ptr, xen_ulong_t val)
+{
+	xen_ulong_t oldval;
+	unsigned int tmp;
+
+	wmb();
+	asm volatile("@ xchg_xen_ulong\n"
+		"1: ldrexd %0, %H0, [%3]\n"
+		"   strexd %1, %2, %H2, [%3]\n"
+		"   teq %1, #0\n"
+		"   bne 1b"
+		: "=&r" (oldval), "=&r" (tmp)
+		: "r" (val), "r" (ptr)
+		: "memory", "cc");
+	return oldval;
+}
+
 #endif /* _ASM_ARM_XEN_EVENTS_H */
@@ -1,6 +1,7 @@
 #ifndef _ASM_ARM_XEN_PAGE_H
 #define _ASM_ARM_XEN_PAGE_H
 
+#include <asm/mach/map.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
 
@@ -86,4 +87,7 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 {
 	return __set_phys_to_machine(pfn, mfn);
 }
+
+#define xen_remap(cookie, size) __arm_ioremap((cookie), (size), MT_MEMORY);
+
 #endif /* _ASM_ARM_XEN_PAGE_H */
@@ -59,14 +59,16 @@ static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
 	};
 	xen_ulong_t idx = fgmfn;
 	xen_pfn_t gpfn = lpfn;
+	int err = 0;
 
 	set_xen_guest_handle(xatp.idxs, &idx);
 	set_xen_guest_handle(xatp.gpfns, &gpfn);
+	set_xen_guest_handle(xatp.errs, &err);
 
 	rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
-	if (rc) {
-		pr_warn("Failed to map pfn to mfn rc:%d pfn:%lx mfn:%lx\n",
-			rc, lpfn, fgmfn);
+	if (rc || err) {
+		pr_warn("Failed to map pfn to mfn rc:%d:%d pfn:%lx mfn:%lx\n",
+			rc, err, lpfn, fgmfn);
 		return 1;
 	}
 	return 0;
@@ -16,4 +16,7 @@ static inline int xen_irqs_disabled(struct pt_regs *regs)
 	return raw_irqs_disabled_flags(regs->flags);
 }
 
+/* No need for a barrier -- XCHG is a barrier on x86. */
+#define xchg_xen_ulong(ptr, val) xchg((ptr), (val))
+
 #endif /* _ASM_X86_XEN_EVENTS_H */
@@ -212,4 +212,6 @@ unsigned long arbitrary_virt_to_mfn(void *vaddr);
 void make_lowmem_page_readonly(void *vaddr);
 void make_lowmem_page_readwrite(void *vaddr);
 
+#define xen_remap(cookie, size) ioremap((cookie), (size));
+
 #endif /* _ASM_X86_XEN_PAGE_H */
@@ -300,8 +300,6 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 	gdt = get_cpu_gdt_table(cpu);
 
 	ctxt->flags = VGCF_IN_KERNEL;
-	ctxt->user_regs.ds = __USER_DS;
-	ctxt->user_regs.es = __USER_DS;
 	ctxt->user_regs.ss = __KERNEL_DS;
 #ifdef CONFIG_X86_32
 	ctxt->user_regs.fs = __KERNEL_PERCPU;
@@ -310,35 +308,41 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 	ctxt->gs_base_kernel = per_cpu_offset(cpu);
 #endif
 	ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
-	ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
 
 	memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
 
-	xen_copy_trap_info(ctxt->trap_ctxt);
-
-	ctxt->ldt_ents = 0;
-
-	BUG_ON((unsigned long)gdt & ~PAGE_MASK);
-
-	gdt_mfn = arbitrary_virt_to_mfn(gdt);
-	make_lowmem_page_readonly(gdt);
-	make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));
-
-	ctxt->gdt_frames[0] = gdt_mfn;
-	ctxt->gdt_ents      = GDT_ENTRIES;
-
-	ctxt->user_regs.cs = __KERNEL_CS;
-	ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);
-
-	ctxt->kernel_ss = __KERNEL_DS;
-	ctxt->kernel_sp = idle->thread.sp0;
-
+	{
+		ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
+		ctxt->user_regs.ds = __USER_DS;
+		ctxt->user_regs.es = __USER_DS;
+
+		xen_copy_trap_info(ctxt->trap_ctxt);
+
+		ctxt->ldt_ents = 0;
+
+		BUG_ON((unsigned long)gdt & ~PAGE_MASK);
+
+		gdt_mfn = arbitrary_virt_to_mfn(gdt);
+		make_lowmem_page_readonly(gdt);
+		make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));
+
+		ctxt->gdt_frames[0] = gdt_mfn;
+		ctxt->gdt_ents      = GDT_ENTRIES;
+
+		ctxt->kernel_ss = __KERNEL_DS;
+		ctxt->kernel_sp = idle->thread.sp0;
+
 #ifdef CONFIG_X86_32
-	ctxt->event_callback_cs     = __KERNEL_CS;
-	ctxt->failsafe_callback_cs  = __KERNEL_CS;
+		ctxt->event_callback_cs     = __KERNEL_CS;
+		ctxt->failsafe_callback_cs  = __KERNEL_CS;
 #endif
-	ctxt->event_callback_eip    = (unsigned long)xen_hypervisor_callback;
-	ctxt->failsafe_callback_eip = (unsigned long)xen_failsafe_callback;
+		ctxt->event_callback_eip    =
+					(unsigned long)xen_hypervisor_callback;
+		ctxt->failsafe_callback_eip =
+					(unsigned long)xen_failsafe_callback;
+	}
+	ctxt->user_regs.cs = __KERNEL_CS;
+	ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);
 
 	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
 	ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));
@@ -328,7 +328,6 @@ static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
 		if (per_cpu(lock_spinners, cpu) == xl) {
 			ADD_STATS(released_slow_kicked, 1);
 			xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
-			break;
 		}
 	}
 }
@@ -230,7 +230,7 @@ static int xen_hvm_console_init(void)
 	if (r < 0 || v == 0)
 		goto err;
 	mfn = v;
-	info->intf = ioremap(mfn << PAGE_SHIFT, PAGE_SIZE);
+	info->intf = xen_remap(mfn << PAGE_SHIFT, PAGE_SIZE);
 	if (info->intf == NULL)
 		goto err;
 	info->vtermno = HVC_COOKIE;
@@ -180,6 +180,40 @@ config XEN_PRIVCMD
 	depends on XEN
 	default m
 
+config XEN_STUB
+	bool "Xen stub drivers"
+	depends on XEN && X86_64
+	default n
+	help
+	  Allow kernel to install stub drivers, to reserve space for Xen drivers,
+	  i.e. memory hotplug and cpu hotplug, and to block native drivers loaded,
+	  so that real Xen drivers can be modular.
+
+	  To enable Xen features like cpu and memory hotplug, select Y here.
+
+config XEN_ACPI_HOTPLUG_MEMORY
+	tristate "Xen ACPI memory hotplug"
+	depends on XEN_DOM0 && XEN_STUB && ACPI
+	default n
+	help
+	  This is Xen ACPI memory hotplug.
+
+	  Currently Xen only support ACPI memory hot-add. If you want
+	  to hot-add memory at runtime (the hot-added memory cannot be
+	  removed until machine stop), select Y/M here, otherwise select N.
+
+config XEN_ACPI_HOTPLUG_CPU
+	tristate "Xen ACPI cpu hotplug"
+	depends on XEN_DOM0 && XEN_STUB && ACPI
+	select ACPI_CONTAINER
+	default n
+	help
+	  Xen ACPI cpu enumerating and hotplugging
+
+	  For hotplugging, currently Xen only support ACPI cpu hotadd.
+	  If you want to hotadd cpu at runtime (the hotadded cpu cannot
+	  be removed until machine stop), select Y/M here.
+
 config XEN_ACPI_PROCESSOR
 	tristate "Xen ACPI processor"
 	depends on XEN && X86 && ACPI_PROCESSOR && CPU_FREQ
@@ -30,6 +30,9 @@ obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o
 obj-$(CONFIG_XEN_MCE_LOG) += mcelog.o
 obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback/
 obj-$(CONFIG_XEN_PRIVCMD) += xen-privcmd.o
+obj-$(CONFIG_XEN_STUB) += xen-stub.o
+obj-$(CONFIG_XEN_ACPI_HOTPLUG_MEMORY) += xen-acpi-memhotplug.o
+obj-$(CONFIG_XEN_ACPI_HOTPLUG_CPU) += xen-acpi-cpuhotplug.o
 obj-$(CONFIG_XEN_ACPI_PROCESSOR) += xen-acpi-processor.o
 xen-evtchn-y := evtchn.o
 xen-gntdev-y := gntdev.o
@@ -120,7 +120,22 @@ static unsigned long *pirq_eoi_map;
 #endif
 static bool (*pirq_needs_eoi)(unsigned irq);
 
-static DEFINE_PER_CPU(unsigned long [NR_EVENT_CHANNELS/BITS_PER_LONG],
+/*
+ * Note sizeof(xen_ulong_t) can be more than sizeof(unsigned long). Be
+ * careful to only use bitops which allow for this (e.g
+ * test_bit/find_first_bit and friends but not __ffs) and to pass
+ * BITS_PER_EVTCHN_WORD as the bitmask length.
+ */
+#define BITS_PER_EVTCHN_WORD (sizeof(xen_ulong_t)*8)
+/*
+ * Make a bitmask (i.e. unsigned long *) of a xen_ulong_t
+ * array. Primarily to avoid long lines (hence the terse name).
+ */
+#define BM(x) (unsigned long *)(x)
+/* Find the first set bit in a evtchn mask */
+#define EVTCHN_FIRST_BIT(w) find_first_bit(BM(&(w)), BITS_PER_EVTCHN_WORD)
+
+static DEFINE_PER_CPU(xen_ulong_t [NR_EVENT_CHANNELS/BITS_PER_EVTCHN_WORD],
 		      cpu_evtchn_mask);
 
 /* Xen will never allocate port zero for any purpose. */
@@ -294,9 +309,9 @@ static bool pirq_needs_eoi_flag(unsigned irq)
 	return info->u.pirq.flags & PIRQ_NEEDS_EOI;
 }
 
-static inline unsigned long active_evtchns(unsigned int cpu,
+static inline xen_ulong_t active_evtchns(unsigned int cpu,
 					 struct shared_info *sh,
 					 unsigned int idx)
 {
 	return sh->evtchn_pending[idx] &
 		per_cpu(cpu_evtchn_mask, cpu)[idx] &
@@ -312,8 +327,8 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
 	cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu));
 #endif
 
-	clear_bit(chn, per_cpu(cpu_evtchn_mask, cpu_from_irq(irq)));
-	set_bit(chn, per_cpu(cpu_evtchn_mask, cpu));
+	clear_bit(chn, BM(per_cpu(cpu_evtchn_mask, cpu_from_irq(irq))));
+	set_bit(chn, BM(per_cpu(cpu_evtchn_mask, cpu)));
 
 	info_for_irq(irq)->cpu = cpu;
 }
@@ -339,19 +354,19 @@ static void init_evtchn_cpu_bindings(void)
 static inline void clear_evtchn(int port)
 {
 	struct shared_info *s = HYPERVISOR_shared_info;
-	sync_clear_bit(port, &s->evtchn_pending[0]);
+	sync_clear_bit(port, BM(&s->evtchn_pending[0]));
 }
 
 static inline void set_evtchn(int port)
 {
 	struct shared_info *s = HYPERVISOR_shared_info;
-	sync_set_bit(port, &s->evtchn_pending[0]);
+	sync_set_bit(port, BM(&s->evtchn_pending[0]));
 }
 
 static inline int test_evtchn(int port)
 {
 	struct shared_info *s = HYPERVISOR_shared_info;
-	return sync_test_bit(port, &s->evtchn_pending[0]);
+	return sync_test_bit(port, BM(&s->evtchn_pending[0]));
 }
 
 
@@ -375,7 +390,7 @@ EXPORT_SYMBOL_GPL(notify_remote_via_irq);
 static void mask_evtchn(int port)
 {
 	struct shared_info *s = HYPERVISOR_shared_info;
-	sync_set_bit(port, &s->evtchn_mask[0]);
+	sync_set_bit(port, BM(&s->evtchn_mask[0]));
 }
 
 static void unmask_evtchn(int port)
@@ -389,7 +404,7 @@ static void unmask_evtchn(int port)
 	if (unlikely((cpu != cpu_from_evtchn(port))))
 		do_hypercall = 1;
 	else
-		evtchn_pending = sync_test_bit(port, &s->evtchn_pending[0]);
+		evtchn_pending = sync_test_bit(port, BM(&s->evtchn_pending[0]));
 
 	if (unlikely(evtchn_pending && xen_hvm_domain()))
 		do_hypercall = 1;
@@ -403,7 +418,7 @@ static void unmask_evtchn(int port)
 	} else {
 		struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
 
-		sync_clear_bit(port, &s->evtchn_mask[0]);
+		sync_clear_bit(port, BM(&s->evtchn_mask[0]));
 
 		/*
 		 * The following is basically the equivalent of
@@ -411,8 +426,8 @@ static void unmask_evtchn(int port)
 		 * the interrupt edge' if the channel is masked.
 		 */
 		if (evtchn_pending &&
-		    !sync_test_and_set_bit(port / BITS_PER_LONG,
-					   &vcpu_info->evtchn_pending_sel))
+		    !sync_test_and_set_bit(port / BITS_PER_EVTCHN_WORD,
+					   BM(&vcpu_info->evtchn_pending_sel)))
 			vcpu_info->evtchn_upcall_pending = 1;
 	}
 
@@ -1189,7 +1204,7 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
 {
 	struct shared_info *sh = HYPERVISOR_shared_info;
 	int cpu = smp_processor_id();
-	unsigned long *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
+	xen_ulong_t *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
 	int i;
 	unsigned long flags;
 	static DEFINE_SPINLOCK(debug_lock);
@@ -1205,7 +1220,7 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
 		pending = (get_irq_regs() && i == cpu)
 			? xen_irqs_disabled(get_irq_regs())
 			: v->evtchn_upcall_mask;
-		printk("%d: masked=%d pending=%d event_sel %0*lx\n   ", i,
+		printk("%d: masked=%d pending=%d event_sel %0*"PRI_xen_ulong"\n   ", i,
 		       pending, v->evtchn_upcall_pending,
 		       (int)(sizeof(v->evtchn_pending_sel)*2),
 		       v->evtchn_pending_sel);
@@ -1214,49 +1229,52 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
 
 	printk("\npending:\n   ");
 	for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
-		printk("%0*lx%s", (int)sizeof(sh->evtchn_pending[0])*2,
+		printk("%0*"PRI_xen_ulong"%s",
+		       (int)sizeof(sh->evtchn_pending[0])*2,
 		       sh->evtchn_pending[i],
 		       i % 8 == 0 ? "\n   " : " ");
 	printk("\nglobal mask:\n   ");
 	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
-		printk("%0*lx%s",
+		printk("%0*"PRI_xen_ulong"%s",
 		       (int)(sizeof(sh->evtchn_mask[0])*2),
 		       sh->evtchn_mask[i],
 		       i % 8 == 0 ? "\n   " : " ");
 
 	printk("\nglobally unmasked:\n   ");
 	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
-		printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
+		printk("%0*"PRI_xen_ulong"%s",
+		       (int)(sizeof(sh->evtchn_mask[0])*2),
 		       sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
 		       i % 8 == 0 ? "\n   " : " ");
 
 	printk("\nlocal cpu%d mask:\n   ", cpu);
-	for (i = (NR_EVENT_CHANNELS/BITS_PER_LONG)-1; i >= 0; i--)
-		printk("%0*lx%s", (int)(sizeof(cpu_evtchn[0])*2),
+	for (i = (NR_EVENT_CHANNELS/BITS_PER_EVTCHN_WORD)-1; i >= 0; i--)
+		printk("%0*"PRI_xen_ulong"%s", (int)(sizeof(cpu_evtchn[0])*2),
 		       cpu_evtchn[i],
 		       i % 8 == 0 ? "\n   " : " ");
 
 	printk("\nlocally unmasked:\n   ");
 	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) {
-		unsigned long pending = sh->evtchn_pending[i]
+		xen_ulong_t pending = sh->evtchn_pending[i]
 			& ~sh->evtchn_mask[i]
 			& cpu_evtchn[i];
-		printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
+		printk("%0*"PRI_xen_ulong"%s",
+		       (int)(sizeof(sh->evtchn_mask[0])*2),
 		       pending, i % 8 == 0 ? "\n   " : " ");
 	}
 
 	printk("\npending list:\n");
 	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
-		if (sync_test_bit(i, sh->evtchn_pending)) {
-			int word_idx = i / BITS_PER_LONG;
+		if (sync_test_bit(i, BM(sh->evtchn_pending))) {
+			int word_idx = i / BITS_PER_EVTCHN_WORD;
 			printk("  %d: event %d -> irq %d%s%s%s\n",
 			       cpu_from_evtchn(i), i,
 			       evtchn_to_irq[i],
-			       sync_test_bit(word_idx, &v->evtchn_pending_sel)
+			       sync_test_bit(word_idx, BM(&v->evtchn_pending_sel))
 			       ? "" : " l2-clear",
-			       !sync_test_bit(i, sh->evtchn_mask)
+			       !sync_test_bit(i, BM(sh->evtchn_mask))
 			       ? "" : " globally-masked",
-			       sync_test_bit(i, cpu_evtchn)
+			       sync_test_bit(i, BM(cpu_evtchn))
 			       ? "" : " locally-masked");
 		}
 	}
@@ -1273,7 +1291,7 @@ static DEFINE_PER_CPU(unsigned int, current_bit_idx);
 /*
  * Mask out the i least significant bits of w
  */
-#define MASK_LSBS(w, i) (w & ((~0UL) << i))
+#define MASK_LSBS(w, i) (w & ((~((xen_ulong_t)0UL)) << i))
 
 /*
  * Search the CPUs pending events bitmasks.  For each one found, map
@@ -1295,18 +1313,19 @@ static void __xen_evtchn_do_upcall(void)
 	unsigned count;
 
 	do {
-		unsigned long pending_words;
+		xen_ulong_t pending_words;
 
 		vcpu_info->evtchn_upcall_pending = 0;
 
 		if (__this_cpu_inc_return(xed_nesting_count) - 1)
 			goto out;
 
-#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
-		/* Clear master flag /before/ clearing selector flag. */
-		wmb();
-#endif
-		pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
+		/*
+		 * Master flag must be cleared /before/ clearing
+		 * selector flag. xchg_xen_ulong must contain an
+		 * appropriate barrier.
+		 */
+		pending_words = xchg_xen_ulong(&vcpu_info->evtchn_pending_sel, 0);
 
 		start_word_idx = __this_cpu_read(current_word_idx);
 		start_bit_idx = __this_cpu_read(current_bit_idx);
@@ -1314,8 +1333,8 @@ static void __xen_evtchn_do_upcall(void)
 		word_idx = start_word_idx;
 
 		for (i = 0; pending_words != 0; i++) {
-			unsigned long pending_bits;
-			unsigned long words;
+			xen_ulong_t pending_bits;
+			xen_ulong_t words;
 
 			words = MASK_LSBS(pending_words, word_idx);
 
@@ -1327,7 +1346,7 @@ static void __xen_evtchn_do_upcall(void)
 				bit_idx = 0;
 				continue;
 			}
-			word_idx = __ffs(words);
+			word_idx = EVTCHN_FIRST_BIT(words);
 
 			pending_bits = active_evtchns(cpu, s, word_idx);
 			bit_idx = 0; /* usually scan entire word from start */
@@ -1342,7 +1361,7 @@ static void __xen_evtchn_do_upcall(void)
 			}
 
 			do {
-				unsigned long bits;
+				xen_ulong_t bits;
 				int port, irq;
 				struct irq_desc *desc;
 
@@ -1352,10 +1371,10 @@ static void __xen_evtchn_do_upcall(void)
 				if (bits == 0)
 					break;
 
-				bit_idx = __ffs(bits);
+				bit_idx = EVTCHN_FIRST_BIT(bits);
 
 				/* Process port. */
-				port = (word_idx * BITS_PER_LONG) + bit_idx;
+				port = (word_idx * BITS_PER_EVTCHN_WORD) + bit_idx;
 				irq = evtchn_to_irq[port];
 
 				if (irq != -1) {
@@ -1364,12 +1383,12 @@ static void __xen_evtchn_do_upcall(void)
 					generic_handle_irq_desc(irq, desc);
 				}
 
-				bit_idx = (bit_idx + 1) % BITS_PER_LONG;
+				bit_idx = (bit_idx + 1) % BITS_PER_EVTCHN_WORD;
 
 				/* Next caller starts at last processed + 1 */
 				__this_cpu_write(current_word_idx,
 						 bit_idx ? word_idx :
-						 (word_idx+1) % BITS_PER_LONG);
+						 (word_idx+1) % BITS_PER_EVTCHN_WORD);
 				__this_cpu_write(current_bit_idx, bit_idx);
 			} while (bit_idx != 0);
 
@@ -1377,7 +1396,7 @@ static void __xen_evtchn_do_upcall(void)
 			if ((word_idx != start_word_idx) || (i != 0))
 				pending_words &= ~(1UL << word_idx);
 
-			word_idx = (word_idx + 1) % BITS_PER_LONG;
+			word_idx = (word_idx + 1) % BITS_PER_EVTCHN_WORD;
 		}
 
 		BUG_ON(!irqs_disabled());
@@ -1487,8 +1506,8 @@ int resend_irq_on_evtchn(unsigned int irq)
 	if (!VALID_EVTCHN(evtchn))
 		return 1;
 
-	masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
-	sync_set_bit(evtchn, s->evtchn_pending);
+	masked = sync_test_and_set_bit(evtchn, BM(s->evtchn_mask));
+	sync_set_bit(evtchn, BM(s->evtchn_pending));
 	if (!masked)
 		unmask_evtchn(evtchn);
 
@@ -1536,8 +1555,8 @@ static int retrigger_dynirq(struct irq_data *data)
 	if (VALID_EVTCHN(evtchn)) {
 		int masked;
 
-		masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
-		sync_set_bit(evtchn, sh->evtchn_pending);
+		masked = sync_test_and_set_bit(evtchn, BM(sh->evtchn_mask));
+		sync_set_bit(evtchn, BM(sh->evtchn_pending));
 		if (!masked)
 			unmask_evtchn(evtchn);
 		ret = 1;
@@ -269,6 +269,14 @@ static int evtchn_bind_to_user(struct per_user_data *u, int port)
 				       u->name, (void *)(unsigned long)port);
 	if (rc >= 0)
 		rc = evtchn_make_refcounted(port);
+	else {
+		/* bind failed, should close the port now */
+		struct evtchn_close close;
+		close.port = port;
+		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
+			BUG();
+		set_port_user(port, NULL);
+	}
 
 	return rc;
 }
@@ -277,6 +285,8 @@ static void evtchn_unbind_from_user(struct per_user_data *u, int port)
 {
 	int irq = irq_from_evtchn(port);
 
+	BUG_ON(irq < 0);
+
 	unbind_from_irqhandler(irq, (void *)(unsigned long)port);
 
 	set_port_user(port, NULL);
@@ -534,10 +544,10 @@ static int __init evtchn_init(void)
 
 	spin_lock_init(&port_user_lock);
 
-	/* Create '/dev/misc/evtchn'. */
+	/* Create '/dev/xen/evtchn'. */
 	err = misc_register(&evtchn_miscdev);
 	if (err != 0) {
-		printk(KERN_ALERT "Could not register /dev/misc/evtchn\n");
+		printk(KERN_ERR "Could not register /dev/xen/evtchn\n");
 		return err;
 	}
 
@@ -1147,7 +1147,7 @@ static int gnttab_setup(void)
 		return gnttab_map(0, nr_grant_frames - 1);
 
 	if (gnttab_shared.addr == NULL) {
-		gnttab_shared.addr = ioremap(xen_hvm_resume_frames,
+		gnttab_shared.addr = xen_remap(xen_hvm_resume_frames,
 						PAGE_SIZE * max_nr_gframes);
 		if (gnttab_shared.addr == NULL) {
 			printk(KERN_WARNING
@@ -332,6 +332,41 @@ static irqreturn_t xen_pcpu_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+/* Sync with Xen hypervisor after cpu hotadded */
+void xen_pcpu_hotplug_sync(void)
+{
+	schedule_work(&xen_pcpu_work);
+}
+EXPORT_SYMBOL_GPL(xen_pcpu_hotplug_sync);
+
+/*
+ * For hypervisor presented cpu, return logic cpu id;
+ * For hypervisor non-presented cpu, return -ENODEV.
+ */
+int xen_pcpu_id(uint32_t acpi_id)
+{
+	int cpu_id = 0, max_id = 0;
+	struct xen_platform_op op;
+
+	op.cmd = XENPF_get_cpuinfo;
+	while (cpu_id <= max_id) {
+		op.u.pcpu_info.xen_cpuid = cpu_id;
+		if (HYPERVISOR_dom0_op(&op)) {
+			cpu_id++;
+			continue;
+		}
+
+		if (acpi_id == op.u.pcpu_info.acpi_id)
+			return cpu_id;
+		if (op.u.pcpu_info.max_present > max_id)
+			max_id = op.u.pcpu_info.max_present;
+		cpu_id++;
+	}
+
+	return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(xen_pcpu_id);
+
 static int __init xen_pcpu_init(void)
 {
 	int irq, ret;
@@ -385,7 +385,7 @@ static int __init xen_tmem_init(void)
 		if (old_ops.init != NULL)
 			s = " (WARNING: frontswap_ops overridden)";
 		printk(KERN_INFO "frontswap enabled, RAM provided by "
-		       "Xen Transcendent Memory\n");
+		       "Xen Transcendent Memory%s\n", s);
 	}
 #endif
 #ifdef CONFIG_CLEANCACHE
@ -0,0 +1,471 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2012 Intel Corporation
|
||||||
|
* Author: Liu Jinsong <jinsong.liu@intel.com>
|
||||||
|
* Author: Jiang Yunhong <yunhong.jiang@intel.com>
|
||||||
|
*
|
||||||
|
* This program is free software; you can redistribute it and/or modify
|
||||||
|
* it under the terms of the GNU General Public License as published by
|
||||||
|
* the Free Software Foundation; either version 2 of the License, or (at
|
||||||
|
* your option) any later version.
|
||||||
|
*
|
||||||
|
* This program is distributed in the hope that it will be useful, but
|
||||||
|
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||||
|
* NON INFRINGEMENT. See the GNU General Public License for more
|
||||||
|
* details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include <linux/kernel.h>
|
||||||
|
#include <linux/module.h>
|
||||||
|
#include <linux/init.h>
|
||||||
|
#include <linux/types.h>
|
||||||
|
#include <linux/cpu.h>
|
||||||
|
#include <linux/acpi.h>
|
||||||
|
#include <linux/uaccess.h>
|
||||||
|
#include <acpi/acpi_bus.h>
|
||||||
|
#include <acpi/acpi_drivers.h>
|
||||||
|
#include <acpi/processor.h>
|
||||||
|
|
||||||
|
#include <xen/acpi.h>
|
||||||
|
#include <xen/interface/platform.h>
|
||||||
|
#include <asm/xen/hypercall.h>
|
||||||
|
|
||||||
|
#define PREFIX "ACPI:xen_cpu_hotplug:"
|
||||||
|
|
||||||
|
#define INSTALL_NOTIFY_HANDLER 0
|
||||||
|
#define UNINSTALL_NOTIFY_HANDLER 1
|
||||||
|
|
||||||
|
static acpi_status xen_acpi_cpu_hotadd(struct acpi_processor *pr);
|
||||||
|
|
||||||
|
/* --------------------------------------------------------------------------
|
||||||
|
Driver Interface
|
||||||
|
-------------------------------------------------------------------------- */
|
||||||
|
|
||||||
|
static int xen_acpi_processor_enable(struct acpi_device *device)
|
||||||
|
{
|
||||||
|
acpi_status status = 0;
|
||||||
|
unsigned long long value;
|
||||||
|
union acpi_object object = { 0 };
|
||||||
|
struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
|
||||||
|
struct acpi_processor *pr;
|
||||||
|
|
||||||
|
pr = acpi_driver_data(device);
|
||||||
|
if (!pr) {
|
||||||
|
pr_err(PREFIX "Cannot find driver data\n");
|
||||||
|
return -EINVAL;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!strcmp(acpi_device_hid(device), ACPI_PROCESSOR_OBJECT_HID)) {
|
||||||
|
/* Declared with "Processor" statement; match ProcessorID */
|
||||||
|
status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
|
||||||
|
if (ACPI_FAILURE(status)) {
|
||||||
|
pr_err(PREFIX "Evaluating processor object\n");
|
||||||
|
return -ENODEV;
|
||||||
|
}
|
||||||
|
|
||||||
|
pr->acpi_id = object.processor.proc_id;
|
||||||
|
} else {
|
||||||
|
/* Declared with "Device" statement; match _UID */
|
||||||
|
status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
|
||||||
|
NULL, &value);
|
||||||
|
if (ACPI_FAILURE(status)) {
|
||||||
|
pr_err(PREFIX "Evaluating processor _UID\n");
|
||||||
|
return -ENODEV;
|
||||||
|
}
|
||||||
|
|
||||||
|
pr->acpi_id = value;
|
||||||
|
}
|
||||||
|
|
||||||
|
pr->id = xen_pcpu_id(pr->acpi_id);
|
||||||
|
|
||||||
|
if ((int)pr->id < 0)
|
||||||
|
/* This cpu is not presented at hypervisor, try to hotadd it */
|
||||||
|
if (ACPI_FAILURE(xen_acpi_cpu_hotadd(pr))) {
|
||||||
|
pr_err(PREFIX "Hotadd CPU (acpi_id = %d) failed.\n",
|
||||||
|
pr->acpi_id);
|
||||||
|
return -ENODEV;
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int __cpuinit xen_acpi_processor_add(struct acpi_device *device)
|
||||||
|
{
|
||||||
|
int ret;
|
||||||
|
struct acpi_processor *pr;
|
||||||
|
|
||||||
|
if (!device)
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
|
pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
|
||||||
|
if (!pr)
|
||||||
|
return -ENOMEM;
|
||||||
|
|
||||||
|
pr->handle = device->handle;
|
||||||
|
strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
|
||||||
|
strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
|
||||||
|
device->driver_data = pr;
|
||||||
|
|
||||||
|
ret = xen_acpi_processor_enable(device);
|
||||||
|
if (ret)
|
||||||
|
pr_err(PREFIX "Error when enabling Xen processor\n");
|
||||||
|
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int xen_acpi_processor_remove(struct acpi_device *device)
|
||||||
|
{
|
||||||
|
struct acpi_processor *pr;
|
||||||
|
|
||||||
|
if (!device)
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
|
pr = acpi_driver_data(device);
|
||||||
|
if (!pr)
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
|
kfree(pr);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*--------------------------------------------------------------
|
||||||
|
Acpi processor hotplug support
|
||||||
|
--------------------------------------------------------------*/
|
||||||
|
|
||||||
|
static int is_processor_present(acpi_handle handle)
|
||||||
|
{
|
||||||
|
acpi_status status;
|
||||||
|
unsigned long long sta = 0;
|
||||||
|
|
||||||
|
|
||||||
|
status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
|
||||||
|
|
||||||
|
if (ACPI_SUCCESS(status) && (sta & ACPI_STA_DEVICE_PRESENT))
|
||||||
|
return 1;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* _STA is mandatory for a processor that supports hot plug
|
||||||
|
*/
|
||||||
|
if (status == AE_NOT_FOUND)
|
||||||
|
pr_info(PREFIX "Processor does not support hot plug\n");
|
||||||
|
else
|
||||||
|
pr_info(PREFIX "Processor Device is not present");
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int xen_apic_id(acpi_handle handle)
|
||||||
|
{
|
||||||
|
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
|
||||||
|
union acpi_object *obj;
|
||||||
|
struct acpi_madt_local_apic *lapic;
|
||||||
|
int apic_id;
|
||||||
|
|
||||||
|
if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
|
if (!buffer.length || !buffer.pointer)
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
|
obj = buffer.pointer;
|
||||||
|
if (obj->type != ACPI_TYPE_BUFFER ||
|
||||||
|
obj->buffer.length < sizeof(*lapic)) {
|
||||||
|
kfree(buffer.pointer);
|
||||||
|
return -EINVAL;
|
||||||
|
}
|
||||||
|
|
||||||
|
lapic = (struct acpi_madt_local_apic *)obj->buffer.pointer;
|
||||||
|
|
||||||
|
if (lapic->header.type != ACPI_MADT_TYPE_LOCAL_APIC ||
|
||||||
|
!(lapic->lapic_flags & ACPI_MADT_ENABLED)) {
|
||||||
|
kfree(buffer.pointer);
|
||||||
|
return -EINVAL;
|
||||||
|
}
|
||||||
|
|
||||||
|
apic_id = (uint32_t)lapic->id;
|
||||||
|
kfree(buffer.pointer);
|
||||||
|
buffer.length = ACPI_ALLOCATE_BUFFER;
|
||||||
|
buffer.pointer = NULL;
|
||||||
|
|
||||||
|
return apic_id;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int xen_hotadd_cpu(struct acpi_processor *pr)
|
||||||
|
{
|
||||||
|
int cpu_id, apic_id, pxm;
|
||||||
|
struct xen_platform_op op;
|
||||||
|
|
||||||
|
apic_id = xen_apic_id(pr->handle);
|
||||||
|
if (apic_id < 0) {
|
||||||
|
pr_err(PREFIX "Failed to get apic_id for acpi_id %d\n",
|
||||||
|
pr->acpi_id);
|
||||||
|
return -ENODEV;
|
||||||
|
}
|
||||||
|
|
||||||
|
pxm = xen_acpi_get_pxm(pr->handle);
|
||||||
|
if (pxm < 0) {
|
||||||
|
pr_err(PREFIX "Failed to get _PXM for acpi_id %d\n",
|
||||||
|
pr->acpi_id);
|
||||||
|
return pxm;
|
||||||
|
}
|
||||||
|
|
||||||
|
op.cmd = XENPF_cpu_hotadd;
|
||||||
|
op.u.cpu_add.apic_id = apic_id;
|
||||||
|
op.u.cpu_add.acpi_id = pr->acpi_id;
|
||||||
|
op.u.cpu_add.pxm = pxm;
|
||||||
|
|
||||||
|
cpu_id = HYPERVISOR_dom0_op(&op);
|
||||||
|
if (cpu_id < 0)
|
||||||
|
pr_err(PREFIX "Failed to hotadd CPU for acpi_id %d\n",
|
||||||
|
pr->acpi_id);
|
||||||
|
|
||||||
|
return cpu_id;
|
||||||
|
}
|
||||||
|
|
||||||
|
static acpi_status xen_acpi_cpu_hotadd(struct acpi_processor *pr)
|
||||||
|
{
|
||||||
|
if (!is_processor_present(pr->handle))
|
||||||
|
return AE_ERROR;
|
||||||
|
|
||||||
|
pr->id = xen_hotadd_cpu(pr);
|
||||||
|
if ((int)pr->id < 0)
|
||||||
|
return AE_ERROR;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Sync with Xen hypervisor, providing new /sys/.../xen_cpuX
|
||||||
|
* interface after cpu hotadded.
|
||||||
|
*/
|
||||||
|
xen_pcpu_hotplug_sync();
|
||||||
|
|
||||||
|
return AE_OK;
|
||||||
|
}
|
||||||
|
|
||||||
|
static
|
||||||
|
int acpi_processor_device_add(acpi_handle handle, struct acpi_device **device)
|
||||||
|
{
|
||||||
|
acpi_handle phandle;
|
||||||
|
struct acpi_device *pdev;
|
||||||
|
|
||||||
|
if (acpi_get_parent(handle, &phandle))
|
||||||
|
return -ENODEV;
|
||||||
|
|
||||||
|
if (acpi_bus_get_device(phandle, &pdev))
|
||||||
|
return -ENODEV;
|
||||||
|
|
||||||
|
if (acpi_bus_scan(handle))
|
||||||
|
return -ENODEV;
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int acpi_processor_device_remove(struct acpi_device *device)
|
||||||
|
{
|
||||||
|
pr_debug(PREFIX "Xen does not support CPU hotremove\n");
|
||||||
|
|
||||||
|
return -ENOSYS;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void acpi_processor_hotplug_notify(acpi_handle handle,
|
||||||
|
u32 event, void *data)
|
||||||
|
{
|
||||||
|
struct acpi_processor *pr;
|
||||||
|
struct acpi_device *device = NULL;
|
||||||
|
u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */
|
||||||
|
int result;
|
||||||
|
|
||||||
|
switch (event) {
|
||||||
|
case ACPI_NOTIFY_BUS_CHECK:
|
||||||
|
case ACPI_NOTIFY_DEVICE_CHECK:
|
||||||
|
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
|
||||||
|
"Processor driver received %s event\n",
|
||||||
|
(event == ACPI_NOTIFY_BUS_CHECK) ?
|
||||||
|
"ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK"));
|
||||||
|
|
||||||
|
if (!is_processor_present(handle))
|
||||||
|
break;
|
||||||
|
|
||||||
|
if (!acpi_bus_get_device(handle, &device))
|
||||||
|
break;
|
||||||
|
|
||||||
|
result = acpi_processor_device_add(handle, &device);
|
||||||
|
if (result) {
|
||||||
|
pr_err(PREFIX "Unable to add the device\n");
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
ost_code = ACPI_OST_SC_SUCCESS;
|
||||||
|
break;
|
||||||
|
|
||||||
|
case ACPI_NOTIFY_EJECT_REQUEST:
|
||||||
|
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
|
||||||
|
"received ACPI_NOTIFY_EJECT_REQUEST\n"));
|
||||||
|
|
||||||
|
if (acpi_bus_get_device(handle, &device)) {
|
||||||
|
pr_err(PREFIX "Device don't exist, dropping EJECT\n");
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
pr = acpi_driver_data(device);
|
||||||
|
if (!pr) {
|
||||||
|
pr_err(PREFIX "Driver data is NULL, dropping EJECT\n");
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* TBD: implement acpi_processor_device_remove if Xen support
|
||||||
|
* CPU hotremove in the future.
|
||||||
|
*/
|
||||||
|
acpi_processor_device_remove(device);
|
||||||
|
break;
|
||||||
|
|
||||||
|
default:
|
||||||
|
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
|
||||||
|
"Unsupported event [0x%x]\n", event));
|
||||||
|
|
||||||
|
/* non-hotplug event; possibly handled by other handler */
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
(void) acpi_evaluate_hotplug_ost(handle, event, ost_code, NULL);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
static acpi_status is_processor_device(acpi_handle handle)
|
||||||
|
{
|
||||||
|
struct acpi_device_info *info;
|
||||||
|
char *hid;
|
||||||
|
acpi_status status;
|
||||||
|
|
||||||
|
status = acpi_get_object_info(handle, &info);
|
||||||
|
if (ACPI_FAILURE(status))
|
||||||
|
return status;
|
||||||
|
|
||||||
|
if (info->type == ACPI_TYPE_PROCESSOR) {
|
||||||
|
kfree(info);
|
||||||
|
return AE_OK; /* found a processor object */
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!(info->valid & ACPI_VALID_HID)) {
|
||||||
|
kfree(info);
|
||||||
|
return AE_ERROR;
|
||||||
|
}
|
||||||
|
|
||||||
|
hid = info->hardware_id.string;
|
||||||
|
if ((hid == NULL) || strcmp(hid, ACPI_PROCESSOR_DEVICE_HID)) {
|
||||||
|
kfree(info);
|
||||||
|
return AE_ERROR;
|
||||||
|
}
|
||||||
|
|
||||||
|
kfree(info);
|
||||||
|
return AE_OK; /* found a processor device object */
|
||||||
|
}
|
||||||
|
|
||||||
|
static acpi_status
|
||||||
|
processor_walk_namespace_cb(acpi_handle handle,
|
||||||
|
u32 lvl, void *context, void **rv)
|
||||||
|
{
|
||||||
|
acpi_status status;
|
||||||
|
int *action = context;
|
||||||
|
|
||||||
|
status = is_processor_device(handle);
|
||||||
|
if (ACPI_FAILURE(status))
|
||||||
|
return AE_OK; /* not a processor; continue to walk */
|
||||||
|
|
||||||
|
switch (*action) {
|
||||||
|
case INSTALL_NOTIFY_HANDLER:
|
||||||
|
acpi_install_notify_handler(handle,
|
||||||
|
ACPI_SYSTEM_NOTIFY,
|
||||||
|
acpi_processor_hotplug_notify,
|
||||||
|
NULL);
|
||||||
|
break;
|
||||||
|
case UNINSTALL_NOTIFY_HANDLER:
|
||||||
|
acpi_remove_notify_handler(handle,
|
||||||
|
ACPI_SYSTEM_NOTIFY,
|
||||||
|
acpi_processor_hotplug_notify);
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* found a processor; skip walking underneath */
|
||||||
|
return AE_CTRL_DEPTH;
|
||||||
|
}
|
||||||
|
|
||||||
|
static
|
||||||
|
void acpi_processor_install_hotplug_notify(void)
|
||||||
|
{
|
||||||
|
int action = INSTALL_NOTIFY_HANDLER;
|
||||||
|
acpi_walk_namespace(ACPI_TYPE_ANY,
|
||||||
|
ACPI_ROOT_OBJECT,
|
||||||
|
ACPI_UINT32_MAX,
|
||||||
|
processor_walk_namespace_cb, NULL, &action, NULL);
|
||||||
|
}
|
||||||
|
|
||||||
|
static
|
||||||
|
void acpi_processor_uninstall_hotplug_notify(void)
|
||||||
|
{
|
||||||
|
int action = UNINSTALL_NOTIFY_HANDLER;
|
||||||
|
acpi_walk_namespace(ACPI_TYPE_ANY,
|
||||||
|
ACPI_ROOT_OBJECT,
|
||||||
|
ACPI_UINT32_MAX,
|
||||||
|
processor_walk_namespace_cb, NULL, &action, NULL);
|
||||||
|
}
|
||||||
|
|
||||||
|
static const struct acpi_device_id processor_device_ids[] = {
|
||||||
|
{ACPI_PROCESSOR_OBJECT_HID, 0},
|
||||||
|
{ACPI_PROCESSOR_DEVICE_HID, 0},
|
||||||
|
{"", 0},
|
||||||
|
};
|
||||||
|
MODULE_DEVICE_TABLE(acpi, processor_device_ids);
|
||||||
|
|
||||||
|
static struct acpi_driver xen_acpi_processor_driver = {
|
||||||
|
.name = "processor",
|
||||||
|
.class = ACPI_PROCESSOR_CLASS,
|
||||||
|
.ids = processor_device_ids,
|
||||||
|
.ops = {
|
||||||
|
.add = xen_acpi_processor_add,
|
||||||
|
.remove = xen_acpi_processor_remove,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
static int __init xen_acpi_processor_init(void)
|
||||||
|
{
|
||||||
|
int result = 0;
|
||||||
|
|
||||||
|
if (!xen_initial_domain())
|
||||||
|
return -ENODEV;
|
||||||
|
|
||||||
|
/* unregister the stub which only used to reserve driver space */
|
||||||
|
xen_stub_processor_exit();
|
||||||
|
|
||||||
|
result = acpi_bus_register_driver(&xen_acpi_processor_driver);
|
||||||
|
if (result < 0) {
|
||||||
|
xen_stub_processor_init();
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
acpi_processor_install_hotplug_notify();
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void __exit xen_acpi_processor_exit(void)
|
||||||
|
{
|
||||||
|
if (!xen_initial_domain())
|
||||||
|
return;
|
||||||
|
|
||||||
|
acpi_processor_uninstall_hotplug_notify();
|
||||||
|
|
||||||
|
acpi_bus_unregister_driver(&xen_acpi_processor_driver);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* stub reserve space again to prevent any chance of native
|
||||||
|
* driver loading.
|
||||||
|
*/
|
||||||
|
xen_stub_processor_init();
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
module_init(xen_acpi_processor_init);
|
||||||
|
module_exit(xen_acpi_processor_exit);
|
||||||
|
ACPI_MODULE_NAME("xen-acpi-cpuhotplug");
|
||||||
|
MODULE_AUTHOR("Liu Jinsong <jinsong.liu@intel.com>");
|
||||||
|
MODULE_DESCRIPTION("Xen Hotplug CPU Driver");
|
||||||
|
MODULE_LICENSE("GPL");
|
|
@ -0,0 +1,483 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2012 Intel Corporation
|
||||||
|
* Author: Liu Jinsong <jinsong.liu@intel.com>
|
||||||
|
* Author: Jiang Yunhong <yunhong.jiang@intel.com>
|
||||||
|
*
|
||||||
|
* This program is free software; you can redistribute it and/or modify
|
||||||
|
* it under the terms of the GNU General Public License as published by
|
||||||
|
* the Free Software Foundation; either version 2 of the License, or (at
|
||||||
|
* your option) any later version.
|
||||||
|
*
|
||||||
|
* This program is distributed in the hope that it will be useful, but
|
||||||
|
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||||
|
* NON INFRINGEMENT. See the GNU General Public License for more
|
||||||
|
* details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include <linux/kernel.h>
|
||||||
|
#include <linux/module.h>
|
||||||
|
#include <linux/init.h>
|
||||||
|
#include <linux/types.h>
|
||||||
|
#include <linux/acpi.h>
|
||||||
|
#include <acpi/acpi_drivers.h>
|
||||||
|
#include <xen/acpi.h>
|
||||||
|
#include <xen/interface/platform.h>
|
||||||
|
#include <asm/xen/hypercall.h>
|
||||||
|
|
||||||
|
#define PREFIX "ACPI:xen_memory_hotplug:"
|
||||||
|
|
||||||
|
struct acpi_memory_info {
|
||||||
|
struct list_head list;
|
||||||
|
u64 start_addr; /* Memory Range start physical addr */
|
||||||
|
u64 length; /* Memory Range length */
|
||||||
|
unsigned short caching; /* memory cache attribute */
|
||||||
|
unsigned short write_protect; /* memory read/write attribute */
|
||||||
|
/* copied from buffer getting from _CRS */
|
||||||
|
unsigned int enabled:1;
|
||||||
|
};
|
||||||
|
|
||||||
|
struct acpi_memory_device {
|
||||||
|
struct acpi_device *device;
|
||||||
|
struct list_head res_list;
|
||||||
|
};
|
||||||
|
|
||||||
|
static bool acpi_hotmem_initialized __read_mostly;
|
||||||
|
|
||||||
|
static int xen_hotadd_memory(int pxm, struct acpi_memory_info *info)
|
||||||
|
{
|
||||||
|
int rc;
|
||||||
|
struct xen_platform_op op;
|
||||||
|
|
||||||
|
op.cmd = XENPF_mem_hotadd;
|
||||||
|
op.u.mem_add.spfn = info->start_addr >> PAGE_SHIFT;
|
||||||
|
op.u.mem_add.epfn = (info->start_addr + info->length) >> PAGE_SHIFT;
|
||||||
|
op.u.mem_add.pxm = pxm;
|
||||||
|
|
||||||
|
rc = HYPERVISOR_dom0_op(&op);
|
||||||
|
if (rc)
|
||||||
|
pr_err(PREFIX "Xen Hotplug Memory Add failed on "
|
||||||
|
"0x%lx -> 0x%lx, _PXM: %d, error: %d\n",
|
||||||
|
(unsigned long)info->start_addr,
|
||||||
|
(unsigned long)(info->start_addr + info->length),
|
||||||
|
pxm, rc);
|
||||||
|
|
||||||
|
return rc;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int xen_acpi_memory_enable_device(struct acpi_memory_device *mem_device)
|
||||||
|
{
|
||||||
|
int pxm, result;
|
||||||
|
int num_enabled = 0;
|
||||||
|
struct acpi_memory_info *info;
|
||||||
|
|
||||||
|
if (!mem_device)
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
|
pxm = xen_acpi_get_pxm(mem_device->device->handle);
|
||||||
|
if (pxm < 0)
|
||||||
|
return pxm;
|
||||||
|
|
||||||
|
list_for_each_entry(info, &mem_device->res_list, list) {
|
||||||
|
if (info->enabled) { /* just sanity check...*/
|
||||||
|
num_enabled++;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!info->length)
|
||||||
|
continue;
|
||||||
|
|
||||||
|
result = xen_hotadd_memory(pxm, info);
|
||||||
|
if (result)
|
||||||
|
continue;
|
||||||
|
info->enabled = 1;
|
||||||
|
num_enabled++;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!num_enabled)
|
||||||
|
return -ENODEV;
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static acpi_status
|
||||||
|
acpi_memory_get_resource(struct acpi_resource *resource, void *context)
|
||||||
|
{
|
||||||
|
struct acpi_memory_device *mem_device = context;
|
||||||
|
struct acpi_resource_address64 address64;
|
||||||
|
struct acpi_memory_info *info, *new;
|
||||||
|
acpi_status status;
|
||||||
|
|
||||||
|
status = acpi_resource_to_address64(resource, &address64);
|
||||||
|
if (ACPI_FAILURE(status) ||
|
||||||
|
(address64.resource_type != ACPI_MEMORY_RANGE))
|
||||||
|
return AE_OK;
|
||||||
|
|
||||||
|
	list_for_each_entry(info, &mem_device->res_list, list) {
		if ((info->caching == address64.info.mem.caching) &&
		    (info->write_protect == address64.info.mem.write_protect) &&
		    (info->start_addr + info->length == address64.minimum)) {
			info->length += address64.address_length;
			return AE_OK;
		}
	}

	new = kzalloc(sizeof(struct acpi_memory_info), GFP_KERNEL);
	if (!new)
		return AE_ERROR;

	INIT_LIST_HEAD(&new->list);
	new->caching = address64.info.mem.caching;
	new->write_protect = address64.info.mem.write_protect;
	new->start_addr = address64.minimum;
	new->length = address64.address_length;
	list_add_tail(&new->list, &mem_device->res_list);

	return AE_OK;
}

static int
acpi_memory_get_device_resources(struct acpi_memory_device *mem_device)
{
	acpi_status status;
	struct acpi_memory_info *info, *n;

	if (!list_empty(&mem_device->res_list))
		return 0;

	status = acpi_walk_resources(mem_device->device->handle,
		METHOD_NAME__CRS, acpi_memory_get_resource, mem_device);

	if (ACPI_FAILURE(status)) {
		list_for_each_entry_safe(info, n, &mem_device->res_list, list)
			kfree(info);
		INIT_LIST_HEAD(&mem_device->res_list);
		return -EINVAL;
	}

	return 0;
}

static int
acpi_memory_get_device(acpi_handle handle,
		       struct acpi_memory_device **mem_device)
{
	acpi_status status;
	acpi_handle phandle;
	struct acpi_device *device = NULL;
	struct acpi_device *pdevice = NULL;
	int result;

	if (!acpi_bus_get_device(handle, &device) && device)
		goto end;

	status = acpi_get_parent(handle, &phandle);
	if (ACPI_FAILURE(status)) {
		pr_warn(PREFIX "Cannot find acpi parent\n");
		return -EINVAL;
	}

	/* Get the parent device */
	result = acpi_bus_get_device(phandle, &pdevice);
	if (result) {
		pr_warn(PREFIX "Cannot get acpi bus device\n");
		return -EINVAL;
	}

	/*
	 * Now add the notified device. This creates the acpi_device
	 * and invokes .add function
	 */
	result = acpi_bus_scan(handle);
	if (result) {
		pr_warn(PREFIX "Cannot add acpi bus\n");
		return -EINVAL;
	}

end:
	*mem_device = acpi_driver_data(device);
	if (!(*mem_device)) {
		pr_err(PREFIX "Driver data not found\n");
		return -ENODEV;
	}

	return 0;
}

static int acpi_memory_check_device(struct acpi_memory_device *mem_device)
{
	unsigned long long current_status;

	/* Get device present/absent information from the _STA */
	if (ACPI_FAILURE(acpi_evaluate_integer(mem_device->device->handle,
				"_STA", NULL, &current_status)))
		return -ENODEV;
	/*
	 * Check for device status. Device should be
	 * present/enabled/functioning.
	 */
	if (!((current_status & ACPI_STA_DEVICE_PRESENT)
	      && (current_status & ACPI_STA_DEVICE_ENABLED)
	      && (current_status & ACPI_STA_DEVICE_FUNCTIONING)))
		return -ENODEV;

	return 0;
}

static int acpi_memory_disable_device(struct acpi_memory_device *mem_device)
{
	pr_debug(PREFIX "Xen does not support memory hotremove\n");

	return -ENOSYS;
}

static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data)
{
	struct acpi_memory_device *mem_device;
	struct acpi_device *device;
	u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */

	switch (event) {
	case ACPI_NOTIFY_BUS_CHECK:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			"\nReceived BUS CHECK notification for device\n"));
		/* Fall Through */
	case ACPI_NOTIFY_DEVICE_CHECK:
		if (event == ACPI_NOTIFY_DEVICE_CHECK)
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				"\nReceived DEVICE CHECK notification for device\n"));

		if (acpi_memory_get_device(handle, &mem_device)) {
			pr_err(PREFIX "Cannot find driver data\n");
			break;
		}

		ost_code = ACPI_OST_SC_SUCCESS;
		break;

	case ACPI_NOTIFY_EJECT_REQUEST:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			"\nReceived EJECT REQUEST notification for device\n"));

		if (acpi_bus_get_device(handle, &device)) {
			pr_err(PREFIX "Device doesn't exist\n");
			break;
		}
		mem_device = acpi_driver_data(device);
		if (!mem_device) {
			pr_err(PREFIX "Driver Data is NULL\n");
			break;
		}

		/*
		 * TBD: implement acpi_memory_disable_device and invoke
		 * acpi_bus_remove if Xen support hotremove in the future
		 */
		acpi_memory_disable_device(mem_device);
		break;

	default:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			"Unsupported event [0x%x]\n", event));
		/* non-hotplug event; possibly handled by other handler */
		return;
	}

	(void) acpi_evaluate_hotplug_ost(handle, event, ost_code, NULL);
	return;
}

static int xen_acpi_memory_device_add(struct acpi_device *device)
{
	int result;
	struct acpi_memory_device *mem_device = NULL;


	if (!device)
		return -EINVAL;

	mem_device = kzalloc(sizeof(struct acpi_memory_device), GFP_KERNEL);
	if (!mem_device)
		return -ENOMEM;

	INIT_LIST_HEAD(&mem_device->res_list);
	mem_device->device = device;
	sprintf(acpi_device_name(device), "%s", ACPI_MEMORY_DEVICE_NAME);
	sprintf(acpi_device_class(device), "%s", ACPI_MEMORY_DEVICE_CLASS);
	device->driver_data = mem_device;

	/* Get the range from the _CRS */
	result = acpi_memory_get_device_resources(mem_device);
	if (result) {
		kfree(mem_device);
		return result;
	}

	/*
	 * For booting existed memory devices, early boot code has recognized
	 * memory area by EFI/E820. If DSDT shows these memory devices on boot,
	 * hotplug is not necessary for them.
	 * For hot-added memory devices during runtime, it need hypercall to
	 * Xen hypervisor to add memory.
	 */
	if (!acpi_hotmem_initialized)
		return 0;

	if (!acpi_memory_check_device(mem_device))
		result = xen_acpi_memory_enable_device(mem_device);

	return result;
}

static int xen_acpi_memory_device_remove(struct acpi_device *device)
{
	struct acpi_memory_device *mem_device = NULL;

	if (!device || !acpi_driver_data(device))
		return -EINVAL;

	mem_device = acpi_driver_data(device);
	kfree(mem_device);

	return 0;
}

/*
 * Helper function to check for memory device
 */
static acpi_status is_memory_device(acpi_handle handle)
{
	char *hardware_id;
	acpi_status status;
	struct acpi_device_info *info;

	status = acpi_get_object_info(handle, &info);
	if (ACPI_FAILURE(status))
		return status;

	if (!(info->valid & ACPI_VALID_HID)) {
		kfree(info);
		return AE_ERROR;
	}

	hardware_id = info->hardware_id.string;
	if ((hardware_id == NULL) ||
	    (strcmp(hardware_id, ACPI_MEMORY_DEVICE_HID)))
		status = AE_ERROR;

	kfree(info);
	return status;
}

static acpi_status
acpi_memory_register_notify_handler(acpi_handle handle,
				    u32 level, void *ctxt, void **retv)
{
	acpi_status status;

	status = is_memory_device(handle);
	if (ACPI_FAILURE(status))
		return AE_OK;	/* continue */

	status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
					     acpi_memory_device_notify, NULL);
	/* continue */
	return AE_OK;
}

static acpi_status
acpi_memory_deregister_notify_handler(acpi_handle handle,
				      u32 level, void *ctxt, void **retv)
{
	acpi_status status;

	status = is_memory_device(handle);
	if (ACPI_FAILURE(status))
		return AE_OK;	/* continue */

	status = acpi_remove_notify_handler(handle,
					    ACPI_SYSTEM_NOTIFY,
					    acpi_memory_device_notify);

	return AE_OK;	/* continue */
}

static const struct acpi_device_id memory_device_ids[] = {
	{ACPI_MEMORY_DEVICE_HID, 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, memory_device_ids);

static struct acpi_driver xen_acpi_memory_device_driver = {
	.name = "acpi_memhotplug",
	.class = ACPI_MEMORY_DEVICE_CLASS,
	.ids = memory_device_ids,
	.ops = {
		.add = xen_acpi_memory_device_add,
		.remove = xen_acpi_memory_device_remove,
	},
};

static int __init xen_acpi_memory_device_init(void)
{
	int result;
	acpi_status status;

	if (!xen_initial_domain())
		return -ENODEV;

	/* unregister the stub which only used to reserve driver space */
	xen_stub_memory_device_exit();

	result = acpi_bus_register_driver(&xen_acpi_memory_device_driver);
	if (result < 0) {
		xen_stub_memory_device_init();
		return -ENODEV;
	}

	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
				     ACPI_UINT32_MAX,
				     acpi_memory_register_notify_handler,
				     NULL, NULL, NULL);

	if (ACPI_FAILURE(status)) {
		pr_warn(PREFIX "walk_namespace failed\n");
		acpi_bus_unregister_driver(&xen_acpi_memory_device_driver);
		xen_stub_memory_device_init();
		return -ENODEV;
	}

	acpi_hotmem_initialized = true;
	return 0;
}

static void __exit xen_acpi_memory_device_exit(void)
{
	acpi_status status;

	if (!xen_initial_domain())
		return;

	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
				     ACPI_UINT32_MAX,
				     acpi_memory_deregister_notify_handler,
				     NULL, NULL, NULL);
	if (ACPI_FAILURE(status))
		pr_warn(PREFIX "walk_namespace failed\n");

	acpi_bus_unregister_driver(&xen_acpi_memory_device_driver);

	/*
	 * stub reserve space again to prevent any chance of native
	 * driver loading.
	 */
	xen_stub_memory_device_init();
	return;
}

module_init(xen_acpi_memory_device_init);
module_exit(xen_acpi_memory_device_exit);

ACPI_MODULE_NAME("xen-acpi-memhotplug");
MODULE_AUTHOR("Liu Jinsong <jinsong.liu@intel.com>");
MODULE_DESCRIPTION("Xen Hotplug Mem Driver");
MODULE_LICENSE("GPL");
@@ -0,0 +1,101 @@
/*
 * xen-stub.c - stub drivers to reserve space for Xen
 *
 * Copyright (C) 2012 Intel Corporation
 *    Author: Liu Jinsong <jinsong.liu@intel.com>
 *    Author: Jiang Yunhong <yunhong.jiang@intel.com>
 *
 * Copyright (C) 2012 Oracle Inc
 *    Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/acpi.h>
#include <acpi/acpi_drivers.h>
#include <xen/acpi.h>

#ifdef CONFIG_ACPI

/*--------------------------------------------
	stub driver for Xen memory hotplug
--------------------------------------------*/

static const struct acpi_device_id memory_device_ids[] = {
	{ACPI_MEMORY_DEVICE_HID, 0},
	{"", 0},
};

static struct acpi_driver xen_stub_memory_device_driver = {
	/* same name as native memory driver to block native loaded */
	.name = "acpi_memhotplug",
	.class = ACPI_MEMORY_DEVICE_CLASS,
	.ids = memory_device_ids,
};

int xen_stub_memory_device_init(void)
{
	if (!xen_initial_domain())
		return -ENODEV;

	/* just reserve space for Xen, block native driver loaded */
	return acpi_bus_register_driver(&xen_stub_memory_device_driver);
}
EXPORT_SYMBOL_GPL(xen_stub_memory_device_init);
subsys_initcall(xen_stub_memory_device_init);

void xen_stub_memory_device_exit(void)
{
	acpi_bus_unregister_driver(&xen_stub_memory_device_driver);
}
EXPORT_SYMBOL_GPL(xen_stub_memory_device_exit);


/*--------------------------------------------
	stub driver for Xen cpu hotplug
--------------------------------------------*/

static const struct acpi_device_id processor_device_ids[] = {
	{ACPI_PROCESSOR_OBJECT_HID, 0},
	{ACPI_PROCESSOR_DEVICE_HID, 0},
	{"", 0},
};

static struct acpi_driver xen_stub_processor_driver = {
	/* same name as native processor driver to block native loaded */
	.name = "processor",
	.class = ACPI_PROCESSOR_CLASS,
	.ids = processor_device_ids,
};

int xen_stub_processor_init(void)
{
	if (!xen_initial_domain())
		return -ENODEV;

	/* just reserve space for Xen, block native driver loaded */
	return acpi_bus_register_driver(&xen_stub_processor_driver);
}
EXPORT_SYMBOL_GPL(xen_stub_processor_init);
subsys_initcall(xen_stub_processor_init);

void xen_stub_processor_exit(void)
{
	acpi_bus_unregister_driver(&xen_stub_processor_driver);
}
EXPORT_SYMBOL_GPL(xen_stub_processor_exit);

#endif
@@ -769,7 +769,7 @@ static int __init xenbus_init(void)
 			goto out_error;
 		xen_store_mfn = (unsigned long)v;
 		xen_store_interface =
-			ioremap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE);
+			xen_remap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE);
 		break;
 	default:
 		pr_warn("Xenstore state unknown\n");
@@ -40,6 +40,41 @@
 #include <xen/xen.h>
 #include <linux/acpi.h>
 
+#define ACPI_MEMORY_DEVICE_CLASS	"memory"
+#define ACPI_MEMORY_DEVICE_HID		"PNP0C80"
+#define ACPI_MEMORY_DEVICE_NAME	"Hotplug Mem Device"
+
+int xen_stub_memory_device_init(void);
+void xen_stub_memory_device_exit(void);
+
+#define ACPI_PROCESSOR_CLASS		"processor"
+#define ACPI_PROCESSOR_DEVICE_HID	"ACPI0007"
+#define ACPI_PROCESSOR_DEVICE_NAME	"Processor"
+
+int xen_stub_processor_init(void);
+void xen_stub_processor_exit(void);
+
+void xen_pcpu_hotplug_sync(void);
+int xen_pcpu_id(uint32_t acpi_id);
+
+static inline int xen_acpi_get_pxm(acpi_handle h)
+{
+	unsigned long long pxm;
+	acpi_status status;
+	acpi_handle handle;
+	acpi_handle phandle = h;
+
+	do {
+		handle = phandle;
+		status = acpi_evaluate_integer(handle, "_PXM", NULL, &pxm);
+		if (ACPI_SUCCESS(status))
+			return pxm;
+		status = acpi_get_parent(handle, &phandle);
+	} while (ACPI_SUCCESS(status));
+
+	return -ENXIO;
+}
+
 int xen_acpi_notify_hypervisor_state(u8 sleep_state,
 				     u32 pm1a_cnt, u32 pm1b_cnd);
 
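The xen_acpi_get_pxm() helper added above walks up the ACPI namespace until a _PXM object answers, returning the proximity domain of the nearest ancestor (or -ENXIO if none exists). A minimal, hypothetical caller is sketched below; the helper name and the fall-back-to-node-0 policy are illustrative assumptions, not part of this commit.

/* Hypothetical caller: look up the proximity domain for a hot-added
 * object and fall back to domain 0 if no _PXM exists in its ancestry. */
static uint32_t hotadd_lookup_pxm(acpi_handle handle)
{
	int pxm = xen_acpi_get_pxm(handle);	/* -ENXIO when no _PXM is found */

	return pxm < 0 ? 0 : (uint32_t)pxm;	/* assumed fallback policy */
}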
@@ -190,6 +190,7 @@ DEFINE_GUEST_HANDLE_STRUCT(xen_add_to_physmap);
 
 #define XENMEM_add_to_physmap_range 23
 struct xen_add_to_physmap_range {
+	/* IN */
 	/* Which domain to change the mapping for. */
 	domid_t domid;
 	uint16_t space; /* => enum phys_map_space */

@@ -203,6 +204,11 @@ struct xen_add_to_physmap_range {
 
 	/* GPFN in domid where the source mapping page should appear. */
 	GUEST_HANDLE(xen_pfn_t) gpfns;
+
+	/* OUT */
+
+	/* Per index error code. */
+	GUEST_HANDLE(int) errs;
 };
 DEFINE_GUEST_HANDLE_STRUCT(xen_add_to_physmap_range);
 
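With the IN/OUT split and the new errs handle, a guest can batch several mappings into one XENMEM_add_to_physmap_range call and read back a per-index error code. A rough single-entry sketch follows; the size, foreign_domid and idxs fields, the map space, and the helper name are assumptions based on the full structure, which is only partly visible in this hunk.

/* Hypothetical sketch: map one foreign gfn into this domain and return
 * the per-index error reported through the new 'errs' handle. */
static int map_one_foreign_gfn(domid_t foreign_domid, xen_pfn_t idx,
			       xen_pfn_t gpfn)
{
	struct xen_add_to_physmap_range xatp = {
		.domid = DOMID_SELF,
		.space = XENMAPSPACE_gmfn_foreign,	/* assumed map space */
		.size = 1,				/* assumed field */
		.foreign_domid = foreign_domid,		/* assumed field */
	};
	int err = 0;

	set_xen_guest_handle(xatp.idxs, &idx);		/* assumed field */
	set_xen_guest_handle(xatp.gpfns, &gpfn);
	set_xen_guest_handle(xatp.errs, &err);

	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp))
		return -EFAULT;

	return err;	/* 0 on success, per-index error otherwise */
}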
@@ -324,10 +324,21 @@ struct xenpf_cpu_ol {
 };
 DEFINE_GUEST_HANDLE_STRUCT(xenpf_cpu_ol);
 
-/*
- * CMD 58 and 59 are reserved for cpu hotadd and memory hotadd,
- * which are already occupied at Xen hypervisor side.
- */
+#define XENPF_cpu_hotadd 58
+struct xenpf_cpu_hotadd {
+	uint32_t apic_id;
+	uint32_t acpi_id;
+	uint32_t pxm;
+};
+
+#define XENPF_mem_hotadd 59
+struct xenpf_mem_hotadd {
+	uint64_t spfn;
+	uint64_t epfn;
+	uint32_t pxm;
+	uint32_t flags;
+};
+
 #define XENPF_core_parking 60
 struct xenpf_core_parking {
 	/* IN variables */

@@ -357,6 +368,8 @@ struct xen_platform_op {
 		struct xenpf_set_processor_pminfo set_pminfo;
 		struct xenpf_pcpuinfo pcpu_info;
 		struct xenpf_cpu_ol cpu_ol;
+		struct xenpf_cpu_hotadd cpu_add;
+		struct xenpf_mem_hotadd mem_add;
 		struct xenpf_core_parking core_parking;
 		uint8_t pad[128];
 	} u;
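These platform-op definitions are what the new ACPI drivers use to tell the hypervisor about hot-added hardware. A minimal sketch of a memory-hotadd call is shown below; the helper name and error handling are illustrative assumptions, and whether the real driver is structured exactly this way is not shown in this section.

/* Hypothetical sketch: report a newly added page-frame range to Xen.
 * spfn/epfn bound the range, pxm is the ACPI proximity domain. */
static int xen_hotadd_memory_range(uint64_t spfn, uint64_t epfn, uint32_t pxm)
{
	struct xen_platform_op op = {
		.cmd = XENPF_mem_hotadd,
		.interface_version = XENPF_INTERFACE_VERSION,
		.u.mem_add = {
			.spfn = spfn,	/* first page frame of the new range */
			.epfn = epfn,	/* one past the last page frame */
			.pxm = pxm,	/* proximity domain from _PXM */
		},
	};

	return HYPERVISOR_dom0_op(&op);	/* dom0-only platform hypercall */
}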
@@ -285,7 +285,7 @@ DEFINE_GUEST_HANDLE_STRUCT(multicall_entry);
  * Event channel endpoints per domain:
  *  1024 if a long is 32 bits; 4096 if a long is 64 bits.
  */
-#define NR_EVENT_CHANNELS (sizeof(unsigned long) * sizeof(unsigned long) * 64)
+#define NR_EVENT_CHANNELS (sizeof(xen_ulong_t) * sizeof(xen_ulong_t) * 64)
 
 struct vcpu_time_info {
 	/*

@@ -341,7 +341,7 @@ struct vcpu_info {
 	 */
 	uint8_t evtchn_upcall_pending;
 	uint8_t evtchn_upcall_mask;
-	unsigned long evtchn_pending_sel;
+	xen_ulong_t evtchn_pending_sel;
 	struct arch_vcpu_info arch;
 	struct pvclock_vcpu_time_info time;
 }; /* 64 bytes (x86) */

@@ -384,8 +384,8 @@ struct shared_info {
 	 * per-vcpu selector word to be set. Each bit in the selector covers a
 	 * 'C long' in the PENDING bitfield array.
 	 */
-	unsigned long evtchn_pending[sizeof(unsigned long) * 8];
-	unsigned long evtchn_mask[sizeof(unsigned long) * 8];
+	xen_ulong_t evtchn_pending[sizeof(xen_ulong_t) * 8];
+	xen_ulong_t evtchn_mask[sizeof(xen_ulong_t) * 8];
 
 	/*
 	 * Wallclock time: updated only by control software. Guests should base
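The switch from unsigned long to xen_ulong_t matters most on 32-bit ARM, where the Xen ABI uses a 64-bit xen_ulong_t even though the guest's C long is 32-bit. A small userspace sketch of the macro arithmetic, under those assumed type widths, is shown below.

#include <stdint.h>
#include <stdio.h>

/* Assumption: the Xen ABI's xen_ulong_t is 64-bit (as on ARM), while a
 * 32-bit guest's 'unsigned long' is only 4 bytes wide. */
typedef uint64_t xen_ulong_t;

#define NR_EVENT_CHANNELS (sizeof(xen_ulong_t) * sizeof(xen_ulong_t) * 64)

int main(void)
{
	/* 8 * 8 * 64 = 4096 event channels; the old unsigned-long form
	 * would have yielded 4 * 4 * 64 = 1024 on a 32-bit guest. */
	printf("NR_EVENT_CHANNELS = %zu\n", (size_t)NR_EVENT_CHANNELS);
	return 0;
}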