Revert the PVonHVM kexec. The patch introduces a regression
with older hypervisor stacks, such as Xen 4.1.

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.13 (GNU/Linux)

iQEcBAABAgAGBQJRHZ7eAAoJEFjIrFwIi8fJZ+sH/ieMkzdBB6aqbFMcNr7mkfBo
i3swjO2JQI7REYIHfKEVoR3IgHfqKEuABdeEQrceE0XqDepFh84YiKGI2QpPRWEA
903vUV4DXVdcBrypbL45tSFZ1Jxsrzx+F7WfV/f9WHyeiwOyaZTGVQH0VuOzpcum
RvPTT7MmC7g8MJDi66SDYBaX/pBQzifQ81nMWWjXNw0w4CwWX7le1cScZEP42MR6
jTEHzYMLDojdO+2aQM5pt/0CGI5tzBHtX5nNRl6tovlPI3ckknYYx6a7RfxkfZzF
IkMIuGS32yLfsswPPIiMs47/Qgiq3BN6eSTJXMZKUwQokL9yEs8LodcnRDYfgyQ=
=fqcJ
-----END PGP SIGNATURE-----

Merge tag 'stable/for-linus-3.8-rc7-tag-two' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen

Pull xen fixes from Konrad Rzeszutek Wilk:
 "Two fixes:
   - A simple bug-fix for redundant NULL check.
   - CVE-2013-0228/XSA-42: x86/xen: don't assume %ds is usable in
     xen_iret for 32-bit PVOPS

  and two reverts:
   - Revert the PVonHVM kexec. The patch introduces a regression with
     older hypervisor stacks, such as Xen 4.1."

* tag 'stable/for-linus-3.8-rc7-tag-two' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen:
  Revert "xen PVonHVM: use E820_Reserved area for shared_info"
  Revert "xen/PVonHVM: fix compile warning in init_hvm_pv_info"
  xen: remove redundant NULL check before unregister_and_remove_pcpu().
  x86/xen: don't assume %ds is usable in xen_iret for 32-bit PVOPS.
commit f741656d64
arch/x86/xen/enlighten.c
@@ -1517,72 +1517,51 @@ asmlinkage void __init xen_start_kernel(void)
 #endif
 }
 
-#ifdef CONFIG_XEN_PVHVM
-#define HVM_SHARED_INFO_ADDR 0xFE700000UL
-static struct shared_info *xen_hvm_shared_info;
-static unsigned long xen_hvm_sip_phys;
-static int xen_major, xen_minor;
-
-static void xen_hvm_connect_shared_info(unsigned long pfn)
+void __ref xen_hvm_init_shared_info(void)
 {
+        int cpu;
         struct xen_add_to_physmap xatp;
+        static struct shared_info *shared_info_page = 0;
 
+        if (!shared_info_page)
+                shared_info_page = (struct shared_info *)
+                        extend_brk(PAGE_SIZE, PAGE_SIZE);
         xatp.domid = DOMID_SELF;
         xatp.idx = 0;
         xatp.space = XENMAPSPACE_shared_info;
-        xatp.gpfn = pfn;
+        xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT;
         if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
                 BUG();
 
-}
-static void __init xen_hvm_set_shared_info(struct shared_info *sip)
-{
-        int cpu;
-
-        HYPERVISOR_shared_info = sip;
+        HYPERVISOR_shared_info = (struct shared_info *)shared_info_page;
 
         /* xen_vcpu is a pointer to the vcpu_info struct in the shared_info
          * page, we use it in the event channel upcall and in some pvclock
          * related functions. We don't need the vcpu_info placement
          * optimizations because we don't use any pv_mmu or pv_irq op on
-         * HVM. */
-        for_each_online_cpu(cpu)
+         * HVM.
+         * When xen_hvm_init_shared_info is run at boot time only vcpu 0 is
+         * online but xen_hvm_init_shared_info is run at resume time too and
+         * in that case multiple vcpus might be online. */
+        for_each_online_cpu(cpu) {
                 per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
-}
-
-/* Reconnect the shared_info pfn to a (new) mfn */
-void xen_hvm_resume_shared_info(void)
-{
-        xen_hvm_connect_shared_info(xen_hvm_sip_phys >> PAGE_SHIFT);
-}
-
-/* Xen tools prior to Xen 4 do not provide a E820_Reserved area for guest usage.
- * On these old tools the shared info page will be placed in E820_Ram.
- * Xen 4 provides a E820_Reserved area at 0xFC000000, and this code expects
- * that nothing is mapped up to HVM_SHARED_INFO_ADDR.
- * Xen 4.3+ provides an explicit 1MB area at HVM_SHARED_INFO_ADDR which is used
- * here for the shared info page. */
-static void __init xen_hvm_init_shared_info(void)
-{
-        if (xen_major < 4) {
-                xen_hvm_shared_info = extend_brk(PAGE_SIZE, PAGE_SIZE);
-                xen_hvm_sip_phys = __pa(xen_hvm_shared_info);
-        } else {
-                xen_hvm_sip_phys = HVM_SHARED_INFO_ADDR;
-                set_fixmap(FIX_PARAVIRT_BOOTMAP, xen_hvm_sip_phys);
-                xen_hvm_shared_info =
-                        (struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
-        }
-        xen_hvm_connect_shared_info(xen_hvm_sip_phys >> PAGE_SHIFT);
-        xen_hvm_set_shared_info(xen_hvm_shared_info);
-}
+        }
+}
 
+#ifdef CONFIG_XEN_PVHVM
 static void __init init_hvm_pv_info(void)
 {
-        uint32_t ecx, edx, pages, msr, base;
+        int major, minor;
+        uint32_t eax, ebx, ecx, edx, pages, msr, base;
         u64 pfn;
 
         base = xen_cpuid_base();
+        cpuid(base + 1, &eax, &ebx, &ecx, &edx);
+
+        major = eax >> 16;
+        minor = eax & 0xffff;
+        printk(KERN_INFO "Xen version %d.%d.\n", major, minor);
+
         cpuid(base + 2, &pages, &msr, &ecx, &edx);
 
         pfn = __pa(hypercall_page);
@@ -1633,22 +1612,12 @@ static void __init xen_hvm_guest_init(void)
 
 static bool __init xen_hvm_platform(void)
 {
-        uint32_t eax, ebx, ecx, edx, base;
-
         if (xen_pv_domain())
                 return false;
 
-        base = xen_cpuid_base();
-        if (!base)
+        if (!xen_cpuid_base())
                 return false;
 
-        cpuid(base + 1, &eax, &ebx, &ecx, &edx);
-
-        xen_major = eax >> 16;
-        xen_minor = eax & 0xffff;
-
-        printk(KERN_INFO "Xen version %d.%d.\n", xen_major, xen_minor);
-
         return true;
 }
 
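The two enlighten.c hunks above restore the original PVonHVM probing flow: the Xen version read moves back from xen_hvm_platform() into init_hvm_pv_info(), and in both variants the hypervisor is discovered through its CPUID leaves. For readers who want to poke at the same interface, here is a minimal user-space sketch of that probe; it is not part of the patch, assumes GCC/Clang's <cpuid.h> on x86, and only produces a meaningful answer when run inside a Xen HVM guest.

#include <cpuid.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;
        char sig[13];

        /* Scan the hypervisor CPUID range the way xen_cpuid_base() does. */
        for (unsigned int base = 0x40000000; base < 0x40010000; base += 0x100) {
                __cpuid(base, eax, ebx, ecx, edx);
                memcpy(sig + 0, &ebx, 4);
                memcpy(sig + 4, &ecx, 4);
                memcpy(sig + 8, &edx, 4);
                sig[12] = '\0';
                if (strcmp(sig, "XenVMMXenVMM") == 0) {
                        /* Leaf base+1 holds the Xen version, major in the high word. */
                        __cpuid(base + 1, eax, ebx, ecx, edx);
                        printf("Xen version %u.%u\n", eax >> 16, eax & 0xffff);
                        return 0;
                }
        }
        printf("not running on Xen\n");
        return 1;
}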
arch/x86/xen/suspend.c
@@ -30,7 +30,7 @@ void xen_arch_hvm_post_suspend(int suspend_cancelled)
 {
 #ifdef CONFIG_XEN_PVHVM
         int cpu;
-        xen_hvm_resume_shared_info();
+        xen_hvm_init_shared_info();
         xen_callback_vector();
         xen_unplug_emulated_devices();
         if (xen_feature(XENFEAT_hvm_safe_pvclock)) {
arch/x86/xen/xen-asm_32.S
@@ -89,11 +89,11 @@ ENTRY(xen_iret)
          */
 #ifdef CONFIG_SMP
         GET_THREAD_INFO(%eax)
-        movl TI_cpu(%eax), %eax
-        movl __per_cpu_offset(,%eax,4), %eax
-        mov xen_vcpu(%eax), %eax
+        movl %ss:TI_cpu(%eax), %eax
+        movl %ss:__per_cpu_offset(,%eax,4), %eax
+        mov %ss:xen_vcpu(%eax), %eax
 #else
-        movl xen_vcpu, %eax
+        movl %ss:xen_vcpu, %eax
 #endif
 
         /* check IF state we're restoring */
@@ -106,11 +106,11 @@ ENTRY(xen_iret)
          * resuming the code, so we don't have to be worried about
          * being preempted to another CPU.
          */
-        setz XEN_vcpu_info_mask(%eax)
+        setz %ss:XEN_vcpu_info_mask(%eax)
 xen_iret_start_crit:
 
         /* check for unmasked and pending */
-        cmpw $0x0001, XEN_vcpu_info_pending(%eax)
+        cmpw $0x0001, %ss:XEN_vcpu_info_pending(%eax)
 
         /*
          * If there's something pending, mask events again so we can
@@ -118,7 +118,7 @@ xen_iret_start_crit:
          * touch XEN_vcpu_info_mask.
          */
         jne 1f
-        movb $1, XEN_vcpu_info_mask(%eax)
+        movb $1, %ss:XEN_vcpu_info_mask(%eax)
 
 1:      popl %eax
 
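The three xen-asm_32.S hunks above are the CVE-2013-0228/XSA-42 fix: on the 32-bit PV iret path %ds may still hold a user-controlled selector, so the per-cpu and vcpu_info loads in xen_iret now carry an explicit %ss: override, the idea being that %ss is known to be a usable kernel segment at that point. A tiny, purely illustrative user-space C snippet of the addressing form follows; it is hypothetical, not from the patch, and on a flat address space the override reads the same memory as the default %ds access would.

#include <stdio.h>

static unsigned int value = 42;

int main(void)
{
        unsigned int out;

        /* Load `value` through an explicit %ss: segment override instead of
         * relying on the default %ds segment - the same trick the patch uses. */
        __asm__ volatile("movl %%ss:%1, %0" : "=r"(out) : "m"(value));
        printf("%u\n", out);
        return 0;
}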
arch/x86/xen/xen-ops.h
@@ -40,7 +40,7 @@ void xen_enable_syscall(void);
 void xen_vcpu_restore(void);
 
 void xen_callback_vector(void);
-void xen_hvm_resume_shared_info(void);
+void xen_hvm_init_shared_info(void);
 void xen_unplug_emulated_devices(void);
 
 void __init xen_build_dynamic_phys_to_machine(void);
drivers/xen/pcpu.c
@@ -278,8 +278,7 @@ static int sync_pcpu(uint32_t cpu, uint32_t *max_cpu)
          * Only those at cpu present map has its sys interface.
          */
         if (info->flags & XEN_PCPU_FLAGS_INVALID) {
-                if (pcpu)
-                        unregister_and_remove_pcpu(pcpu);
+                unregister_and_remove_pcpu(pcpu);
                 return 0;
         }
 
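The final hunk is the "simple bug-fix" from the pull message: the caller-side NULL test is dropped because, as the patch title implies, unregister_and_remove_pcpu() already copes with a NULL argument. A generic sketch of the pattern, with hypothetical names rather than the driver's code:

#include <stdlib.h>

struct entry { int id; };

/* Callee that, like unregister_and_remove_pcpu() is assumed to, handles
 * NULL itself. */
static void remove_entry(struct entry *e)
{
        if (!e)
                return;
        free(e);
}

static void sync_old(struct entry *e)
{
        if (e)                  /* redundant guard */
                remove_entry(e);
}

static void sync_new(struct entry *e)
{
        remove_entry(e);        /* same behaviour, less noise */
}

int main(void)
{
        sync_old(NULL);
        sync_new(NULL);
        sync_new(malloc(sizeof(struct entry)));
        return 0;
}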