// SPDX-License-Identifier: GPL-2.0-only
/*
 * X86 specific Hyper-V initialization code.
 *
 * Copyright (C) 2016, Microsoft, Inc.
 *
 * Author : K. Y. Srinivasan <kys@microsoft.com>
 */
|
|
|
|
|
2018-09-19 06:29:50 +08:00
|
|
|
#include <linux/efi.h>
|
2017-01-19 07:45:02 +08:00
|
|
|
#include <linux/types.h>
|
2021-02-01 22:48:11 +08:00
|
|
|
#include <linux/bitfield.h>
|
2021-07-15 02:34:47 +08:00
|
|
|
#include <linux/io.h>
|
2018-01-24 21:23:33 +08:00
|
|
|
#include <asm/apic.h>
|
|
|
|
#include <asm/desc.h>
|
2017-01-19 07:45:02 +08:00
|
|
|
#include <asm/hypervisor.h>
|
2018-03-20 22:02:05 +08:00
|
|
|
#include <asm/hyperv-tlfs.h>
|
2017-01-19 07:45:02 +08:00
|
|
|
#include <asm/mshyperv.h>
|
2020-05-22 04:05:43 +08:00
|
|
|
#include <asm/idtentry.h>
|
2020-12-22 14:55:41 +08:00
|
|
|
#include <linux/kexec.h>
|
2017-01-19 07:45:02 +08:00
|
|
|
#include <linux/version.h>
|
|
|
|
#include <linux/vmalloc.h>
|
|
|
|
#include <linux/mm.h>
|
2017-03-05 09:27:11 +08:00
|
|
|
#include <linux/hyperv.h>
|
2017-08-03 00:09:18 +08:00
|
|
|
#include <linux/slab.h>
|
2020-04-06 23:53:31 +08:00
|
|
|
#include <linux/kernel.h>
|
2017-08-03 00:09:18 +08:00
|
|
|
#include <linux/cpuhotplug.h>
|
2020-01-07 06:42:39 +08:00
|
|
|
#include <linux/syscore_ops.h>
|
2019-07-01 12:26:06 +08:00
|
|
|
#include <clocksource/hyperv_timer.h>
|
2021-02-03 23:04:26 +08:00
|
|
|
#include <linux/highmem.h>
|
2021-12-13 15:14:04 +08:00
|
|
|
#include <linux/swiotlb.h>
|
2017-01-19 07:45:02 +08:00
|
|
|
|
2020-12-22 14:55:41 +08:00
|
|
|
int hyperv_init_cpuhp;
|
2021-02-03 23:04:25 +08:00
|
|
|
u64 hv_current_partition_id = ~0ull;
|
|
|
|
EXPORT_SYMBOL_GPL(hv_current_partition_id);
|
2020-12-22 14:55:41 +08:00
|
|
|
|
2017-08-03 00:09:14 +08:00
|
|
|
void *hv_hypercall_pg;
|
|
|
|
EXPORT_SYMBOL_GPL(hv_hypercall_pg);
|
2017-02-05 00:57:13 +08:00
|
|
|
|
2021-10-25 20:21:11 +08:00
|
|
|
union hv_ghcb __percpu **hv_ghcb_pg;
|
2021-10-25 20:21:06 +08:00
|
|
|
|
2020-01-07 06:42:39 +08:00
|
|
|
/* Storage to save the hypercall page temporarily for hibernation */
|
|
|
|
static void *hv_hypercall_pg_saved;
|
|
|
|
|
2018-03-20 22:02:08 +08:00
|
|
|
struct hv_vp_assist_page **hv_vp_assist_page;
|
|
|
|
EXPORT_SYMBOL_GPL(hv_vp_assist_page);
|
|
|
|
|
2021-10-25 20:21:06 +08:00
|
|
|
static int hyperv_init_ghcb(void)
|
|
|
|
{
|
|
|
|
u64 ghcb_gpa;
|
|
|
|
void *ghcb_va;
|
|
|
|
void **ghcb_base;
|
|
|
|
|
|
|
|
if (!hv_isolation_type_snp())
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (!hv_ghcb_pg)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* GHCB page is allocated by paravisor. The address
|
|
|
|
* returned by MSR_AMD64_SEV_ES_GHCB is above shared
|
|
|
|
* memory boundary and map it here.
|
|
|
|
*/
|
|
|
|
rdmsrl(MSR_AMD64_SEV_ES_GHCB, ghcb_gpa);
|
|
|
|
ghcb_va = memremap(ghcb_gpa, HV_HYP_PAGE_SIZE, MEMREMAP_WB);
|
|
|
|
if (!ghcb_va)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
|
|
|
|
*ghcb_base = ghcb_va;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-08-03 00:09:18 +08:00
|
|
|
static int hv_cpu_init(unsigned int cpu)
|
|
|
|
{
|
2021-07-31 20:05:19 +08:00
|
|
|
union hv_vp_assist_msr_contents msr = { 0 };
|
2018-03-20 22:02:08 +08:00
|
|
|
struct hv_vp_assist_page **hvp = &hv_vp_assist_page[smp_processor_id()];
|
2021-07-15 02:34:45 +08:00
|
|
|
int ret;
|
2017-08-03 00:09:18 +08:00
|
|
|
|
2021-07-15 02:34:45 +08:00
|
|
|
ret = hv_common_cpu_init(cpu);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
2017-10-06 23:48:54 +08:00
|
|
|
|
2018-03-20 22:02:08 +08:00
|
|
|
if (!hv_vp_assist_page)
|
|
|
|
return 0;
|
|
|
|
|
x86/hyper-v: Zero out the VP ASSIST PAGE on allocation
The VP ASSIST PAGE is an "overlay" page (see Hyper-V TLFS's Section
5.2.1 "GPA Overlay Pages" for the details) and here is an excerpt:
"The hypervisor defines several special pages that "overlay" the guest's
Guest Physical Addresses (GPA) space. Overlays are addressed GPA but are
not included in the normal GPA map maintained internally by the hypervisor.
Conceptually, they exist in a separate map that overlays the GPA map.
If a page within the GPA space is overlaid, any SPA page mapped to the
GPA page is effectively "obscured" and generally unreachable by the
virtual processor through processor memory accesses.
If an overlay page is disabled, the underlying GPA page is "uncovered",
and an existing mapping becomes accessible to the guest."
SPA = System Physical Address = the final real physical address.
When a CPU (e.g. CPU1) is onlined, hv_cpu_init() allocates the VP ASSIST
PAGE and enables the EOI optimization for this CPU by writing the MSR
HV_X64_MSR_VP_ASSIST_PAGE. From now on, hvp->apic_assist belongs to the
special SPA page, and this CPU *always* uses hvp->apic_assist (which is
shared with the hypervisor) to decide if it needs to write the EOI MSR.
When a CPU is offlined then on the outgoing CPU:
1. hv_cpu_die() disables the EOI optimizaton for this CPU, and from
now on hvp->apic_assist belongs to the original "normal" SPA page;
2. the remaining work of stopping this CPU is done
3. this CPU is completely stopped.
Between 1 and 3, this CPU can still receive interrupts (e.g. reschedule
IPIs from CPU0, and Local APIC timer interrupts), and this CPU *must* write
the EOI MSR for every interrupt received, otherwise the hypervisor may not
deliver further interrupts, which may be needed to completely stop the CPU.
So, after the EOI optimization is disabled in hv_cpu_die(), it's required
that the hvp->apic_assist's bit0 is zero, which is not guaranteed by the
current allocation mode because it lacks __GFP_ZERO. As a consequence the
bit might be set and interrupt handling would not write the EOI MSR causing
interrupt delivery to become stuck.
Add the missing __GFP_ZERO to the allocation.
Note 1: after the "normal" SPA page is allocted and zeroed out, neither the
hypervisor nor the guest writes into the page, so the page remains with
zeros.
Note 2: see Section 10.3.5 "EOI Assist" for the details of the EOI
optimization. When the optimization is enabled, the guest can still write
the EOI MSR register irrespective of the "No EOI required" value, but
that's slower than the optimized assist based variant.
Fixes: ba696429d290 ("x86/hyper-v: Implement EOI assist")
Signed-off-by: Dexuan Cui <decui@microsoft.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stable@vger.kernel.org
Link: https://lkml.kernel.org/r/ <PU1P153MB0169B716A637FABF07433C04BFCB0@PU1P153MB0169.APCP153.PROD.OUTLOOK.COM
2019-07-19 11:22:35 +08:00
|
|
|
if (!*hvp) {
|
2021-07-31 20:05:19 +08:00
|
|
|
if (hv_root_partition) {
|
|
|
|
/*
|
|
|
|
* For root partition we get the hypervisor provided VP assist
|
|
|
|
* page, instead of allocating a new page.
|
|
|
|
*/
|
|
|
|
rdmsrl(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64);
|
|
|
|
*hvp = memremap(msr.pfn <<
|
|
|
|
HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT,
|
|
|
|
PAGE_SIZE, MEMREMAP_WB);
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* The VP assist page is an "overlay" page (see Hyper-V TLFS's
|
|
|
|
* Section 5.2.1 "GPA Overlay Pages"). Here it must be zeroed
|
|
|
|
* out to make sure we always write the EOI MSR in
|
|
|
|
* hv_apic_eoi_write() *after* the EOI optimization is disabled
|
|
|
|
* in hv_cpu_die(), otherwise a CPU may not be stopped in the
|
|
|
|
* case of CPU offlining and the VM will hang.
|
|
|
|
*/
|
|
|
|
*hvp = __vmalloc(PAGE_SIZE, GFP_KERNEL | __GFP_ZERO);
|
|
|
|
if (*hvp)
|
|
|
|
msr.pfn = vmalloc_to_pfn(*hvp);
|
|
|
|
}
|
|
|
|
WARN_ON(!(*hvp));
|
|
|
|
if (*hvp) {
|
|
|
|
msr.enable = 1;
|
|
|
|
wrmsrl(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64);
|
|
|
|
}
|
2018-03-20 22:02:08 +08:00
|
|
|
}
|
|
|
|
|
2021-10-25 20:21:06 +08:00
|
|
|
return hyperv_init_ghcb();
|
2017-08-03 00:09:18 +08:00
|
|
|
}
|
|
|
|
|
2018-01-24 21:23:33 +08:00
|
|
|
static void (*hv_reenlightenment_cb)(void);
|
|
|
|
|
|
|
|
static void hv_reenlightenment_notify(struct work_struct *dummy)
|
|
|
|
{
|
|
|
|
struct hv_tsc_emulation_status emu_status;
|
|
|
|
|
|
|
|
rdmsrl(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status);
|
|
|
|
|
|
|
|
/* Don't issue the callback if TSC accesses are not emulated */
|
|
|
|
if (hv_reenlightenment_cb && emu_status.inprogress)
|
|
|
|
hv_reenlightenment_cb();
|
|
|
|
}
|
|
|
|
static DECLARE_DELAYED_WORK(hv_reenlightenment_work, hv_reenlightenment_notify);
|
|
|
|
|
|
|
|
void hyperv_stop_tsc_emulation(void)
|
|
|
|
{
|
|
|
|
u64 freq;
|
|
|
|
struct hv_tsc_emulation_status emu_status;
|
|
|
|
|
|
|
|
rdmsrl(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status);
|
|
|
|
emu_status.inprogress = 0;
|
|
|
|
wrmsrl(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status);
|
|
|
|
|
|
|
|
rdmsrl(HV_X64_MSR_TSC_FREQUENCY, freq);
|
|
|
|
tsc_khz = div64_u64(freq, 1000);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(hyperv_stop_tsc_emulation);
|
|
|
|
|
|
|
|
static inline bool hv_reenlightenment_available(void)
|
|
|
|
{
|
|
|
|
/*
|
2021-03-18 22:28:01 +08:00
|
|
|
* Check for required features and privileges to make TSC frequency
|
2018-01-24 21:23:33 +08:00
|
|
|
* change notifications work.
|
|
|
|
*/
|
2020-09-26 22:26:26 +08:00
|
|
|
return ms_hyperv.features & HV_ACCESS_FREQUENCY_MSRS &&
|
2018-01-24 21:23:33 +08:00
|
|
|
ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE &&
|
2020-09-26 22:26:26 +08:00
|
|
|
ms_hyperv.features & HV_ACCESS_REENLIGHTENMENT;
|
2018-01-24 21:23:33 +08:00
|
|
|
}
|
|
|
|
|
2020-05-22 04:05:43 +08:00
|
|
|
DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_reenlightenment)
|
2018-01-24 21:23:33 +08:00
|
|
|
{
|
2020-05-22 04:05:43 +08:00
|
|
|
ack_APIC_irq();
|
2018-01-24 21:23:35 +08:00
|
|
|
inc_irq_stat(irq_hv_reenlightenment_count);
|
2018-01-24 21:23:33 +08:00
|
|
|
schedule_delayed_work(&hv_reenlightenment_work, HZ/10);
|
|
|
|
}
|
|
|
|
|
|
|
|
void set_hv_tscchange_cb(void (*cb)(void))
|
|
|
|
{
|
|
|
|
struct hv_reenlightenment_control re_ctrl = {
|
|
|
|
.vector = HYPERV_REENLIGHTENMENT_VECTOR,
|
|
|
|
.enabled = 1,
|
|
|
|
};
|
|
|
|
struct hv_tsc_emulation_control emu_ctrl = {.enabled = 1};
|
|
|
|
|
|
|
|
if (!hv_reenlightenment_available()) {
|
|
|
|
pr_warn("Hyper-V: reenlightenment support is unavailable\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2021-11-05 02:22:38 +08:00
|
|
|
if (!hv_vp_index)
|
|
|
|
return;
|
|
|
|
|
2018-01-24 21:23:33 +08:00
|
|
|
hv_reenlightenment_cb = cb;
|
|
|
|
|
|
|
|
/* Make sure callback is registered before we write to MSRs */
|
|
|
|
wmb();
|
|
|
|
|
2021-10-12 23:50:05 +08:00
|
|
|
re_ctrl.target_vp = hv_vp_index[get_cpu()];
|
|
|
|
|
2018-01-24 21:23:33 +08:00
|
|
|
wrmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl));
|
|
|
|
wrmsrl(HV_X64_MSR_TSC_EMULATION_CONTROL, *((u64 *)&emu_ctrl));
|
2021-10-12 23:50:05 +08:00
|
|
|
|
|
|
|
put_cpu();
|
2018-01-24 21:23:33 +08:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(set_hv_tscchange_cb);
|
|
|
|
|
|
|
|
void clear_hv_tscchange_cb(void)
|
|
|
|
{
|
|
|
|
struct hv_reenlightenment_control re_ctrl;
|
|
|
|
|
|
|
|
if (!hv_reenlightenment_available())
|
|
|
|
return;
|
|
|
|
|
|
|
|
rdmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *(u64 *)&re_ctrl);
|
|
|
|
re_ctrl.enabled = 0;
|
|
|
|
wrmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *(u64 *)&re_ctrl);
|
|
|
|
|
|
|
|
hv_reenlightenment_cb = NULL;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(clear_hv_tscchange_cb);
|
|
|
|
|
2018-01-24 21:23:34 +08:00
|
|
|
static int hv_cpu_die(unsigned int cpu)
|
|
|
|
{
|
|
|
|
struct hv_reenlightenment_control re_ctrl;
|
|
|
|
unsigned int new_cpu;
|
2021-10-25 20:21:06 +08:00
|
|
|
void **ghcb_va;
|
|
|
|
|
|
|
|
if (hv_ghcb_pg) {
|
|
|
|
ghcb_va = (void **)this_cpu_ptr(hv_ghcb_pg);
|
|
|
|
if (*ghcb_va)
|
|
|
|
memunmap(*ghcb_va);
|
|
|
|
*ghcb_va = NULL;
|
|
|
|
}
|
2018-05-17 05:53:31 +08:00
|
|
|
|
2021-07-15 02:34:45 +08:00
|
|
|
hv_common_cpu_die(cpu);
|
2018-01-24 21:23:34 +08:00
|
|
|
|
2021-07-31 20:05:19 +08:00
|
|
|
if (hv_vp_assist_page && hv_vp_assist_page[cpu]) {
|
|
|
|
union hv_vp_assist_msr_contents msr = { 0 };
|
|
|
|
if (hv_root_partition) {
|
|
|
|
/*
|
|
|
|
* For root partition the VP assist page is mapped to
|
|
|
|
* hypervisor provided page, and thus we unmap the
|
|
|
|
* page here and nullify it, so that in future we have
|
|
|
|
* correct page address mapped in hv_cpu_init.
|
|
|
|
*/
|
|
|
|
memunmap(hv_vp_assist_page[cpu]);
|
|
|
|
hv_vp_assist_page[cpu] = NULL;
|
|
|
|
rdmsrl(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64);
|
|
|
|
msr.enable = 0;
|
|
|
|
}
|
|
|
|
wrmsrl(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64);
|
|
|
|
}
|
2018-03-20 22:02:08 +08:00
|
|
|
|
2018-01-24 21:23:34 +08:00
|
|
|
if (hv_reenlightenment_cb == NULL)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
rdmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl));
|
|
|
|
if (re_ctrl.target_vp == hv_vp_index[cpu]) {
|
2020-05-13 00:01:53 +08:00
|
|
|
/*
|
|
|
|
* Reassign reenlightenment notifications to some other online
|
|
|
|
* CPU or just disable the feature if there are no online CPUs
|
|
|
|
* left (happens on hibernation).
|
|
|
|
*/
|
2018-01-24 21:23:34 +08:00
|
|
|
new_cpu = cpumask_any_but(cpu_online_mask, cpu);
|
|
|
|
|
2020-05-13 00:01:53 +08:00
|
|
|
if (new_cpu < nr_cpu_ids)
|
|
|
|
re_ctrl.target_vp = hv_vp_index[new_cpu];
|
|
|
|
else
|
|
|
|
re_ctrl.enabled = 0;
|
|
|
|
|
2018-01-24 21:23:34 +08:00
|
|
|
wrmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl));
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-09-19 06:29:50 +08:00
|
|
|
/*
 * Decide whether pci_arch_init() should continue probing config-space
 * access methods. Returns 0 for Generation-2 VMs, 1 for Generation-1.
 */
static int __init hv_pci_init(void)
{
	/*
	 * For Generation-2 VM (detected via EFI boot), we exit from
	 * pci_arch_init() by returning 0. The purpose is to suppress the
	 * harmless warning:
	 * "PCI: Fatal: No config space access function found"
	 */
	if (efi_enabled(EFI_BOOT))
		return 0;

	/* For Generation-1 VM, we'll proceed in pci_arch_init(). */
	return 1;
}
|
|
|
|
|
2020-01-07 06:42:39 +08:00
|
|
|
static int hv_suspend(void)
|
|
|
|
{
|
|
|
|
union hv_x64_msr_hypercall_contents hypercall_msr;
|
x86/hyperv: Suspend/resume the VP assist page for hibernation
Unlike the other CPUs, CPU0 is never offlined during hibernation, so in the
resume path, the "new" kernel's VP assist page is not suspended (i.e. not
disabled), and later when we jump to the "old" kernel, the page is not
properly re-enabled for CPU0 with the allocated page from the old kernel.
So far, the VP assist page is used by hv_apic_eoi_write(), and is also
used in the case of nested virtualization (running KVM atop Hyper-V).
For hv_apic_eoi_write(), when the page is not properly re-enabled,
hvp->apic_assist is always 0, so the HV_X64_MSR_EOI MSR is always written.
This is not ideal with respect to performance, but Hyper-V can still
correctly handle this according to the Hyper-V spec; nevertheless, Linux
still must update the Hyper-V hypervisor with the correct VP assist page
to prevent Hyper-V from writing to the stale page, which causes guest
memory corruption and consequently may have caused the hangs and triple
faults seen during non-boot CPUs resume.
Fix the issue by calling hv_cpu_die()/hv_cpu_init() in the syscore ops.
Without the fix, hibernation can fail at a rate of 1/300 ~ 1/500.
With the fix, hibernation can pass a long-haul test of 2000 runs.
In the case of nested virtualization, disabling/reenabling the assist
page upon hibernation may be unsafe if there are active L2 guests.
It looks KVM should be enhanced to abort the hibernation request if
there is any active L2 guest.
Fixes: 05bd330a7fd8 ("x86/hyperv: Suspend/resume the hypercall page for hibernation")
Cc: stable@vger.kernel.org
Signed-off-by: Dexuan Cui <decui@microsoft.com>
Link: https://lore.kernel.org/r/1587437171-2472-1-git-send-email-decui@microsoft.com
Signed-off-by: Wei Liu <wei.liu@kernel.org>
2020-04-21 10:46:11 +08:00
|
|
|
int ret;
|
2020-01-07 06:42:39 +08:00
|
|
|
|
2021-02-03 23:04:26 +08:00
|
|
|
if (hv_root_partition)
|
|
|
|
return -EPERM;
|
|
|
|
|
2020-01-07 06:42:39 +08:00
|
|
|
/*
|
|
|
|
* Reset the hypercall page as it is going to be invalidated
|
2021-03-18 22:28:01 +08:00
|
|
|
* across hibernation. Setting hv_hypercall_pg to NULL ensures
|
2020-01-07 06:42:39 +08:00
|
|
|
* that any subsequent hypercall operation fails safely instead of
|
|
|
|
* crashing due to an access of an invalid page. The hypercall page
|
|
|
|
* pointer is restored on resume.
|
|
|
|
*/
|
|
|
|
hv_hypercall_pg_saved = hv_hypercall_pg;
|
|
|
|
hv_hypercall_pg = NULL;
|
|
|
|
|
|
|
|
/* Disable the hypercall page in the hypervisor */
|
|
|
|
rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
|
|
|
|
hypercall_msr.enable = 0;
|
|
|
|
wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
|
|
|
|
|
x86/hyperv: Suspend/resume the VP assist page for hibernation
Unlike the other CPUs, CPU0 is never offlined during hibernation, so in the
resume path, the "new" kernel's VP assist page is not suspended (i.e. not
disabled), and later when we jump to the "old" kernel, the page is not
properly re-enabled for CPU0 with the allocated page from the old kernel.
So far, the VP assist page is used by hv_apic_eoi_write(), and is also
used in the case of nested virtualization (running KVM atop Hyper-V).
For hv_apic_eoi_write(), when the page is not properly re-enabled,
hvp->apic_assist is always 0, so the HV_X64_MSR_EOI MSR is always written.
This is not ideal with respect to performance, but Hyper-V can still
correctly handle this according to the Hyper-V spec; nevertheless, Linux
still must update the Hyper-V hypervisor with the correct VP assist page
to prevent Hyper-V from writing to the stale page, which causes guest
memory corruption and consequently may have caused the hangs and triple
faults seen during non-boot CPUs resume.
Fix the issue by calling hv_cpu_die()/hv_cpu_init() in the syscore ops.
Without the fix, hibernation can fail at a rate of 1/300 ~ 1/500.
With the fix, hibernation can pass a long-haul test of 2000 runs.
In the case of nested virtualization, disabling/reenabling the assist
page upon hibernation may be unsafe if there are active L2 guests.
It looks KVM should be enhanced to abort the hibernation request if
there is any active L2 guest.
Fixes: 05bd330a7fd8 ("x86/hyperv: Suspend/resume the hypercall page for hibernation")
Cc: stable@vger.kernel.org
Signed-off-by: Dexuan Cui <decui@microsoft.com>
Link: https://lore.kernel.org/r/1587437171-2472-1-git-send-email-decui@microsoft.com
Signed-off-by: Wei Liu <wei.liu@kernel.org>
2020-04-21 10:46:11 +08:00
|
|
|
ret = hv_cpu_die(0);
|
|
|
|
return ret;
|
2020-01-07 06:42:39 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void hv_resume(void)
|
|
|
|
{
|
|
|
|
union hv_x64_msr_hypercall_contents hypercall_msr;
|
x86/hyperv: Suspend/resume the VP assist page for hibernation
Unlike the other CPUs, CPU0 is never offlined during hibernation, so in the
resume path, the "new" kernel's VP assist page is not suspended (i.e. not
disabled), and later when we jump to the "old" kernel, the page is not
properly re-enabled for CPU0 with the allocated page from the old kernel.
So far, the VP assist page is used by hv_apic_eoi_write(), and is also
used in the case of nested virtualization (running KVM atop Hyper-V).
For hv_apic_eoi_write(), when the page is not properly re-enabled,
hvp->apic_assist is always 0, so the HV_X64_MSR_EOI MSR is always written.
This is not ideal with respect to performance, but Hyper-V can still
correctly handle this according to the Hyper-V spec; nevertheless, Linux
still must update the Hyper-V hypervisor with the correct VP assist page
to prevent Hyper-V from writing to the stale page, which causes guest
memory corruption and consequently may have caused the hangs and triple
faults seen during non-boot CPUs resume.
Fix the issue by calling hv_cpu_die()/hv_cpu_init() in the syscore ops.
Without the fix, hibernation can fail at a rate of 1/300 ~ 1/500.
With the fix, hibernation can pass a long-haul test of 2000 runs.
In the case of nested virtualization, disabling/reenabling the assist
page upon hibernation may be unsafe if there are active L2 guests.
It looks KVM should be enhanced to abort the hibernation request if
there is any active L2 guest.
Fixes: 05bd330a7fd8 ("x86/hyperv: Suspend/resume the hypercall page for hibernation")
Cc: stable@vger.kernel.org
Signed-off-by: Dexuan Cui <decui@microsoft.com>
Link: https://lore.kernel.org/r/1587437171-2472-1-git-send-email-decui@microsoft.com
Signed-off-by: Wei Liu <wei.liu@kernel.org>
2020-04-21 10:46:11 +08:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = hv_cpu_init(0);
|
|
|
|
WARN_ON(ret);
|
2020-01-07 06:42:39 +08:00
|
|
|
|
|
|
|
/* Re-enable the hypercall page */
|
|
|
|
rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
|
|
|
|
hypercall_msr.enable = 1;
|
|
|
|
hypercall_msr.guest_physical_address =
|
|
|
|
vmalloc_to_pfn(hv_hypercall_pg_saved);
|
|
|
|
wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
|
|
|
|
|
|
|
|
hv_hypercall_pg = hv_hypercall_pg_saved;
|
|
|
|
hv_hypercall_pg_saved = NULL;
|
2020-05-13 00:01:53 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Reenlightenment notifications are disabled by hv_cpu_die(0),
|
|
|
|
* reenable them here if hv_reenlightenment_cb was previously set.
|
|
|
|
*/
|
|
|
|
if (hv_reenlightenment_cb)
|
|
|
|
set_hv_tscchange_cb(hv_reenlightenment_cb);
|
2020-01-07 06:42:39 +08:00
|
|
|
}
|
|
|
|
|
x86/hyperv: Suspend/resume the VP assist page for hibernation
Unlike the other CPUs, CPU0 is never offlined during hibernation, so in the
resume path, the "new" kernel's VP assist page is not suspended (i.e. not
disabled), and later when we jump to the "old" kernel, the page is not
properly re-enabled for CPU0 with the allocated page from the old kernel.
So far, the VP assist page is used by hv_apic_eoi_write(), and is also
used in the case of nested virtualization (running KVM atop Hyper-V).
For hv_apic_eoi_write(), when the page is not properly re-enabled,
hvp->apic_assist is always 0, so the HV_X64_MSR_EOI MSR is always written.
This is not ideal with respect to performance, but Hyper-V can still
correctly handle this according to the Hyper-V spec; nevertheless, Linux
still must update the Hyper-V hypervisor with the correct VP assist page
to prevent Hyper-V from writing to the stale page, which causes guest
memory corruption and consequently may have caused the hangs and triple
faults seen during non-boot CPUs resume.
Fix the issue by calling hv_cpu_die()/hv_cpu_init() in the syscore ops.
Without the fix, hibernation can fail at a rate of 1/300 ~ 1/500.
With the fix, hibernation can pass a long-haul test of 2000 runs.
In the case of nested virtualization, disabling/reenabling the assist
page upon hibernation may be unsafe if there are active L2 guests.
It looks KVM should be enhanced to abort the hibernation request if
there is any active L2 guest.
Fixes: 05bd330a7fd8 ("x86/hyperv: Suspend/resume the hypercall page for hibernation")
Cc: stable@vger.kernel.org
Signed-off-by: Dexuan Cui <decui@microsoft.com>
Link: https://lore.kernel.org/r/1587437171-2472-1-git-send-email-decui@microsoft.com
Signed-off-by: Wei Liu <wei.liu@kernel.org>
2020-04-21 10:46:11 +08:00
|
|
|
/* Note: when the ops are called, only CPU0 is online and IRQs are disabled. */
|
2020-01-07 06:42:39 +08:00
|
|
|
static struct syscore_ops hv_syscore_ops = {
|
|
|
|
.suspend = hv_suspend,
|
|
|
|
.resume = hv_resume,
|
|
|
|
};
|
|
|
|
|
x86/hyperv: Initialize clockevents after LAPIC is initialized
With commit 4df4cb9e99f8, the Hyper-V direct-mode STIMER is actually
initialized before LAPIC is initialized: see
apic_intr_mode_init()
x86_platform.apic_post_init()
hyperv_init()
hv_stimer_alloc()
apic_bsp_setup()
setup_local_APIC()
setup_local_APIC() temporarily disables LAPIC, initializes it and
re-eanble it. The direct-mode STIMER depends on LAPIC, and when it's
registered, it can be programmed immediately and the timer can fire
very soon:
hv_stimer_init
clockevents_config_and_register
clockevents_register_device
tick_check_new_device
tick_setup_device
tick_setup_periodic(), tick_setup_oneshot()
clockevents_program_event
When the timer fires in the hypervisor, if the LAPIC is in the
disabled state, new versions of Hyper-V ignore the event and don't inject
the timer interrupt into the VM, and hence the VM hangs when it boots.
Note: when the VM starts/reboots, the LAPIC is pre-enabled by the
firmware, so the window of LAPIC being temporarily disabled is pretty
small, and the issue can only happen once out of 100~200 reboots for
a 40-vCPU VM on one dev host, and on another host the issue doesn't
reproduce after 2000 reboots.
The issue is more noticeable for kdump/kexec, because the LAPIC is
disabled by the first kernel, and stays disabled until the kdump/kexec
kernel enables it. This is especially an issue to a Generation-2 VM
(for which Hyper-V doesn't emulate the PIT timer) when CONFIG_HZ=1000
(rather than CONFIG_HZ=250) is used.
Fix the issue by moving hv_stimer_alloc() to a later place where the
LAPIC timer is initialized.
Fixes: 4df4cb9e99f8 ("x86/hyperv: Initialize clockevents earlier in CPU onlining")
Signed-off-by: Dexuan Cui <decui@microsoft.com>
Reviewed-by: Michael Kelley <mikelley@microsoft.com>
Link: https://lore.kernel.org/r/20210116223136.13892-1-decui@microsoft.com
Signed-off-by: Wei Liu <wei.liu@kernel.org>
2021-01-17 06:31:36 +08:00
|
|
|
static void (* __initdata old_setup_percpu_clockev)(void);
|
|
|
|
|
|
|
|
static void __init hv_stimer_setup_percpu_clockev(void)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Ignore any errors in setting up stimer clockevents
|
|
|
|
* as we can run with the LAPIC timer as a fallback.
|
|
|
|
*/
|
2021-03-03 05:38:22 +08:00
|
|
|
(void)hv_stimer_alloc(false);
|
x86/hyperv: Initialize clockevents after LAPIC is initialized
With commit 4df4cb9e99f8, the Hyper-V direct-mode STIMER is actually
initialized before LAPIC is initialized: see
apic_intr_mode_init()
x86_platform.apic_post_init()
hyperv_init()
hv_stimer_alloc()
apic_bsp_setup()
setup_local_APIC()
setup_local_APIC() temporarily disables LAPIC, initializes it and
re-eanble it. The direct-mode STIMER depends on LAPIC, and when it's
registered, it can be programmed immediately and the timer can fire
very soon:
hv_stimer_init
clockevents_config_and_register
clockevents_register_device
tick_check_new_device
tick_setup_device
tick_setup_periodic(), tick_setup_oneshot()
clockevents_program_event
When the timer fires in the hypervisor, if the LAPIC is in the
disabled state, new versions of Hyper-V ignore the event and don't inject
the timer interrupt into the VM, and hence the VM hangs when it boots.
Note: when the VM starts/reboots, the LAPIC is pre-enabled by the
firmware, so the window of LAPIC being temporarily disabled is pretty
small, and the issue can only happen once out of 100~200 reboots for
a 40-vCPU VM on one dev host, and on another host the issue doesn't
reproduce after 2000 reboots.
The issue is more noticeable for kdump/kexec, because the LAPIC is
disabled by the first kernel, and stays disabled until the kdump/kexec
kernel enables it. This is especially an issue to a Generation-2 VM
(for which Hyper-V doesn't emulate the PIT timer) when CONFIG_HZ=1000
(rather than CONFIG_HZ=250) is used.
Fix the issue by moving hv_stimer_alloc() to a later place where the
LAPIC timer is initialized.
Fixes: 4df4cb9e99f8 ("x86/hyperv: Initialize clockevents earlier in CPU onlining")
Signed-off-by: Dexuan Cui <decui@microsoft.com>
Reviewed-by: Michael Kelley <mikelley@microsoft.com>
Link: https://lore.kernel.org/r/20210116223136.13892-1-decui@microsoft.com
Signed-off-by: Wei Liu <wei.liu@kernel.org>
2021-01-17 06:31:36 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Still register the LAPIC timer, because the direct-mode STIMER is
|
|
|
|
* not supported by old versions of Hyper-V. This also allows users
|
|
|
|
* to switch to LAPIC timer via /sys, if they want to.
|
|
|
|
*/
|
|
|
|
if (old_setup_percpu_clockev)
|
|
|
|
old_setup_percpu_clockev();
|
|
|
|
}
|
|
|
|
|
2021-02-03 23:04:25 +08:00
|
|
|
/*
 * Query the hypervisor for our partition ID and store it in
 * hv_current_partition_id. BUGs on failure, since subsequent root-partition
 * operations cannot proceed without it.
 */
static void __init hv_get_partition_id(void)
{
	struct hv_get_partition_id *output_page;
	unsigned long flags;
	u64 status;

	/* The per-CPU output page must not be reused underneath us. */
	local_irq_save(flags);
	output_page = *this_cpu_ptr(hyperv_pcpu_output_arg);
	status = hv_do_hypercall(HVCALL_GET_PARTITION_ID, NULL, output_page);
	if (!hv_result_success(status)) {
		/* No point in proceeding if this failed */
		pr_err("Failed to get partition ID: %lld\n", status);
		BUG();
	}
	hv_current_partition_id = output_page->partition_id;
	local_irq_restore(flags);
}
|
|
|
|
|
2017-01-19 07:45:02 +08:00
|
|
|
/*
|
|
|
|
* This function is to be invoked early in the boot sequence after the
|
|
|
|
* hypervisor has been detected.
|
|
|
|
*
|
|
|
|
* 1. Setup the hypercall page.
|
2017-01-20 02:51:46 +08:00
|
|
|
* 2. Register Hyper-V specific clocksource.
|
2018-05-17 05:53:30 +08:00
|
|
|
* 3. Setup Hyper-V specific APIC entry points.
|
2017-01-19 07:45:02 +08:00
|
|
|
*/
|
2018-05-17 05:53:30 +08:00
|
|
|
void __init hyperv_init(void)
|
2017-01-19 07:45:02 +08:00
|
|
|
{
|
2021-11-05 02:22:39 +08:00
|
|
|
u64 guest_id;
|
2017-01-19 07:45:02 +08:00
|
|
|
union hv_x64_msr_hypercall_contents hypercall_msr;
|
2021-07-15 02:34:45 +08:00
|
|
|
int cpuhp;
|
2017-01-19 07:45:02 +08:00
|
|
|
|
2017-11-09 21:27:36 +08:00
|
|
|
if (x86_hyper_type != X86_HYPER_MS_HYPERV)
|
2017-01-19 07:45:02 +08:00
|
|
|
return;
|
|
|
|
|
2021-07-15 02:34:45 +08:00
|
|
|
if (hv_common_init())
|
2017-08-03 00:09:18 +08:00
|
|
|
return;
|
|
|
|
|
2018-03-20 22:02:08 +08:00
|
|
|
hv_vp_assist_page = kcalloc(num_possible_cpus(),
|
|
|
|
sizeof(*hv_vp_assist_page), GFP_KERNEL);
|
|
|
|
if (!hv_vp_assist_page) {
|
|
|
|
ms_hyperv.hints &= ~HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;
|
2021-07-15 02:34:45 +08:00
|
|
|
goto common_free;
|
2018-03-20 22:02:08 +08:00
|
|
|
}
|
|
|
|
|
2021-10-25 20:21:06 +08:00
|
|
|
if (hv_isolation_type_snp()) {
|
2021-10-25 20:21:11 +08:00
|
|
|
hv_ghcb_pg = alloc_percpu(union hv_ghcb *);
|
2021-10-25 20:21:06 +08:00
|
|
|
if (!hv_ghcb_pg)
|
|
|
|
goto free_vp_assist_page;
|
|
|
|
}
|
|
|
|
|
2018-03-20 22:02:08 +08:00
|
|
|
cpuhp = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/hyperv_init:online",
|
|
|
|
hv_cpu_init, hv_cpu_die);
|
|
|
|
if (cpuhp < 0)
|
2021-10-25 20:21:06 +08:00
|
|
|
goto free_ghcb_page;
|
2017-08-03 00:09:18 +08:00
|
|
|
|
2017-01-19 07:45:02 +08:00
|
|
|
/*
|
|
|
|
* Setup the hypercall page and enable hypercalls.
|
|
|
|
* 1. Register the guest ID
|
|
|
|
* 2. Enable the hypercall and register the hypercall page
|
|
|
|
*/
|
|
|
|
guest_id = generate_guest_id(0, LINUX_VERSION_CODE, 0);
|
|
|
|
wrmsrl(HV_X64_MSR_GUEST_OS_ID, guest_id);
|
|
|
|
|
2021-10-25 20:21:11 +08:00
|
|
|
/* Hyper-V requires to write guest os id via ghcb in SNP IVM. */
|
|
|
|
hv_ghcb_msr_write(HV_X64_MSR_GUEST_OS_ID, guest_id);
|
|
|
|
|
2020-06-26 11:30:40 +08:00
|
|
|
hv_hypercall_pg = __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START,
|
|
|
|
VMALLOC_END, GFP_KERNEL, PAGE_KERNEL_ROX,
|
2020-07-04 06:15:27 +08:00
|
|
|
VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
|
|
|
|
__builtin_return_address(0));
|
2021-10-25 20:21:06 +08:00
|
|
|
if (hv_hypercall_pg == NULL)
|
|
|
|
goto clean_guest_os_id;
|
2017-01-19 07:45:02 +08:00
|
|
|
|
|
|
|
rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
|
|
|
|
hypercall_msr.enable = 1;
|
2021-02-03 23:04:26 +08:00
|
|
|
|
|
|
|
if (hv_root_partition) {
|
|
|
|
struct page *pg;
|
|
|
|
void *src, *dst;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* For the root partition, the hypervisor will set up its
|
|
|
|
* hypercall page. The hypervisor guarantees it will not show
|
|
|
|
* up in the root's address space. The root can't change the
|
|
|
|
* location of the hypercall page.
|
|
|
|
*
|
|
|
|
* Order is important here. We must enable the hypercall page
|
|
|
|
* so it is populated with code, then copy the code to an
|
|
|
|
* executable page.
|
|
|
|
*/
|
|
|
|
wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
|
|
|
|
|
|
|
|
pg = vmalloc_to_page(hv_hypercall_pg);
|
|
|
|
dst = kmap(pg);
|
|
|
|
src = memremap(hypercall_msr.guest_physical_address << PAGE_SHIFT, PAGE_SIZE,
|
|
|
|
MEMREMAP_WB);
|
|
|
|
BUG_ON(!(src && dst));
|
|
|
|
memcpy(dst, src, HV_HYP_PAGE_SIZE);
|
|
|
|
memunmap(src);
|
|
|
|
kunmap(pg);
|
|
|
|
} else {
|
|
|
|
hypercall_msr.guest_physical_address = vmalloc_to_pfn(hv_hypercall_pg);
|
|
|
|
wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
|
|
|
|
}
|
2017-01-20 02:51:46 +08:00
|
|
|
|
x86/hyperv: Initialize clockevents earlier in CPU onlining
Hyper-V has historically initialized stimer-based clockevents late in the
process of onlining a CPU because clockevents depend on stimer
interrupts. In the original Hyper-V design, stimer interrupts generate a
VMbus message, so the VMbus machinery must be running first, and VMbus
can't be initialized until relatively late. On x86/64, LAPIC timer based
clockevents are used during early initialization before VMbus and
stimer-based clockevents are ready, and again during CPU offlining after
the stimer clockevents have been shut down.
Unfortunately, this design creates problems when offlining CPUs for
hibernation or other purposes. stimer-based clockevents are shut down
relatively early in the offlining process, so clockevents_unbind_device()
must be used to fallback to the LAPIC-based clockevents for the remainder
of the offlining process. Furthermore, the late initialization and early
shutdown of stimer-based clockevents doesn't work well on ARM64 since there
is no other timer like the LAPIC to fallback to. So CPU onlining and
offlining doesn't work properly.
Fix this by recognizing that stimer Direct Mode is the normal path for
newer versions of Hyper-V on x86/64, and the only path on other
architectures. With stimer Direct Mode, stimer interrupts don't require any
VMbus machinery. stimer clockevents can be initialized and shut down
consistent with how it is done for other clockevent devices. While the old
VMbus-based stimer interrupts must still be supported for backward
compatibility on x86, that mode of operation can be treated as legacy.
So add a new Hyper-V stimer entry in the CPU hotplug state list, and use
that new state when in Direct Mode. Update the Hyper-V clocksource driver
to allocate and initialize stimer clockevents earlier during boot. Update
Hyper-V initialization and the VMbus driver to use this new design. As a
result, the LAPIC timer is no longer used during boot or CPU
onlining/offlining and clockevents_unbind_device() is not called. But
retain the old design as a legacy implementation for older versions of
Hyper-V that don't support Direct Mode.
Signed-off-by: Michael Kelley <mikelley@microsoft.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Dexuan Cui <decui@microsoft.com>
Reviewed-by: Dexuan Cui <decui@microsoft.com>
Link: https://lkml.kernel.org/r/1573607467-9456-1-git-send-email-mikelley@microsoft.com
2019-11-13 09:11:49 +08:00
|
|
|
/*
|
x86/hyperv: Initialize clockevents after LAPIC is initialized
With commit 4df4cb9e99f8, the Hyper-V direct-mode STIMER is actually
initialized before LAPIC is initialized: see
apic_intr_mode_init()
x86_platform.apic_post_init()
hyperv_init()
hv_stimer_alloc()
apic_bsp_setup()
setup_local_APIC()
setup_local_APIC() temporarily disables LAPIC, initializes it and
re-eanble it. The direct-mode STIMER depends on LAPIC, and when it's
registered, it can be programmed immediately and the timer can fire
very soon:
hv_stimer_init
clockevents_config_and_register
clockevents_register_device
tick_check_new_device
tick_setup_device
tick_setup_periodic(), tick_setup_oneshot()
clockevents_program_event
When the timer fires in the hypervisor, if the LAPIC is in the
disabled state, new versions of Hyper-V ignore the event and don't inject
the timer interrupt into the VM, and hence the VM hangs when it boots.
Note: when the VM starts/reboots, the LAPIC is pre-enabled by the
firmware, so the window of LAPIC being temporarily disabled is pretty
small, and the issue can only happen once out of 100~200 reboots for
a 40-vCPU VM on one dev host, and on another host the issue doesn't
reproduce after 2000 reboots.
The issue is more noticeable for kdump/kexec, because the LAPIC is
disabled by the first kernel, and stays disabled until the kdump/kexec
kernel enables it. This is especially an issue to a Generation-2 VM
(for which Hyper-V doesn't emulate the PIT timer) when CONFIG_HZ=1000
(rather than CONFIG_HZ=250) is used.
Fix the issue by moving hv_stimer_alloc() to a later place where the
LAPIC timer is initialized.
Fixes: 4df4cb9e99f8 ("x86/hyperv: Initialize clockevents earlier in CPU onlining")
Signed-off-by: Dexuan Cui <decui@microsoft.com>
Reviewed-by: Michael Kelley <mikelley@microsoft.com>
Link: https://lore.kernel.org/r/20210116223136.13892-1-decui@microsoft.com
Signed-off-by: Wei Liu <wei.liu@kernel.org>
2021-01-17 06:31:36 +08:00
|
|
|
* hyperv_init() is called before LAPIC is initialized: see
|
|
|
|
* apic_intr_mode_init() -> x86_platform.apic_post_init() and
|
|
|
|
* apic_bsp_setup() -> setup_local_APIC(). The direct-mode STIMER
|
|
|
|
* depends on LAPIC, so hv_stimer_alloc() should be called from
|
|
|
|
* x86_init.timers.setup_percpu_clockev.
|
x86/hyperv: Initialize clockevents earlier in CPU onlining
Hyper-V has historically initialized stimer-based clockevents late in the
process of onlining a CPU because clockevents depend on stimer
interrupts. In the original Hyper-V design, stimer interrupts generate a
VMbus message, so the VMbus machinery must be running first, and VMbus
can't be initialized until relatively late. On x86/64, LAPIC timer based
clockevents are used during early initialization before VMbus and
stimer-based clockevents are ready, and again during CPU offlining after
the stimer clockevents have been shut down.
Unfortunately, this design creates problems when offlining CPUs for
hibernation or other purposes. stimer-based clockevents are shut down
relatively early in the offlining process, so clockevents_unbind_device()
must be used to fallback to the LAPIC-based clockevents for the remainder
of the offlining process. Furthermore, the late initialization and early
shutdown of stimer-based clockevents doesn't work well on ARM64 since there
is no other timer like the LAPIC to fallback to. So CPU onlining and
offlining doesn't work properly.
Fix this by recognizing that stimer Direct Mode is the normal path for
newer versions of Hyper-V on x86/64, and the only path on other
architectures. With stimer Direct Mode, stimer interrupts don't require any
VMbus machinery. stimer clockevents can be initialized and shut down
consistent with how it is done for other clockevent devices. While the old
VMbus-based stimer interrupts must still be supported for backward
compatibility on x86, that mode of operation can be treated as legacy.
So add a new Hyper-V stimer entry in the CPU hotplug state list, and use
that new state when in Direct Mode. Update the Hyper-V clocksource driver
to allocate and initialize stimer clockevents earlier during boot. Update
Hyper-V initialization and the VMbus driver to use this new design. As a
result, the LAPIC timer is no longer used during boot or CPU
onlining/offlining and clockevents_unbind_device() is not called. But
retain the old design as a legacy implementation for older versions of
Hyper-V that don't support Direct Mode.
Signed-off-by: Michael Kelley <mikelley@microsoft.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Dexuan Cui <decui@microsoft.com>
Reviewed-by: Dexuan Cui <decui@microsoft.com>
Link: https://lkml.kernel.org/r/1573607467-9456-1-git-send-email-mikelley@microsoft.com
2019-11-13 09:11:49 +08:00
|
|
|
*/
|
x86/hyperv: Initialize clockevents after LAPIC is initialized
With commit 4df4cb9e99f8, the Hyper-V direct-mode STIMER is actually
initialized before LAPIC is initialized: see
apic_intr_mode_init()
x86_platform.apic_post_init()
hyperv_init()
hv_stimer_alloc()
apic_bsp_setup()
setup_local_APIC()
setup_local_APIC() temporarily disables LAPIC, initializes it and
re-eanble it. The direct-mode STIMER depends on LAPIC, and when it's
registered, it can be programmed immediately and the timer can fire
very soon:
hv_stimer_init
clockevents_config_and_register
clockevents_register_device
tick_check_new_device
tick_setup_device
tick_setup_periodic(), tick_setup_oneshot()
clockevents_program_event
When the timer fires in the hypervisor, if the LAPIC is in the
disabled state, new versions of Hyper-V ignore the event and don't inject
the timer interrupt into the VM, and hence the VM hangs when it boots.
Note: when the VM starts/reboots, the LAPIC is pre-enabled by the
firmware, so the window of LAPIC being temporarily disabled is pretty
small, and the issue can only happen once out of 100~200 reboots for
a 40-vCPU VM on one dev host, and on another host the issue doesn't
reproduce after 2000 reboots.
The issue is more noticeable for kdump/kexec, because the LAPIC is
disabled by the first kernel, and stays disabled until the kdump/kexec
kernel enables it. This is especially an issue to a Generation-2 VM
(for which Hyper-V doesn't emulate the PIT timer) when CONFIG_HZ=1000
(rather than CONFIG_HZ=250) is used.
Fix the issue by moving hv_stimer_alloc() to a later place where the
LAPIC timer is initialized.
Fixes: 4df4cb9e99f8 ("x86/hyperv: Initialize clockevents earlier in CPU onlining")
Signed-off-by: Dexuan Cui <decui@microsoft.com>
Reviewed-by: Michael Kelley <mikelley@microsoft.com>
Link: https://lore.kernel.org/r/20210116223136.13892-1-decui@microsoft.com
Signed-off-by: Wei Liu <wei.liu@kernel.org>
2021-01-17 06:31:36 +08:00
|
|
|
old_setup_percpu_clockev = x86_init.timers.setup_percpu_clockev;
|
|
|
|
x86_init.timers.setup_percpu_clockev = hv_stimer_setup_percpu_clockev;
|
x86/hyperv: Initialize clockevents earlier in CPU onlining
Hyper-V has historically initialized stimer-based clockevents late in the
process of onlining a CPU because clockevents depend on stimer
interrupts. In the original Hyper-V design, stimer interrupts generate a
VMbus message, so the VMbus machinery must be running first, and VMbus
can't be initialized until relatively late. On x86/64, LAPIC timer based
clockevents are used during early initialization before VMbus and
stimer-based clockevents are ready, and again during CPU offlining after
the stimer clockevents have been shut down.
Unfortunately, this design creates problems when offlining CPUs for
hibernation or other purposes. stimer-based clockevents are shut down
relatively early in the offlining process, so clockevents_unbind_device()
must be used to fallback to the LAPIC-based clockevents for the remainder
of the offlining process. Furthermore, the late initialization and early
shutdown of stimer-based clockevents doesn't work well on ARM64 since there
is no other timer like the LAPIC to fallback to. So CPU onlining and
offlining doesn't work properly.
Fix this by recognizing that stimer Direct Mode is the normal path for
newer versions of Hyper-V on x86/64, and the only path on other
architectures. With stimer Direct Mode, stimer interrupts don't require any
VMbus machinery. stimer clockevents can be initialized and shut down
consistent with how it is done for other clockevent devices. While the old
VMbus-based stimer interrupts must still be supported for backward
compatibility on x86, that mode of operation can be treated as legacy.
So add a new Hyper-V stimer entry in the CPU hotplug state list, and use
that new state when in Direct Mode. Update the Hyper-V clocksource driver
to allocate and initialize stimer clockevents earlier during boot. Update
Hyper-V initialization and the VMbus driver to use this new design. As a
result, the LAPIC timer is no longer used during boot or CPU
onlining/offlining and clockevents_unbind_device() is not called. But
retain the old design as a legacy implementation for older versions of
Hyper-V that don't support Direct Mode.
Signed-off-by: Michael Kelley <mikelley@microsoft.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Dexuan Cui <decui@microsoft.com>
Reviewed-by: Dexuan Cui <decui@microsoft.com>
Link: https://lkml.kernel.org/r/1573607467-9456-1-git-send-email-mikelley@microsoft.com
2019-11-13 09:11:49 +08:00
|
|
|
|
2018-05-17 05:53:30 +08:00
|
|
|
hv_apic_init();
|
|
|
|
|
2018-09-19 06:29:50 +08:00
|
|
|
x86_init.pci.arch_init = hv_pci_init;
|
|
|
|
|
2020-01-07 06:42:39 +08:00
|
|
|
register_syscore_ops(&hv_syscore_ops);
|
|
|
|
|
2020-12-22 14:55:41 +08:00
|
|
|
hyperv_init_cpuhp = cpuhp;
|
2021-02-03 23:04:25 +08:00
|
|
|
|
|
|
|
if (cpuid_ebx(HYPERV_CPUID_FEATURES) & HV_ACCESS_PARTITION_ID)
|
|
|
|
hv_get_partition_id();
|
|
|
|
|
|
|
|
BUG_ON(hv_root_partition && hv_current_partition_id == ~0ull);
|
|
|
|
|
2021-02-03 23:04:34 +08:00
|
|
|
#ifdef CONFIG_PCI_MSI
|
|
|
|
/*
|
|
|
|
* If we're running as root, we want to create our own PCI MSI domain.
|
|
|
|
* We can't set this in hv_pci_init because that would be too late.
|
|
|
|
*/
|
|
|
|
if (hv_root_partition)
|
|
|
|
x86_init.irqs.create_pci_msi_domain = hv_create_pci_msi_domain;
|
|
|
|
#endif
|
|
|
|
|
2021-03-24 02:47:16 +08:00
|
|
|
/* Query the VMs extended capability once, so that it can be cached. */
|
|
|
|
hv_query_ext_cap(0);
|
2021-12-13 15:14:04 +08:00
|
|
|
|
|
|
|
#ifdef CONFIG_SWIOTLB
|
|
|
|
/*
|
|
|
|
* Swiotlb bounce buffer needs to be mapped in extra address
|
|
|
|
* space. Map function doesn't work in the early place and so
|
|
|
|
* call swiotlb_update_mem_attributes() here.
|
|
|
|
*/
|
|
|
|
if (hv_is_isolation_supported())
|
|
|
|
swiotlb_update_mem_attributes();
|
|
|
|
#endif
|
|
|
|
|
2017-08-03 00:09:18 +08:00
|
|
|
return;
|
|
|
|
|
2021-10-25 20:21:06 +08:00
|
|
|
clean_guest_os_id:
|
|
|
|
wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
|
2021-10-25 20:21:11 +08:00
|
|
|
hv_ghcb_msr_write(HV_X64_MSR_GUEST_OS_ID, 0);
|
2018-03-20 22:02:08 +08:00
|
|
|
cpuhp_remove_state(cpuhp);
|
2021-10-25 20:21:06 +08:00
|
|
|
free_ghcb_page:
|
|
|
|
free_percpu(hv_ghcb_pg);
|
2018-03-20 22:02:08 +08:00
|
|
|
free_vp_assist_page:
|
|
|
|
kfree(hv_vp_assist_page);
|
|
|
|
hv_vp_assist_page = NULL;
|
2021-07-15 02:34:45 +08:00
|
|
|
common_free:
|
|
|
|
hv_common_free();
|
2017-01-19 07:45:02 +08:00
|
|
|
}
|
2017-01-19 07:45:03 +08:00
|
|
|
|
2017-01-29 03:37:14 +08:00
|
|
|
/*
 * This routine is called before kexec/kdump, it does the required cleanup.
 *
 * It unwinds the Hyper-V guest state set up at boot so the next (kexec'd)
 * kernel starts against a quiescent hypervisor interface: the syscore ops
 * are unregistered, the guest OS ID is cleared, and the hypercall and
 * reference-TSC overlay pages are disabled.
 */
void hyperv_cleanup(void)
{
	union hv_x64_msr_hypercall_contents hypercall_msr;

	unregister_syscore_ops(&hv_syscore_ops);

	/* Reset our OS id */
	wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
	/* In an SNP isolated VM the MSR write must also go through the GHCB. */
	hv_ghcb_msr_write(HV_X64_MSR_GUEST_OS_ID, 0);

	/*
	 * Reset the hypercall page reference before resetting the page
	 * itself, so that hypercall operations fail safely rather than
	 * panicking the kernel for using an invalid hypercall page.
	 */
	hv_hypercall_pg = NULL;

	/* Reset (disable) the hypercall page */
	hypercall_msr.as_uint64 = 0;
	wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	/* Reset (disable) the TSC page */
	hypercall_msr.as_uint64 = 0;
	wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64);
}
|
|
|
|
|
2020-04-06 23:53:31 +08:00
|
|
|
/*
 * Report a kernel panic or fatal 'die' event to Hyper-V through the
 * synthetic crash-enlightenment MSRs, so the host can record the crash
 * parameters.
 */
void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die)
{
	static bool panic_reported;
	u64 os_id;

	/* On the 'die' path, only report when the oops will panic the box. */
	if (in_die && !panic_on_oops)
		return;

	/*
	 * We prefer to report on the 'die' chain as we have proper registers
	 * to report, but if we miss it (e.g. on BUG()) we need to report it
	 * on 'panic'. Either way, report at most once.
	 */
	if (panic_reported)
		return;
	panic_reported = true;

	rdmsrl(HV_X64_MSR_GUEST_OS_ID, os_id);

	/* Hand the crash parameters to the hypervisor. */
	wrmsrl(HV_X64_MSR_CRASH_P0, err);
	wrmsrl(HV_X64_MSR_CRASH_P1, os_id);
	wrmsrl(HV_X64_MSR_CRASH_P2, regs->ip);
	wrmsrl(HV_X64_MSR_CRASH_P3, regs->ax);
	wrmsrl(HV_X64_MSR_CRASH_P4, regs->sp);

	/* Let Hyper-V know there is crash data available. */
	wrmsrl(HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_CRASH_NOTIFY);
}
EXPORT_SYMBOL_GPL(hyperv_report_panic);
|
2017-01-20 02:51:49 +08:00
|
|
|
|
2017-12-23 02:19:02 +08:00
|
|
|
bool hv_is_hyperv_initialized(void)
|
2017-01-20 02:51:49 +08:00
|
|
|
{
|
|
|
|
union hv_x64_msr_hypercall_contents hypercall_msr;
|
|
|
|
|
2017-12-23 02:19:02 +08:00
|
|
|
/*
|
|
|
|
* Ensure that we're really on Hyper-V, and not a KVM or Xen
|
|
|
|
* emulation of Hyper-V
|
|
|
|
*/
|
|
|
|
if (x86_hyper_type != X86_HYPER_MS_HYPERV)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Verify that earlier initialization succeeded by checking
|
|
|
|
* that the hypercall page is setup
|
|
|
|
*/
|
2017-01-20 02:51:49 +08:00
|
|
|
hypercall_msr.as_uint64 = 0;
|
|
|
|
rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
|
|
|
|
|
2017-12-23 02:19:02 +08:00
|
|
|
return hypercall_msr.enable;
|
2017-01-20 02:51:49 +08:00
|
|
|
}
|
2017-12-23 02:19:02 +08:00
|
|
|
EXPORT_SYMBOL_GPL(hv_is_hyperv_initialized);
|