x86/bugs, KVM: Support the combination of guest and host IBRS
A guest may modify the SPEC_CTRL MSR from the value used by the kernel. Since the kernel doesn't use IBRS, this means a value of zero is what is needed in the host.

But the 336996-Speculative-Execution-Side-Channel-Mitigations.pdf refers to the other bits as reserved, so the kernel should respect the boot-time SPEC_CTRL value and use that.

This allows us to deal with future extensions to the SPEC_CTRL interface, if there are any at all.

Note: this uses wrmsrl() instead of native_wrmsrl(). It does not make any difference, as paravirt will overwrite the callq *0xfff.. with the wrmsrl assembler code.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
commit 5cf6875487
parent 1b86883ccb
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -228,6 +228,16 @@ enum spectre_v2_mitigation {
 extern void x86_spec_ctrl_set(u64);
 extern u64 x86_spec_ctrl_get_default(void);
 
+/*
+ * On VMENTER we must preserve whatever view of the SPEC_CTRL MSR
+ * the guest has, while on VMEXIT we restore the host view. This
+ * would be easier if SPEC_CTRL were architecturally maskable or
+ * shadowable for guests but this is not (currently) the case.
+ * Takes the guest view of SPEC_CTRL MSR as a parameter.
+ */
+extern void x86_spec_ctrl_set_guest(u64);
+extern void x86_spec_ctrl_restore_host(u64);
+
 extern char __indirect_thunk_start[];
 extern char __indirect_thunk_end[];
 
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -123,6 +123,24 @@ u64 x86_spec_ctrl_get_default(void)
 }
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
 
+void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
+{
+	if (!boot_cpu_has(X86_FEATURE_IBRS))
+		return;
+	if (x86_spec_ctrl_base != guest_spec_ctrl)
+		wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
+}
+EXPORT_SYMBOL_GPL(x86_spec_ctrl_set_guest);
+
+void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
+{
+	if (!boot_cpu_has(X86_FEATURE_IBRS))
+		return;
+	if (x86_spec_ctrl_base != guest_spec_ctrl)
+		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+}
+EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);
+
 #ifdef RETPOLINE
 static bool spectre_v2_bad_module;
 
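The two helpers above encode one rule: the host's restore target is the boot-time x86_spec_ctrl_base (not zero), and the MSR is only touched when the guest and host views actually differ. Below is a minimal, standalone sketch of that compare-before-write logic; the MSR write, the feature flag, and the main() harness are stand-ins for illustration, not kernel code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for kernel state; the names mirror the patch. */
static uint64_t x86_spec_ctrl_base;   /* host view, captured at boot */
static bool cpu_has_ibrs = true;      /* models boot_cpu_has(X86_FEATURE_IBRS) */

static void stub_wrmsrl(uint64_t val) /* models wrmsrl(MSR_IA32_SPEC_CTRL, val) */
{
	printf("wrmsr SPEC_CTRL <- 0x%llx\n", (unsigned long long)val);
}

static void spec_ctrl_set_guest(uint64_t guest_spec_ctrl)
{
	if (!cpu_has_ibrs)
		return;
	if (x86_spec_ctrl_base != guest_spec_ctrl)  /* skip a redundant wrmsr */
		stub_wrmsrl(guest_spec_ctrl);
}

static void spec_ctrl_restore_host(uint64_t guest_spec_ctrl)
{
	if (!cpu_has_ibrs)
		return;
	if (x86_spec_ctrl_base != guest_spec_ctrl)  /* restore the host base, not 0 */
		stub_wrmsrl(x86_spec_ctrl_base);
}

int main(void)
{
	spec_ctrl_set_guest(1);     /* guest enabled IBRS: one wrmsr */
	spec_ctrl_restore_host(1);  /* back to the host base: one wrmsr */
	spec_ctrl_set_guest(0);     /* guest agrees with host: no wrmsr */
	spec_ctrl_restore_host(0);  /* still in agreement: no wrmsr */
	return 0;
}

Note that the restore helper takes the guest view as its argument precisely so it can tell whether the guest ever moved the MSR away from the host base; when it did not, both wrmsrs are elided on the hot path.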
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -5557,8 +5557,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	 * is no need to worry about the conditional branch over the wrmsr
 	 * being speculatively taken.
 	 */
-	if (svm->spec_ctrl)
-		native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+	x86_spec_ctrl_set_guest(svm->spec_ctrl);
 
 	asm volatile (
 		"push %%" _ASM_BP "; \n\t"
@@ -5670,8 +5669,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
 		svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
-	if (svm->spec_ctrl)
-		native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+	x86_spec_ctrl_restore_host(svm->spec_ctrl);
 
 	/* Eliminate branch target predictions from guest mode */
 	vmexit_fill_RSB();
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -9720,8 +9720,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	 * is no need to worry about the conditional branch over the wrmsr
 	 * being speculatively taken.
 	 */
-	if (vmx->spec_ctrl)
-		native_wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+	x86_spec_ctrl_set_guest(vmx->spec_ctrl);
 
 	vmx->__launched = vmx->loaded_vmcs->launched;
 
@@ -9869,8 +9868,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
 		vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
-	if (vmx->spec_ctrl)
-		native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+	x86_spec_ctrl_restore_host(vmx->spec_ctrl);
 
 	/* Eliminate branch target predictions from guest mode */
 	vmexit_fill_RSB();
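Both the SVM and VMX hunks converge on the same entry/exit ordering. A condensed, runnable sketch of that ordering follows; everything except the sequence of calls inside vcpu_run() is a hypothetical stub, not KVM code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins; only the ordering inside vcpu_run() is from the patch. */
static uint64_t hw_spec_ctrl;         /* models MSR_IA32_SPEC_CTRL */
static uint64_t host_base;            /* models x86_spec_ctrl_base */
static uint64_t vcpu_spec_ctrl = 1;   /* the guest's SPEC_CTRL view */

static void set_guest(uint64_t v)    { if (host_base != v) hw_spec_ctrl = v; }
static void restore_host(uint64_t v) { if (host_base != v) hw_spec_ctrl = host_base; }
static bool write_intercepted(void)  { return false; }  /* guest owns the MSR */
static uint64_t read_spec_ctrl(void) { return hw_spec_ctrl; }
static void enter_guest(void)        { puts("...guest runs..."); }
static void fill_rsb(void)           { puts("RSB stuffed"); }

static void vcpu_run(void)
{
	/* VMENTER: install the guest's view of SPEC_CTRL. */
	set_guest(vcpu_spec_ctrl);

	enter_guest();

	/*
	 * VMEXIT: if the guest can write the MSR without an intercept,
	 * read back whatever it left there before restoring the host view.
	 */
	if (!write_intercepted())
		vcpu_spec_ctrl = read_spec_ctrl();

	restore_host(vcpu_spec_ctrl);

	/* Eliminate branch target predictions from guest mode. */
	fill_rsb();
}

int main(void)
{
	vcpu_run();
	printf("host SPEC_CTRL back to 0x%llx\n", (unsigned long long)hw_spec_ctrl);
	return 0;
}

The read-back before the restore is what lets the next entry skip the wrmsr when the guest never changed the value, mirroring the msr_write_intercepted() check in the real code.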