KVM: x86: Re-split x2APIC ICR into ICR+ICR2 for AMD (x2AVIC)
commit 73b42dc69be8564d4951a14d00f827929fe5ef79 upstream.
Re-introduce the "split" x2APIC ICR storage that KVM used prior to Intel's
IPI virtualization support, but only for AMD. While not stated anywhere
in the APM — which describes the ICR as a single 64-bit register — AMD CPUs
store the 64-bit ICR as two separate 32-bit values in ICR and ICR2. When
IPI virtualization (IPIv on Intel, all AVIC flavors on AMD) is enabled,
KVM needs to match CPU behavior as some ICR writes will be handled by
the CPU, not by KVM.
Add a kvm_x86_ops knob to control the underlying format used by the CPU to
store the x2APIC ICR, and tune it to AMD vs. Intel regardless of whether
or not x2AVIC is enabled. If KVM is handling all ICR writes, the storage
format for x2APIC mode doesn't matter, and having the behavior follow AMD
versus Intel will provide better test coverage and ease debugging.
Fixes: 4d1d7942e3 ("KVM: SVM: Introduce logic to (de)activate x2AVIC mode")
Cc: stable@vger.kernel.org
Cc: Maxim Levitsky <mlevitsk@redhat.com>
Cc: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Link: https://lore.kernel.org/r/20240719235107.3023592-4-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Like Xu <likexu@tencent.com>
This commit is contained in:
parent
cb437b579a
commit
0b3734e06f
|
@ -1666,7 +1666,7 @@ struct kvm_x86_ops {
|
|||
void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
|
||||
void (*enable_irq_window)(struct kvm_vcpu *vcpu);
|
||||
void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
|
||||
bool (*check_apicv_inhibit_reasons)(enum kvm_apicv_inhibit reason);
|
||||
const bool x2apic_icr_is_split;
|
||||
const unsigned long required_apicv_inhibits;
|
||||
bool allow_apicv_in_x2apic_without_x2apic_virtualization;
|
||||
void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
|
||||
|
|
|
@ -2459,11 +2459,25 @@ int kvm_x2apic_icr_write(struct kvm_lapic *apic, u64 data)
|
|||
data &= ~APIC_ICR_BUSY;
|
||||
|
||||
kvm_apic_send_ipi(apic, (u32)data, (u32)(data >> 32));
|
||||
if (kvm_x86_ops.x2apic_icr_is_split) {
|
||||
kvm_lapic_set_reg(apic, APIC_ICR, data);
|
||||
kvm_lapic_set_reg(apic, APIC_ICR2, data >> 32);
|
||||
} else {
|
||||
kvm_lapic_set_reg64(apic, APIC_ICR, data);
|
||||
}
|
||||
trace_kvm_apic_write(APIC_ICR, data);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static u64 kvm_x2apic_icr_read(struct kvm_lapic *apic)
|
||||
{
|
||||
if (kvm_x86_ops.x2apic_icr_is_split)
|
||||
return (u64)kvm_lapic_get_reg(apic, APIC_ICR) |
|
||||
(u64)kvm_lapic_get_reg(apic, APIC_ICR2) << 32;
|
||||
|
||||
return kvm_lapic_get_reg64(apic, APIC_ICR);
|
||||
}
|
||||
|
||||
/* emulate APIC access in a trap manner */
|
||||
void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
|
||||
{
|
||||
|
@ -2481,7 +2495,7 @@ void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
|
|||
* maybe-unnecessary write, and both are in the noise anyways.
|
||||
*/
|
||||
if (apic_x2apic_mode(apic) && offset == APIC_ICR)
|
||||
WARN_ON_ONCE(kvm_x2apic_icr_write(apic, kvm_lapic_get_reg64(apic, APIC_ICR)));
|
||||
WARN_ON_ONCE(kvm_x2apic_icr_write(apic, kvm_x2apic_icr_read(apic)));
|
||||
else
|
||||
kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset));
|
||||
}
|
||||
|
@ -2980,12 +2994,15 @@ static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
|
|||
|
||||
/*
|
||||
* In x2APIC mode, the LDR is fixed and based on the id. And
|
||||
* ICR is internally a single 64-bit register, but needs to be
|
||||
* split to ICR+ICR2 in userspace for backwards compatibility.
|
||||
* if the ICR is _not_ split, ICR is internally a single 64-bit
|
||||
* register, but needs to be split to ICR+ICR2 in userspace for
|
||||
* backwards compatibility.
|
||||
*/
|
||||
if (set) {
|
||||
if (set)
|
||||
*ldr = kvm_apic_calc_x2apic_ldr(x2apic_id);
|
||||
|
||||
if (!kvm_x86_ops.x2apic_icr_is_split) {
|
||||
if (set) {
|
||||
icr = __kvm_lapic_get_reg(s->regs, APIC_ICR) |
|
||||
(u64)__kvm_lapic_get_reg(s->regs, APIC_ICR2) << 32;
|
||||
__kvm_lapic_set_reg64(s->regs, APIC_ICR, icr);
|
||||
|
@ -2994,6 +3011,7 @@ static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
|
|||
__kvm_lapic_set_reg(s->regs, APIC_ICR2, icr >> 32);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -3188,7 +3206,7 @@ static int kvm_lapic_msr_read(struct kvm_lapic *apic, u32 reg, u64 *data)
|
|||
u32 low;
|
||||
|
||||
if (reg == APIC_ICR) {
|
||||
*data = kvm_lapic_get_reg64(apic, APIC_ICR);
|
||||
*data = kvm_x2apic_icr_read(apic);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -5065,6 +5065,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
|
|||
.enable_nmi_window = svm_enable_nmi_window,
|
||||
.enable_irq_window = svm_enable_irq_window,
|
||||
.update_cr8_intercept = svm_update_cr8_intercept,
|
||||
|
||||
.x2apic_icr_is_split = true,
|
||||
.set_virtual_apic_mode = avic_refresh_virtual_apic_mode,
|
||||
.refresh_apicv_exec_ctrl = avic_refresh_apicv_exec_ctrl,
|
||||
.apicv_post_state_restore = avic_apicv_post_state_restore,
|
||||
|
|
|
@ -0,0 +1,166 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include <linux/moduleparam.h>
|
||||
|
||||
#include "x86_ops.h"
|
||||
#include "vmx.h"
|
||||
#include "nested.h"
|
||||
#include "pmu.h"
|
||||
#include "posted_intr.h"
|
||||
|
||||
#define VMX_REQUIRED_APICV_INHIBITS \
|
||||
(BIT(APICV_INHIBIT_REASON_DISABLED) | \
|
||||
BIT(APICV_INHIBIT_REASON_ABSENT) | \
|
||||
BIT(APICV_INHIBIT_REASON_HYPERV) | \
|
||||
BIT(APICV_INHIBIT_REASON_BLOCKIRQ) | \
|
||||
BIT(APICV_INHIBIT_REASON_PHYSICAL_ID_ALIASED) | \
|
||||
BIT(APICV_INHIBIT_REASON_APIC_ID_MODIFIED) | \
|
||||
BIT(APICV_INHIBIT_REASON_APIC_BASE_MODIFIED))
|
||||
|
||||
struct kvm_x86_ops vt_x86_ops __initdata = {
|
||||
.name = KBUILD_MODNAME,
|
||||
|
||||
.check_processor_compatibility = vmx_check_processor_compat,
|
||||
|
||||
.hardware_unsetup = vmx_hardware_unsetup,
|
||||
|
||||
.hardware_enable = vmx_hardware_enable,
|
||||
.hardware_disable = vmx_hardware_disable,
|
||||
.has_emulated_msr = vmx_has_emulated_msr,
|
||||
|
||||
.vm_size = sizeof(struct kvm_vmx),
|
||||
.vm_init = vmx_vm_init,
|
||||
.vm_destroy = vmx_vm_destroy,
|
||||
|
||||
.vcpu_precreate = vmx_vcpu_precreate,
|
||||
.vcpu_create = vmx_vcpu_create,
|
||||
.vcpu_free = vmx_vcpu_free,
|
||||
.vcpu_reset = vmx_vcpu_reset,
|
||||
|
||||
.prepare_switch_to_guest = vmx_prepare_switch_to_guest,
|
||||
.vcpu_load = vmx_vcpu_load,
|
||||
.vcpu_put = vmx_vcpu_put,
|
||||
|
||||
.update_exception_bitmap = vmx_update_exception_bitmap,
|
||||
.get_feature_msr = vmx_get_feature_msr,
|
||||
.get_msr = vmx_get_msr,
|
||||
.set_msr = vmx_set_msr,
|
||||
.get_segment_base = vmx_get_segment_base,
|
||||
.get_segment = vmx_get_segment,
|
||||
.set_segment = vmx_set_segment,
|
||||
.get_cpl = vmx_get_cpl,
|
||||
.get_cs_db_l_bits = vmx_get_cs_db_l_bits,
|
||||
.is_valid_cr0 = vmx_is_valid_cr0,
|
||||
.set_cr0 = vmx_set_cr0,
|
||||
.is_valid_cr4 = vmx_is_valid_cr4,
|
||||
.set_cr4 = vmx_set_cr4,
|
||||
.set_efer = vmx_set_efer,
|
||||
.get_idt = vmx_get_idt,
|
||||
.set_idt = vmx_set_idt,
|
||||
.get_gdt = vmx_get_gdt,
|
||||
.set_gdt = vmx_set_gdt,
|
||||
.set_dr7 = vmx_set_dr7,
|
||||
.sync_dirty_debug_regs = vmx_sync_dirty_debug_regs,
|
||||
.cache_reg = vmx_cache_reg,
|
||||
.get_rflags = vmx_get_rflags,
|
||||
.set_rflags = vmx_set_rflags,
|
||||
.get_if_flag = vmx_get_if_flag,
|
||||
|
||||
.flush_tlb_all = vmx_flush_tlb_all,
|
||||
.flush_tlb_current = vmx_flush_tlb_current,
|
||||
.flush_tlb_gva = vmx_flush_tlb_gva,
|
||||
.flush_tlb_guest = vmx_flush_tlb_guest,
|
||||
|
||||
.vcpu_pre_run = vmx_vcpu_pre_run,
|
||||
.vcpu_run = vmx_vcpu_run,
|
||||
.handle_exit = vmx_handle_exit,
|
||||
.skip_emulated_instruction = vmx_skip_emulated_instruction,
|
||||
.update_emulated_instruction = vmx_update_emulated_instruction,
|
||||
.set_interrupt_shadow = vmx_set_interrupt_shadow,
|
||||
.get_interrupt_shadow = vmx_get_interrupt_shadow,
|
||||
.patch_hypercall = vmx_patch_hypercall,
|
||||
.inject_irq = vmx_inject_irq,
|
||||
.inject_nmi = vmx_inject_nmi,
|
||||
.inject_exception = vmx_inject_exception,
|
||||
.cancel_injection = vmx_cancel_injection,
|
||||
.interrupt_allowed = vmx_interrupt_allowed,
|
||||
.nmi_allowed = vmx_nmi_allowed,
|
||||
.get_nmi_mask = vmx_get_nmi_mask,
|
||||
.set_nmi_mask = vmx_set_nmi_mask,
|
||||
.enable_nmi_window = vmx_enable_nmi_window,
|
||||
.enable_irq_window = vmx_enable_irq_window,
|
||||
.update_cr8_intercept = vmx_update_cr8_intercept,
|
||||
|
||||
.x2apic_icr_is_split = false,
|
||||
.set_virtual_apic_mode = vmx_set_virtual_apic_mode,
|
||||
.set_apic_access_page_addr = vmx_set_apic_access_page_addr,
|
||||
.refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl,
|
||||
.load_eoi_exitmap = vmx_load_eoi_exitmap,
|
||||
.apicv_pre_state_restore = vmx_apicv_pre_state_restore,
|
||||
.required_apicv_inhibits = VMX_REQUIRED_APICV_INHIBITS,
|
||||
.hwapic_irr_update = vmx_hwapic_irr_update,
|
||||
.hwapic_isr_update = vmx_hwapic_isr_update,
|
||||
.sync_pir_to_irr = vmx_sync_pir_to_irr,
|
||||
.deliver_interrupt = vmx_deliver_interrupt,
|
||||
.dy_apicv_has_pending_interrupt = pi_has_pending_interrupt,
|
||||
|
||||
.set_tss_addr = vmx_set_tss_addr,
|
||||
.set_identity_map_addr = vmx_set_identity_map_addr,
|
||||
.get_mt_mask = vmx_get_mt_mask,
|
||||
|
||||
.get_exit_info = vmx_get_exit_info,
|
||||
|
||||
.vcpu_after_set_cpuid = vmx_vcpu_after_set_cpuid,
|
||||
|
||||
.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
|
||||
|
||||
.get_l2_tsc_offset = vmx_get_l2_tsc_offset,
|
||||
.get_l2_tsc_multiplier = vmx_get_l2_tsc_multiplier,
|
||||
.write_tsc_offset = vmx_write_tsc_offset,
|
||||
.write_tsc_multiplier = vmx_write_tsc_multiplier,
|
||||
|
||||
.load_mmu_pgd = vmx_load_mmu_pgd,
|
||||
|
||||
.check_intercept = vmx_check_intercept,
|
||||
.handle_exit_irqoff = vmx_handle_exit_irqoff,
|
||||
|
||||
.cpu_dirty_log_size = PML_ENTITY_NUM,
|
||||
.update_cpu_dirty_logging = vmx_update_cpu_dirty_logging,
|
||||
|
||||
.nested_ops = &vmx_nested_ops,
|
||||
|
||||
.pi_update_irte = vmx_pi_update_irte,
|
||||
.pi_start_assignment = vmx_pi_start_assignment,
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
.set_hv_timer = vmx_set_hv_timer,
|
||||
.cancel_hv_timer = vmx_cancel_hv_timer,
|
||||
#endif
|
||||
|
||||
.setup_mce = vmx_setup_mce,
|
||||
|
||||
#ifdef CONFIG_KVM_SMM
|
||||
.smi_allowed = vmx_smi_allowed,
|
||||
.enter_smm = vmx_enter_smm,
|
||||
.leave_smm = vmx_leave_smm,
|
||||
.enable_smi_window = vmx_enable_smi_window,
|
||||
#endif
|
||||
|
||||
.check_emulate_instruction = vmx_check_emulate_instruction,
|
||||
.apic_init_signal_blocked = vmx_apic_init_signal_blocked,
|
||||
.migrate_timers = vmx_migrate_timers,
|
||||
|
||||
.msr_filter_changed = vmx_msr_filter_changed,
|
||||
.complete_emulated_msr = kvm_complete_insn_gp,
|
||||
|
||||
.vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
|
||||
|
||||
.get_untagged_addr = vmx_get_untagged_addr,
|
||||
};
|
||||
|
||||
struct kvm_x86_init_ops vt_init_ops __initdata = {
|
||||
.hardware_setup = vmx_hardware_setup,
|
||||
.handle_intel_pt_intr = NULL,
|
||||
|
||||
.runtime_ops = &vt_x86_ops,
|
||||
.pmu_ops = &intel_pmu_ops,
|
||||
};
|
Loading…
Reference in New Issue