KVM: VMX & SVM: FIXED+PHYSICAL mode single target IPI fastpath


Signed-off-by: Chen Zhuo <sagazchen@tencent.com>
Signed-off-by: Xinghui Li <korantli@tencent.com>
Authored by Chen Zhuo on 2021-04-13 20:35:32 +08:00; committed by Jianping Liu
parent f882999e2b
commit 1a738c8fbc
8 changed files with 117 additions and 26 deletions
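For reference, the "FIXED+PHYSICAL mode single target IPI" of the title is an x2APIC ICR write with no destination shorthand, physical destination mode, fixed delivery mode, and a non-broadcast destination. Below is a minimal standalone sketch of that predicate (not part of the patch; constant values follow asm/apicdef.h and the lapic.h hunk in this commit):

#include <stdbool.h>
#include <stdint.h>

#define APIC_SHORT_MASK     0xc0000      /* ICR bits 19:18, destination shorthand */
#define APIC_DEST_NOSHORT   0x0          /* no shorthand: target taken from bits 63:32 */
#define APIC_DEST_MASK      0x800        /* ICR bit 11, 0 = physical destination mode */
#define APIC_DEST_PHYSICAL  0x0
#define APIC_MODE_MASK      0x700        /* ICR bits 10:8, delivery mode */
#define APIC_DM_FIXED       0x0
#define X2APIC_BROADCAST    0xffffffffUL

/* True when a 64-bit x2APIC ICR value qualifies for the single-target fastpath. */
static bool icr_is_fixed_physical_unicast(uint64_t icr)
{
	return (icr & APIC_SHORT_MASK) == APIC_DEST_NOSHORT &&
	       (icr & APIC_DEST_MASK) == APIC_DEST_PHYSICAL &&
	       (icr & APIC_MODE_MASK) == APIC_DM_FIXED &&
	       (uint32_t)(icr >> 32) != X2APIC_BROADCAST;
}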

arch/x86/include/asm/kvm_host.h

@@ -177,6 +177,11 @@ enum {
 	VCPU_SREG_LDTR,
 };
 
+enum exit_fastpath_completion {
+	EXIT_FASTPATH_NONE,
+	EXIT_FASTPATH_SKIP_EMUL_INS,
+};
+
 #include <asm/kvm_emulate.h>
 
 #define KVM_NR_MEM_OBJS 40
@@ -1073,8 +1078,9 @@ struct kvm_x86_ops {
 	 */
 	void (*tlb_flush_gva)(struct kvm_vcpu *vcpu, gva_t addr);
 
-	void (*run)(struct kvm_vcpu *vcpu);
-	int (*handle_exit)(struct kvm_vcpu *vcpu);
+	enum exit_fastpath_completion (*run)(struct kvm_vcpu *vcpu);
+	int (*handle_exit)(struct kvm_vcpu *vcpu,
+		enum exit_fastpath_completion exit_fastpath);
 	int (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
 	void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
 	u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu);

arch/x86/kvm/irq_comm.c

@@ -52,15 +52,15 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
 	unsigned long dest_vcpu_bitmap[BITS_TO_LONGS(KVM_MAX_VCPUS)];
 	unsigned int dest_vcpus = 0;
 
+	if (kvm_irq_delivery_to_apic_fast(kvm, src, irq, &r, dest_map))
+		return r;
+
 	if (irq->dest_mode == 0 && irq->dest_id == 0xff &&
 			kvm_lowest_prio_delivery(irq)) {
 		printk(KERN_INFO "kvm: apic: phys broadcast and lowest prio\n");
 		irq->delivery_mode = APIC_DM_FIXED;
 	}
 
-	if (kvm_irq_delivery_to_apic_fast(kvm, src, irq, &r, dest_map))
-		return r;
-
 	memset(dest_vcpu_bitmap, 0, sizeof(dest_vcpu_bitmap));
 
 	kvm_for_each_vcpu(i, vcpu, kvm) {
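Note on the hunk above: the kvm_irq_delivery_to_apic_fast() check moves ahead of the physical-broadcast/lowest-priority fixup, so the common single-target delivery returns before that special case is even evaluated.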

arch/x86/kvm/lapic.c

@@ -56,15 +56,9 @@
 #define APIC_VERSION			(0x14UL | ((KVM_APIC_LVT_NUM - 1) << 16))
 #define LAPIC_MMIO_LENGTH		(1 << 12)
 /* followed define is not in apicdef.h */
-#define APIC_SHORT_MASK			0xc0000
-#define APIC_DEST_NOSHORT		0x0
-#define APIC_DEST_MASK			0x800
 #define MAX_APIC_VECTOR			256
 #define APIC_VECTORS_PER_REG		32
 
-#define APIC_BROADCAST			0xFF
-#define X2APIC_BROADCAST		0xFFFFFFFFul
-
 static bool lapic_timer_advance_dynamic __read_mostly;
 #define LAPIC_TIMER_ADVANCE_ADJUST_MIN	100	/* clock cycles */
 #define LAPIC_TIMER_ADVANCE_ADJUST_MAX	10000	/* clock cycles */
@@ -1195,7 +1189,7 @@ void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
 }
 EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated);
 
-static void apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high)
+void kvm_apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high)
 {
 	struct kvm_lapic_irq irq;
 
@@ -1909,7 +1903,7 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
 	case APIC_ICR:
 		/* No delay here, so we always clear the pending bit */
 		val &= ~(1 << 12);
-		apic_send_ipi(apic, val, kvm_lapic_get_reg(apic, APIC_ICR2));
+		kvm_apic_send_ipi(apic, val, kvm_lapic_get_reg(apic, APIC_ICR2));
 		kvm_lapic_set_reg(apic, APIC_ICR, val);
 		break;
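Note on the hunk above: apic_send_ipi() loses its static qualifier and becomes kvm_apic_send_ipi(), so the wrmsr fastpath in x86.c can deliver the IPI directly; the matching declaration is added to lapic.h below.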

arch/x86/kvm/lapic.h

@@ -10,12 +10,19 @@
 #define KVM_APIC_SIPI		1
 #define KVM_APIC_LVT_NUM	6
 
+#define APIC_SHORT_MASK			0xc0000
+#define APIC_DEST_NOSHORT		0x0
+#define APIC_DEST_MASK			0x800
+
 #define KVM_APIC_SHORT_MASK	0xc0000
 #define KVM_APIC_DEST_MASK	0x800
 
 #define APIC_BUS_CYCLE_NS	1
 #define APIC_BUS_FREQUENCY	(1000000000ULL / APIC_BUS_CYCLE_NS)
 
+#define APIC_BROADCAST			0xFF
+#define X2APIC_BROADCAST		0xFFFFFFFFul
+
 enum lapic_mode {
 	LAPIC_MODE_DISABLED = 0,
 	LAPIC_MODE_INVALID = X2APIC_ENABLE,
@@ -111,6 +118,7 @@ int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
 void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu);
 void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu);
+void kvm_apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high);
 
 int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data);
 int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data);

arch/x86/kvm/svm.c

@@ -4978,7 +4978,8 @@ static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
 	*info2 = control->exit_info_2;
 }
 
-static int handle_exit(struct kvm_vcpu *vcpu)
+static int handle_exit(struct kvm_vcpu *vcpu,
+	enum exit_fastpath_completion exit_fastpath)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	struct kvm_run *kvm_run = vcpu->run;
@@ -5036,7 +5037,10 @@ static int handle_exit(struct kvm_vcpu *vcpu)
 		       __func__, svm->vmcb->control.exit_int_info,
 		       exit_code);
 
-	if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
+	if (exit_fastpath == EXIT_FASTPATH_SKIP_EMUL_INS) {
+		skip_emulated_instruction(vcpu);
+		return 1;
+	} else if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
 	    || !svm_exit_handlers[exit_code]) {
 		vcpu_unimpl(vcpu, "svm: unexpected exit reason 0x%x\n", exit_code);
 		dump_vmcb(vcpu);
@@ -5653,8 +5657,19 @@ static void svm_cancel_injection(struct kvm_vcpu *vcpu)
 	svm_complete_interrupts(svm);
 }
 
-static void svm_vcpu_run(struct kvm_vcpu *vcpu)
+static enum exit_fastpath_completion svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
 {
+	if (!is_guest_mode(vcpu) &&
+		to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR &&
+		to_svm(vcpu)->vmcb->control.exit_info_1)
+		return handle_fastpath_set_msr_irqoff(vcpu);
+
+	return EXIT_FASTPATH_NONE;
+}
+
+static enum exit_fastpath_completion svm_vcpu_run(struct kvm_vcpu *vcpu)
+{
+	enum exit_fastpath_completion exit_fastpath;
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
@@ -5666,7 +5681,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	 * again.
 	 */
 	if (unlikely(svm->nested.exit_required))
-		return;
+		return EXIT_FASTPATH_NONE;
 
 	/*
 	 * Disable singlestep if we're injecting an interrupt/exception.
@@ -5845,6 +5860,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	stgi();
 
 	/* Any pending NMI will happen here */
+	exit_fastpath = svm_exit_handlers_fastpath(vcpu);
 
 	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
 		kvm_after_interrupt(&svm->vcpu);
@@ -5873,6 +5889,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 		svm_handle_mce(svm);
 
 	mark_all_clean(svm->vmcb);
+	return exit_fastpath;
 }
 STACK_FRAME_NON_STANDARD(svm_vcpu_run);
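Note on svm_exit_handlers_fastpath() above (my reading of the AMD APM, not stated in the patch): for an MSR intercept (SVM_EXIT_MSR), EXITINFO1 is 0 for RDMSR and 1 for WRMSR, so the exit_info_1 test restricts the fastpath to MSR writes, mirroring the EXIT_REASON_MSR_WRITE check on the VMX side below.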

arch/x86/kvm/vmx/vmx.c

@@ -5855,7 +5855,8 @@ void dump_vmcs(void)
  * The guest has exited.  See if we can fix it or if we need userspace
  * assistance.
  */
-static int vmx_handle_exit(struct kvm_vcpu *vcpu)
+static int vmx_handle_exit(struct kvm_vcpu *vcpu,
+	enum exit_fastpath_completion exit_fastpath)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u32 exit_reason = vmx->exit_reason;
@@ -5942,10 +5943,13 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
 		}
 	}
 
-	if (exit_reason < kvm_vmx_max_exit_handlers
-	    && kvm_vmx_exit_handlers[exit_reason])
+	if (exit_fastpath == EXIT_FASTPATH_SKIP_EMUL_INS) {
+		skip_emulated_instruction(vcpu);
+		return 1;
+	} else if (exit_reason < kvm_vmx_max_exit_handlers
+	    && kvm_vmx_exit_handlers[exit_reason]) {
 		return kvm_vmx_exit_handlers[exit_reason](vcpu);
-	else {
+	} else {
 		vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n",
 			exit_reason);
 		dump_vmcs();
@@ -6478,10 +6482,11 @@ void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp)
 bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
 
-static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
+static enum exit_fastpath_completion vmx_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	unsigned long cr3, cr4;
+	enum exit_fastpath_completion exit_fastpath;
 
 	/* Record the guest's net vcpu time for enforced NMI injections. */
 	if (unlikely(!enable_vnmi &&
@@ -6491,7 +6496,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	/* Don't enter VMX if guest state is invalid, let the exit handler
 	   start emulation until we arrive back to a valid state */
 	if (vmx->emulation_required)
-		return;
+		return EXIT_FASTPATH_NONE;
 
 	if (vmx->ple_window_dirty) {
 		vmx->ple_window_dirty = false;
@@ -6628,13 +6633,20 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 		kvm_machine_check();
 
 	if (vmx->fail || (vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
-		return;
+		return EXIT_FASTPATH_NONE;
 
 	vmx->loaded_vmcs->launched = 1;
 	vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
 
 	vmx_recover_nmi_blocking(vmx);
 	vmx_complete_interrupts(vmx);
+
+	if (!is_guest_mode(vcpu) && vmx->exit_reason == EXIT_REASON_MSR_WRITE)
+		exit_fastpath = handle_fastpath_set_msr_irqoff(vcpu);
+	else
+		exit_fastpath = EXIT_FASTPATH_NONE;
+
+	return exit_fastpath;
 }
 
 static struct kvm *vmx_vm_alloc(void)

arch/x86/kvm/x86.c

@@ -1611,6 +1611,57 @@ int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr);
 
+/*
+ * The fast path for frequent and performance sensitive wrmsr emulation,
+ * i.e. the sending of IPI, sending IPI early in the VM-Exit flow reduces
+ * the latency of virtual IPI by avoiding the expensive bits of transitioning
+ * from guest to host, e.g. reacquiring KVM's SRCU lock. In contrast to the
+ * other cases which must be called after interrupts are enabled on the host.
+ */
+static int handle_fastpath_set_x2apic_icr_irqoff(struct kvm_vcpu *vcpu, u64 data)
+{
+	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(vcpu->arch.apic))
+		return 1;
+
+	if (((data & APIC_SHORT_MASK) == APIC_DEST_NOSHORT) &&
+		((data & KVM_APIC_DEST_MASK) == APIC_DEST_PHYSICAL) &&
+		((data & APIC_MODE_MASK) == APIC_DM_FIXED) &&
+		((u32)(data >> 32) != X2APIC_BROADCAST)) {
+
+		data &= ~(1 << 12);
+		kvm_apic_send_ipi(vcpu->arch.apic, (u32)data, (u32)(data >> 32));
+		kvm_lapic_set_reg(vcpu->arch.apic, APIC_ICR2, (u32)(data >> 32));
+		kvm_lapic_set_reg(vcpu->arch.apic, APIC_ICR, (u32)data);
+		trace_kvm_apic_write(APIC_ICR, (u32)data);
+		return 0;
+	}
+
+	return 1;
+}
+
+enum exit_fastpath_completion handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu)
+{
+	u32 msr = kvm_register_read(vcpu, VCPU_REGS_RCX);
+	u64 data;
+	int ret = 0;
+
+	switch (msr) {
+	case APIC_BASE_MSR + (APIC_ICR >> 4):
+		data = kvm_read_edx_eax(vcpu);
+		ret = handle_fastpath_set_x2apic_icr_irqoff(vcpu, data);
+		break;
+	default:
+		return EXIT_FASTPATH_NONE;
+	}
+
+	if (!ret) {
+		trace_kvm_msr_write(msr, data);
+		return EXIT_FASTPATH_SKIP_EMUL_INS;
+	}
+
+	return EXIT_FASTPATH_NONE;
+}
+EXPORT_SYMBOL_GPL(handle_fastpath_set_msr_irqoff);
+
 /*
  * Adapt set_msr() to msr_io()'s calling convention
 */
@@ -8057,6 +8108,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	bool req_int_win =
 		dm_request_for_irq_injection(vcpu) &&
 		kvm_cpu_accept_dm_intr(vcpu);
+	enum exit_fastpath_completion exit_fastpath;
 
 	bool req_immediate_exit = false;
@@ -8275,7 +8327,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
 	}
 
-	kvm_x86_ops->run(vcpu);
+	exit_fastpath = kvm_x86_ops->run(vcpu);
 
 	/*
 	 * Do this here before restoring debug registers on the host. And
@@ -8351,7 +8403,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	kvm_lapic_sync_from_vapic(vcpu);
 
 	vcpu->arch.gpa_available = false;
-	r = kvm_x86_ops->handle_exit(vcpu);
+	r = kvm_x86_ops->handle_exit(vcpu, exit_fastpath);
 	return r;
 
 cancel_injection:
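To see what this accelerates end to end, here is an illustrative guest-side sketch (not from the patch): sending a fixed, physical-destination IPI through the x2APIC ICR MSR from ring 0 with x2APIC enabled. KVM intercepts the WRMSR, and handle_fastpath_set_msr_irqoff() above delivers the IPI while host interrupts are still disabled, returning EXIT_FASTPATH_SKIP_EMUL_INS so the instruction is skipped without a full exit-handler round trip.

#include <stdint.h>

/* x2APIC ICR is MSR 0x830: APIC_BASE_MSR (0x800) + (APIC_ICR (0x300) >> 4). */
#define X2APIC_ICR_MSR 0x830

/* Send a FIXED-mode, physical-destination IPI to one CPU. The destination
 * APIC ID sits in ICR bits 63:32 (loaded into EDX, matching the
 * kvm_read_edx_eax() above), the vector in bits 7:0; delivery mode,
 * destination mode, and shorthand bits are all left zero, which is exactly
 * the pattern the fastpath tests for. */
static inline void x2apic_send_fixed_ipi(uint32_t dest_apic_id, uint8_t vector)
{
	uint32_t lo = vector;
	uint32_t hi = dest_apic_id;

	asm volatile("wrmsr" : : "c"(X2APIC_ICR_MSR), "a"(lo), "d"(hi));
}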

arch/x86/kvm/x86.h

@@ -289,6 +289,8 @@ bool kvm_vector_hashing_enabled(void);
 int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 			    int emulation_type, void *insn, int insn_len);
 
+enum exit_fastpath_completion handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);
+
 #define KVM_SUPPORTED_XCR0     (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
 				| XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
 				| XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512 \