KVM: x86: Copy kvm_x86_ops by value to eliminate layer of indirection
Replace the kvm_x86_ops pointer in common x86 with an instance of the
struct to save one pointer dereference when invoking functions. Copy the
struct by value to set the ops during kvm_init().

Arbitrarily use kvm_x86_ops.hardware_enable to track whether or not the
ops have been initialized, i.e. a vendor KVM module has been loaded.

Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200321202603.19355-7-sean.j.christopherson@intel.com>
Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 69c6f69aa3
commit afaf0b2f9b
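For readers skimming the diff, here is a small self-contained sketch of the access-pattern change the commit message describes: common code keeps a by-value copy of the vendor ops instead of a pointer, so each call loads only the function pointer. This is a toy illustration, not KVM code; toy_ops, vendor_ops and vendor_tlb_flush are invented names.

/*
 * Toy illustration (not KVM code) of the indirection this commit removes.
 */
#include <stdio.h>

struct toy_ops {
        void (*tlb_flush)(int invalidate_gpa);
};

static struct toy_ops *toy_ops_ptr;     /* old scheme: pointer to the vendor ops */
static struct toy_ops toy_ops;          /* new scheme: by-value copy of the ops */

static void vendor_tlb_flush(int invalidate_gpa)
{
        printf("tlb_flush(%d)\n", invalidate_gpa);
}

static const struct toy_ops vendor_ops = { .tlb_flush = vendor_tlb_flush };

int main(void)
{
        toy_ops_ptr = (struct toy_ops *)&vendor_ops;
        toy_ops = vendor_ops;           /* one-time copy at "module load" */

        toy_ops_ptr->tlb_flush(1);      /* load toy_ops_ptr, then the function pointer */
        toy_ops.tlb_flush(1);           /* load the function pointer directly */
        return 0;
}

With that in mind, the diff below is mechanical: every kvm_x86_ops->foo(...) call site becomes kvm_x86_ops.foo(...).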
@@ -1274,13 +1274,13 @@ struct kvm_arch_async_pf {
 extern u64 __read_mostly host_efer;
 
-extern struct kvm_x86_ops *kvm_x86_ops;
+extern struct kvm_x86_ops kvm_x86_ops;
 extern struct kmem_cache *x86_fpu_cache;
 
 #define __KVM_HAVE_ARCH_VM_ALLOC
 static inline struct kvm *kvm_arch_alloc_vm(void)
 {
-        return __vmalloc(kvm_x86_ops->vm_size,
+        return __vmalloc(kvm_x86_ops.vm_size,
                          GFP_KERNEL_ACCOUNT | __GFP_ZERO, PAGE_KERNEL);
 }
 void kvm_arch_free_vm(struct kvm *kvm);
@@ -1288,8 +1288,8 @@ void kvm_arch_free_vm(struct kvm *kvm);
 #define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
 static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
 {
-        if (kvm_x86_ops->tlb_remote_flush &&
-            !kvm_x86_ops->tlb_remote_flush(kvm))
+        if (kvm_x86_ops.tlb_remote_flush &&
+            !kvm_x86_ops.tlb_remote_flush(kvm))
                 return 0;
         else
                 return -ENOTSUPP;
@@ -1375,7 +1375,7 @@ extern u64 kvm_mce_cap_supported;
  *
  * EMULTYPE_SKIP - Set when emulating solely to skip an instruction, i.e. to
  *                 decode the instruction length. For use *only* by
- *                 kvm_x86_ops->skip_emulated_instruction() implementations.
+ *                 kvm_x86_ops.skip_emulated_instruction() implementations.
  *
  * EMULTYPE_ALLOW_RETRY_PF - Set when the emulator should resume the guest to
  *                           retry native execution under certain conditions,
@@ -1669,14 +1669,14 @@ static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq)
 
 static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
 {
-        if (kvm_x86_ops->vcpu_blocking)
-                kvm_x86_ops->vcpu_blocking(vcpu);
+        if (kvm_x86_ops.vcpu_blocking)
+                kvm_x86_ops.vcpu_blocking(vcpu);
 }
 
 static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
 {
-        if (kvm_x86_ops->vcpu_unblocking)
-                kvm_x86_ops->vcpu_unblocking(vcpu);
+        if (kvm_x86_ops.vcpu_unblocking)
+                kvm_x86_ops.vcpu_unblocking(vcpu);
 }
 
 static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

@@ -209,7 +209,7 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
         vcpu->arch.cpuid_nent = cpuid->nent;
         cpuid_fix_nx_cap(vcpu);
         kvm_apic_set_version(vcpu);
-        kvm_x86_ops->cpuid_update(vcpu);
+        kvm_x86_ops.cpuid_update(vcpu);
         r = kvm_update_cpuid(vcpu);
 
 out:
@@ -232,7 +232,7 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
                 goto out;
         vcpu->arch.cpuid_nent = cpuid->nent;
         kvm_apic_set_version(vcpu);
-        kvm_x86_ops->cpuid_update(vcpu);
+        kvm_x86_ops.cpuid_update(vcpu);
         r = kvm_update_cpuid(vcpu);
 out:
         return r;

@@ -1022,7 +1022,7 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
                 addr = gfn_to_hva(kvm, gfn);
                 if (kvm_is_error_hva(addr))
                         return 1;
-                kvm_x86_ops->patch_hypercall(vcpu, instructions);
+                kvm_x86_ops.patch_hypercall(vcpu, instructions);
                 ((unsigned char *)instructions)[3] = 0xc3; /* ret */
                 if (__copy_to_user((void __user *)addr, instructions, 4))
                         return 1;
@@ -1607,7 +1607,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
          * hypercall generates UD from non zero cpl and real mode
          * per HYPER-V spec
          */
-        if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
+        if (kvm_x86_ops.get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
                 kvm_queue_exception(vcpu, UD_VECTOR);
                 return 1;
         }
@@ -1800,8 +1800,8 @@ int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
         };
         int i, nent = ARRAY_SIZE(cpuid_entries);
 
-        if (kvm_x86_ops->nested_get_evmcs_version)
-                evmcs_ver = kvm_x86_ops->nested_get_evmcs_version(vcpu);
+        if (kvm_x86_ops.nested_get_evmcs_version)
+                evmcs_ver = kvm_x86_ops.nested_get_evmcs_version(vcpu);
 
         /* Skip NESTED_FEATURES if eVMCS is not supported */
         if (!evmcs_ver)

@@ -68,7 +68,7 @@ static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
                 return 0;
 
         if (!kvm_register_is_available(vcpu, reg))
-                kvm_x86_ops->cache_reg(vcpu, reg);
+                kvm_x86_ops.cache_reg(vcpu, reg);
 
         return vcpu->arch.regs[reg];
 }
@@ -108,7 +108,7 @@ static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
         might_sleep(); /* on svm */
 
         if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
-                kvm_x86_ops->cache_reg(vcpu, VCPU_EXREG_PDPTR);
+                kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_PDPTR);
 
         return vcpu->arch.walk_mmu->pdptrs[index];
 }
@@ -117,7 +117,7 @@ static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
 {
         ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
         if (tmask & vcpu->arch.cr0_guest_owned_bits)
-                kvm_x86_ops->decache_cr0_guest_bits(vcpu);
+                kvm_x86_ops.decache_cr0_guest_bits(vcpu);
         return vcpu->arch.cr0 & mask;
 }
 
@@ -130,14 +130,14 @@ static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
 {
         ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
         if (tmask & vcpu->arch.cr4_guest_owned_bits)
-                kvm_x86_ops->decache_cr4_guest_bits(vcpu);
+                kvm_x86_ops.decache_cr4_guest_bits(vcpu);
         return vcpu->arch.cr4 & mask;
 }
 
 static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
 {
         if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
-                kvm_x86_ops->cache_reg(vcpu, VCPU_EXREG_CR3);
+                kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR3);
         return vcpu->arch.cr3;
 }
 
@@ -463,7 +463,7 @@ static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
         if (unlikely(vcpu->arch.apicv_active)) {
                 /* need to update RVI */
                 kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
-                kvm_x86_ops->hwapic_irr_update(vcpu,
+                kvm_x86_ops.hwapic_irr_update(vcpu,
                                 apic_find_highest_irr(apic));
         } else {
                 apic->irr_pending = false;
@@ -488,7 +488,7 @@ static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
          * just set SVI.
          */
         if (unlikely(vcpu->arch.apicv_active))
-                kvm_x86_ops->hwapic_isr_update(vcpu, vec);
+                kvm_x86_ops.hwapic_isr_update(vcpu, vec);
         else {
                 ++apic->isr_count;
                 BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
@@ -536,7 +536,7 @@ static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
          * and must be left alone.
          */
         if (unlikely(vcpu->arch.apicv_active))
-                kvm_x86_ops->hwapic_isr_update(vcpu,
+                kvm_x86_ops.hwapic_isr_update(vcpu,
                                                apic_find_highest_isr(apic));
         else {
                 --apic->isr_count;
@@ -674,7 +674,7 @@ static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
 {
         int highest_irr;
         if (apic->vcpu->arch.apicv_active)
-                highest_irr = kvm_x86_ops->sync_pir_to_irr(apic->vcpu);
+                highest_irr = kvm_x86_ops.sync_pir_to_irr(apic->vcpu);
         else
                 highest_irr = apic_find_highest_irr(apic);
         if (highest_irr == -1 || (highest_irr & 0xF0) <= ppr)
@@ -1063,7 +1063,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
                                                apic->regs + APIC_TMR);
                 }
 
-                if (kvm_x86_ops->deliver_posted_interrupt(vcpu, vector)) {
+                if (kvm_x86_ops.deliver_posted_interrupt(vcpu, vector)) {
                         kvm_lapic_set_irr(vector, apic);
                         kvm_make_request(KVM_REQ_EVENT, vcpu);
                         kvm_vcpu_kick(vcpu);
@@ -1746,7 +1746,7 @@ static void cancel_hv_timer(struct kvm_lapic *apic)
 {
         WARN_ON(preemptible());
         WARN_ON(!apic->lapic_timer.hv_timer_in_use);
-        kvm_x86_ops->cancel_hv_timer(apic->vcpu);
+        kvm_x86_ops.cancel_hv_timer(apic->vcpu);
         apic->lapic_timer.hv_timer_in_use = false;
 }
 
@@ -1757,13 +1757,13 @@ static bool start_hv_timer(struct kvm_lapic *apic)
         bool expired;
 
         WARN_ON(preemptible());
-        if (!kvm_x86_ops->set_hv_timer)
+        if (!kvm_x86_ops.set_hv_timer)
                 return false;
 
         if (!ktimer->tscdeadline)
                 return false;
 
-        if (kvm_x86_ops->set_hv_timer(vcpu, ktimer->tscdeadline, &expired))
+        if (kvm_x86_ops.set_hv_timer(vcpu, ktimer->tscdeadline, &expired))
                 return false;
 
         ktimer->hv_timer_in_use = true;
@@ -2190,7 +2190,7 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
                         kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);
 
         if ((old_value ^ value) & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE))
-                kvm_x86_ops->set_virtual_apic_mode(vcpu);
+                kvm_x86_ops.set_virtual_apic_mode(vcpu);
 
         apic->base_address = apic->vcpu->arch.apic_base &
                              MSR_IA32_APICBASE_BASE;
@@ -2268,9 +2268,9 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
         vcpu->arch.pv_eoi.msr_val = 0;
         apic_update_ppr(apic);
         if (vcpu->arch.apicv_active) {
-                kvm_x86_ops->apicv_post_state_restore(vcpu);
-                kvm_x86_ops->hwapic_irr_update(vcpu, -1);
-                kvm_x86_ops->hwapic_isr_update(vcpu, -1);
+                kvm_x86_ops.apicv_post_state_restore(vcpu);
+                kvm_x86_ops.hwapic_irr_update(vcpu, -1);
+                kvm_x86_ops.hwapic_isr_update(vcpu, -1);
         }
 
         vcpu->arch.apic_arb_prio = 0;
@@ -2521,10 +2521,10 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
         kvm_apic_update_apicv(vcpu);
         apic->highest_isr_cache = -1;
         if (vcpu->arch.apicv_active) {
-                kvm_x86_ops->apicv_post_state_restore(vcpu);
-                kvm_x86_ops->hwapic_irr_update(vcpu,
+                kvm_x86_ops.apicv_post_state_restore(vcpu);
+                kvm_x86_ops.hwapic_irr_update(vcpu,
                                 apic_find_highest_irr(apic));
-                kvm_x86_ops->hwapic_isr_update(vcpu,
+                kvm_x86_ops.hwapic_isr_update(vcpu,
                                 apic_find_highest_isr(apic));
         }
         kvm_make_request(KVM_REQ_EVENT, vcpu);

@@ -98,8 +98,8 @@ static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu)
 static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
 {
         if (VALID_PAGE(vcpu->arch.mmu->root_hpa))
-                kvm_x86_ops->load_mmu_pgd(vcpu, vcpu->arch.mmu->root_hpa |
-                                          kvm_get_active_pcid(vcpu));
+                kvm_x86_ops.load_mmu_pgd(vcpu, vcpu->arch.mmu->root_hpa |
+                                         kvm_get_active_pcid(vcpu));
 }
 
 int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
@@ -170,8 +170,8 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                                   unsigned pte_access, unsigned pte_pkey,
                                   unsigned pfec)
 {
-        int cpl = kvm_x86_ops->get_cpl(vcpu);
-        unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
+        int cpl = kvm_x86_ops.get_cpl(vcpu);
+        unsigned long rflags = kvm_x86_ops.get_rflags(vcpu);
 
         /*
          * If CPL < 3, SMAP prevention are disabled if EFLAGS.AC = 1.

@@ -305,7 +305,7 @@ kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
 
 static inline bool kvm_available_flush_tlb_with_range(void)
 {
-        return kvm_x86_ops->tlb_remote_flush_with_range;
+        return kvm_x86_ops.tlb_remote_flush_with_range;
 }
 
 static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm,
@@ -313,8 +313,8 @@ static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm,
 {
         int ret = -ENOTSUPP;
 
-        if (range && kvm_x86_ops->tlb_remote_flush_with_range)
-                ret = kvm_x86_ops->tlb_remote_flush_with_range(kvm, range);
+        if (range && kvm_x86_ops.tlb_remote_flush_with_range)
+                ret = kvm_x86_ops.tlb_remote_flush_with_range(kvm, range);
 
         if (ret)
                 kvm_flush_remote_tlbs(kvm);
@@ -1642,7 +1642,7 @@ static bool spte_set_dirty(u64 *sptep)
         rmap_printk("rmap_set_dirty: spte %p %llx\n", sptep, *sptep);
 
         /*
-         * Similar to the !kvm_x86_ops->slot_disable_log_dirty case,
+         * Similar to the !kvm_x86_ops.slot_disable_log_dirty case,
          * do not bother adding back write access to pages marked
          * SPTE_AD_WRPROT_ONLY_MASK.
          */
@@ -1731,8 +1731,8 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
                                 struct kvm_memory_slot *slot,
                                 gfn_t gfn_offset, unsigned long mask)
 {
-        if (kvm_x86_ops->enable_log_dirty_pt_masked)
-                kvm_x86_ops->enable_log_dirty_pt_masked(kvm, slot, gfn_offset,
+        if (kvm_x86_ops.enable_log_dirty_pt_masked)
+                kvm_x86_ops.enable_log_dirty_pt_masked(kvm, slot, gfn_offset,
                                 mask);
         else
                 kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
@@ -1747,8 +1747,8 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
  */
 int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu)
 {
-        if (kvm_x86_ops->write_log_dirty)
-                return kvm_x86_ops->write_log_dirty(vcpu);
+        if (kvm_x86_ops.write_log_dirty)
+                return kvm_x86_ops.write_log_dirty(vcpu);
 
         return 0;
 }
@@ -3036,7 +3036,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
         if (level > PT_PAGE_TABLE_LEVEL)
                 spte |= PT_PAGE_SIZE_MASK;
         if (tdp_enabled)
-                spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
+                spte |= kvm_x86_ops.get_mt_mask(vcpu, gfn,
                         kvm_is_mmio_pfn(pfn));
 
         if (host_writable)
@@ -4909,7 +4909,7 @@ kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
         union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);
 
         role.base.ad_disabled = (shadow_accessed_mask == 0);
-        role.base.level = kvm_x86_ops->get_tdp_level(vcpu);
+        role.base.level = kvm_x86_ops.get_tdp_level(vcpu);
         role.base.direct = true;
         role.base.gpte_is_8_bytes = true;
 
@@ -4930,7 +4930,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
         context->sync_page = nonpaging_sync_page;
         context->invlpg = nonpaging_invlpg;
         context->update_pte = nonpaging_update_pte;
-        context->shadow_root_level = kvm_x86_ops->get_tdp_level(vcpu);
+        context->shadow_root_level = kvm_x86_ops.get_tdp_level(vcpu);
         context->direct_map = true;
         context->get_guest_pgd = get_cr3;
         context->get_pdptr = kvm_pdptr_read;
@@ -5183,7 +5183,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
         if (r)
                 goto out;
         kvm_mmu_load_pgd(vcpu);
-        kvm_x86_ops->tlb_flush(vcpu, true);
+        kvm_x86_ops.tlb_flush(vcpu, true);
 out:
         return r;
 }
@@ -5488,7 +5488,7 @@ emulate:
          * guest, with the exception of AMD Erratum 1096 which is unrecoverable.
          */
         if (unlikely(insn && !insn_len)) {
-                if (!kvm_x86_ops->need_emulation_on_page_fault(vcpu))
+                if (!kvm_x86_ops.need_emulation_on_page_fault(vcpu))
                         return 1;
         }
 
@@ -5523,7 +5523,7 @@ void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
                 if (VALID_PAGE(mmu->prev_roots[i].hpa))
                         mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
 
-        kvm_x86_ops->tlb_flush_gva(vcpu, gva);
+        kvm_x86_ops.tlb_flush_gva(vcpu, gva);
         ++vcpu->stat.invlpg;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
@@ -5548,7 +5548,7 @@ void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
         }
 
         if (tlb_flush)
-                kvm_x86_ops->tlb_flush_gva(vcpu, gva);
+                kvm_x86_ops.tlb_flush_gva(vcpu, gva);
 
         ++vcpu->stat.invlpg;
 
@@ -5672,7 +5672,7 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
          * SVM's 32-bit NPT support, TDP paging doesn't use PAE paging and can
          * skip allocating the PDP table.
          */
-        if (tdp_enabled && kvm_x86_ops->get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
+        if (tdp_enabled && kvm_x86_ops.get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
                 return 0;
 
         page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_DMA32);

@@ -211,7 +211,7 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
                           ARCH_PERFMON_EVENTSEL_CMASK |
                           HSW_IN_TX |
                           HSW_IN_TX_CHECKPOINTED))) {
-                config = kvm_x86_ops->pmu_ops->find_arch_event(pmc_to_pmu(pmc),
+                config = kvm_x86_ops.pmu_ops->find_arch_event(pmc_to_pmu(pmc),
                                                       event_select,
                                                       unit_mask);
                 if (config != PERF_COUNT_HW_MAX)
@@ -265,7 +265,7 @@ void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
 
         pmc->current_config = (u64)ctrl;
         pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE,
-                              kvm_x86_ops->pmu_ops->find_fixed_event(idx),
+                              kvm_x86_ops.pmu_ops->find_fixed_event(idx),
                               !(en_field & 0x2), /* exclude user */
                               !(en_field & 0x1), /* exclude kernel */
                               pmi, false, false);
@@ -274,7 +274,7 @@ EXPORT_SYMBOL_GPL(reprogram_fixed_counter);
 
 void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx)
 {
-        struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, pmc_idx);
+        struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, pmc_idx);
 
         if (!pmc)
                 return;
@@ -296,7 +296,7 @@ void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
         int bit;
 
         for_each_set_bit(bit, pmu->reprogram_pmi, X86_PMC_IDX_MAX) {
-                struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, bit);
+                struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, bit);
 
                 if (unlikely(!pmc || !pmc->perf_event)) {
                         clear_bit(bit, pmu->reprogram_pmi);
@@ -318,7 +318,7 @@ void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
 /* check if idx is a valid index to access PMU */
 int kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
 {
-        return kvm_x86_ops->pmu_ops->is_valid_rdpmc_ecx(vcpu, idx);
+        return kvm_x86_ops.pmu_ops->is_valid_rdpmc_ecx(vcpu, idx);
 }
 
 bool is_vmware_backdoor_pmc(u32 pmc_idx)
@@ -368,7 +368,7 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
         if (is_vmware_backdoor_pmc(idx))
                 return kvm_pmu_rdpmc_vmware(vcpu, idx, data);
 
-        pmc = kvm_x86_ops->pmu_ops->rdpmc_ecx_to_pmc(vcpu, idx, &mask);
+        pmc = kvm_x86_ops.pmu_ops->rdpmc_ecx_to_pmc(vcpu, idx, &mask);
         if (!pmc)
                 return 1;
 
@@ -384,14 +384,14 @@ void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
 
 bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
 {
-        return kvm_x86_ops->pmu_ops->msr_idx_to_pmc(vcpu, msr) ||
-                kvm_x86_ops->pmu_ops->is_valid_msr(vcpu, msr);
+        return kvm_x86_ops.pmu_ops->msr_idx_to_pmc(vcpu, msr) ||
+                kvm_x86_ops.pmu_ops->is_valid_msr(vcpu, msr);
 }
 
 static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
 {
         struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
-        struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->msr_idx_to_pmc(vcpu, msr);
+        struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->msr_idx_to_pmc(vcpu, msr);
 
         if (pmc)
                 __set_bit(pmc->idx, pmu->pmc_in_use);
@@ -399,13 +399,13 @@ static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
 
 int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
 {
-        return kvm_x86_ops->pmu_ops->get_msr(vcpu, msr, data);
+        return kvm_x86_ops.pmu_ops->get_msr(vcpu, msr, data);
 }
 
 int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
         kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
-        return kvm_x86_ops->pmu_ops->set_msr(vcpu, msr_info);
+        return kvm_x86_ops.pmu_ops->set_msr(vcpu, msr_info);
 }
 
 /* refresh PMU settings. This function generally is called when underlying
@@ -414,7 +414,7 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
  */
 void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
 {
-        kvm_x86_ops->pmu_ops->refresh(vcpu);
+        kvm_x86_ops.pmu_ops->refresh(vcpu);
 }
 
 void kvm_pmu_reset(struct kvm_vcpu *vcpu)
@@ -422,7 +422,7 @@ void kvm_pmu_reset(struct kvm_vcpu *vcpu)
         struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 
         irq_work_sync(&pmu->irq_work);
-        kvm_x86_ops->pmu_ops->reset(vcpu);
+        kvm_x86_ops.pmu_ops->reset(vcpu);
 }
 
 void kvm_pmu_init(struct kvm_vcpu *vcpu)
@@ -430,7 +430,7 @@ void kvm_pmu_init(struct kvm_vcpu *vcpu)
         struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 
         memset(pmu, 0, sizeof(*pmu));
-        kvm_x86_ops->pmu_ops->init(vcpu);
+        kvm_x86_ops.pmu_ops->init(vcpu);
         init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
         pmu->event_count = 0;
         pmu->need_cleanup = false;
@@ -462,7 +462,7 @@ void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
                       pmu->pmc_in_use, X86_PMC_IDX_MAX);
 
         for_each_set_bit(i, bitmask, X86_PMC_IDX_MAX) {
-                pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, i);
+                pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, i);
 
                 if (pmc && pmc->perf_event && !pmc_speculative_in_use(pmc))
                         pmc_stop_counter(pmc);

@@ -88,7 +88,7 @@ static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
 
 static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
 {
-        return kvm_x86_ops->pmu_ops->pmc_is_enabled(pmc);
+        return kvm_x86_ops.pmu_ops->pmc_is_enabled(pmc);
 }
 
 static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu,

@@ -7329,7 +7329,7 @@ static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
          * TODO: Last condition latch INIT signals on vCPU when
          * vCPU is in guest-mode and vmcb12 defines intercept on INIT.
          * To properly emulate the INIT intercept, SVM should implement
-         * kvm_x86_ops->check_nested_events() and call nested_svm_vmexit()
+         * kvm_x86_ops.check_nested_events() and call nested_svm_vmexit()
          * there if an INIT signal is pending.
          */
         return !gif_set(svm) ||

@@ -246,7 +246,7 @@ TRACE_EVENT(kvm_exit,
                 __entry->guest_rip      = kvm_rip_read(vcpu);
                 __entry->isa            = isa;
                 __entry->vcpu_id        = vcpu->vcpu_id;
-                kvm_x86_ops->get_exit_info(vcpu, &__entry->info1,
+                kvm_x86_ops.get_exit_info(vcpu, &__entry->info1,
                                            &__entry->info2);
         ),
 
@@ -750,7 +750,7 @@ TRACE_EVENT(kvm_emulate_insn,
                 ),
 
         TP_fast_assign(
-                __entry->csbase = kvm_x86_ops->get_segment_base(vcpu, VCPU_SREG_CS);
+                __entry->csbase = kvm_x86_ops.get_segment_base(vcpu, VCPU_SREG_CS);
                 __entry->len = vcpu->arch.emulate_ctxt->fetch.ptr
                                - vcpu->arch.emulate_ctxt->fetch.data;
                 __entry->rip = vcpu->arch.emulate_ctxt->_eip - __entry->len;

@@ -4535,7 +4535,7 @@ void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
                 return;
 
         vmx = to_vmx(vcpu);
-        if (kvm_x86_ops->pmu_ops->is_valid_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL)) {
+        if (kvm_x86_ops.pmu_ops->is_valid_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL)) {
                 vmx->nested.msrs.entry_ctls_high |=
                                 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
                 vmx->nested.msrs.exit_ctls_high |=

@@ -2986,7 +2986,7 @@ void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long cr3)
                 eptp = construct_eptp(vcpu, cr3);
                 vmcs_write64(EPT_POINTER, eptp);
 
-                if (kvm_x86_ops->tlb_remote_flush) {
+                if (kvm_x86_ops.tlb_remote_flush) {
                         spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock);
                         to_vmx(vcpu)->ept_pointer = eptp;
                         to_kvm_vmx(kvm)->ept_pointers_match
@@ -7479,7 +7479,7 @@ static void pi_post_block(struct kvm_vcpu *vcpu)
 
 static void vmx_post_block(struct kvm_vcpu *vcpu)
 {
-        if (kvm_x86_ops->set_hv_timer)
+        if (kvm_x86_ops.set_hv_timer)
                 kvm_lapic_switch_to_hv_timer(vcpu);
 
         pi_post_block(vcpu);

File diff suppressed because it is too large
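The suppressed file above is where the init-time copy described in the commit message lands. As a hedged, self-contained sketch only (not the suppressed hunks; struct vendor_ops, vendor_ops_install and the fake callbacks are invented names), the pattern is: copy the vendor's ops struct by value during init, and treat a mandatory member such as hardware_enable being non-NULL as "a vendor module is already loaded".

#include <errno.h>
#include <stdio.h>

struct vendor_ops {
        int  (*hardware_enable)(void);
        void (*vcpu_run)(void *vcpu);
        unsigned int vm_size;
};

/* Common code owns one instance; all-zero means "no vendor module loaded". */
static struct vendor_ops vendor_ops;

static int vendor_ops_install(const struct vendor_ops *ops)
{
        /* Non-NULL hardware_enable doubles as the "already initialized" flag. */
        if (vendor_ops.hardware_enable) {
                fprintf(stderr, "already loaded another vendor module\n");
                return -EEXIST;
        }
        vendor_ops = *ops;      /* copy by value; later calls use vendor_ops.foo() */
        return 0;
}

static int  fake_hardware_enable(void) { return 0; }
static void fake_vcpu_run(void *vcpu)  { (void)vcpu; }

int main(void)
{
        const struct vendor_ops fake = {
                .hardware_enable = fake_hardware_enable,
                .vcpu_run        = fake_vcpu_run,
                .vm_size         = 4096,
        };

        if (vendor_ops_install(&fake))
                return 1;
        /* A second install is rejected because the ops are already set. */
        return vendor_ops_install(&fake) == -EEXIST ? 0 : 1;
}

Using an existing mandatory hook as the loaded-marker, as the commit message says, avoids carrying a separate boolean next to the struct.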
@@ -97,7 +97,7 @@ static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
 
         if (!is_long_mode(vcpu))
                 return false;
-        kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
+        kvm_x86_ops.get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
         return cs_l;
 }
 
@@ -237,7 +237,7 @@ static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
 
 static inline bool kvm_vcpu_latch_init(struct kvm_vcpu *vcpu)
 {
-        return is_smm(vcpu) || kvm_x86_ops->apic_init_signal_blocked(vcpu);
+        return is_smm(vcpu) || kvm_x86_ops.apic_init_signal_blocked(vcpu);
 }
 
 void kvm_set_pending_timer(struct kvm_vcpu *vcpu);