KVM: x86/mmu: Always use current mmu's role when loading new PGD

Since the guest PGD is now loaded after the MMU has been completely
set up, the desired role for a cache hit is simply the current
mmu_role.  There is no need to compute it again, so __kvm_mmu_new_pgd
can be folded into kvm_mmu_new_pgd.

Reviewed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Paolo Bonzini 2021-11-22 13:18:23 -05:00
parent 3cffc89d9d
commit d2e5f33341
1 changed file with 4 additions and 25 deletions
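
For orientation, a condensed sketch of kvm_mmu_new_pgd as it reads after the fold, pieced together from the hunks below; the prev-root reuse and write-flooding-count bookkeeping at the end of the function are untouched by this patch and are only summarized in a comment here:

void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
{
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	/* The role is taken straight from the already-initialized MMU. */
	union kvm_mmu_page_role new_role = mmu->mmu_role.base;

	if (!fast_pgd_switch(vcpu->kvm, mmu, new_pgd, new_role)) {
		/* kvm_mmu_ensure_valid_pgd will set up a new root.  */
		return;
	}

	/*
	 * Remainder unchanged: reuse of a cached prev root and the
	 * __clear_sp_write_flooding_count() call (see the hunks below).
	 */
}
EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd);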

@@ -181,8 +181,6 @@ struct kmem_cache *mmu_page_header_cache;
 static struct percpu_counter kvm_total_used_mmu_pages;
 
 static void mmu_spte_set(u64 *sptep, u64 spte);
-static union kvm_mmu_page_role
-kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
 
 struct kvm_mmu_role_regs {
 	const unsigned long cr0;
@@ -4169,10 +4167,10 @@ static bool fast_pgd_switch(struct kvm *kvm, struct kvm_mmu *mmu,
 	return cached_root_find_without_current(kvm, mmu, new_pgd, new_role);
 }
 
-static void __kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd,
-			      union kvm_mmu_page_role new_role)
+void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
 {
 	struct kvm_mmu *mmu = vcpu->arch.mmu;
+	union kvm_mmu_page_role new_role = mmu->mmu_role.base;
 
 	if (!fast_pgd_switch(vcpu->kvm, mmu, new_pgd, new_role)) {
 		/* kvm_mmu_ensure_valid_pgd will set up a new root.  */
@@ -4208,11 +4206,6 @@ static void __kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd,
 	__clear_sp_write_flooding_count(
 			to_shadow_page(vcpu->arch.mmu->root.hpa));
 }
-
-void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
-{
-	__kvm_mmu_new_pgd(vcpu, new_pgd, kvm_mmu_calc_root_page_role(vcpu));
-}
 EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd);
 
 static unsigned long get_cr3(struct kvm_vcpu *vcpu)
@@ -4882,7 +4875,7 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
 	new_role = kvm_calc_shadow_npt_root_page_role(vcpu, &regs);
 
 	shadow_mmu_init_context(vcpu, context, &regs, new_role);
-	__kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base);
+	kvm_mmu_new_pgd(vcpu, nested_cr3);
 }
 EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu);
 
@@ -4938,7 +4931,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 		reset_ept_shadow_zero_bits_mask(context, execonly);
 	}
 
-	__kvm_mmu_new_pgd(vcpu, new_eptp, new_role.base);
+	kvm_mmu_new_pgd(vcpu, new_eptp);
 }
 EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
 
@@ -5023,20 +5016,6 @@ void kvm_init_mmu(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_init_mmu);
 
-static union kvm_mmu_page_role
-kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu)
-{
-	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
-	union kvm_mmu_role role;
-
-	if (tdp_enabled)
-		role = kvm_calc_tdp_mmu_root_page_role(vcpu, &regs, true);
-	else
-		role = kvm_calc_shadow_mmu_root_page_role(vcpu, &regs, true);
-
-	return role.base;
-}
-
 void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
 {
 	/*