KVM: x86: avoid loading PDPTRs after migration when possible

If the new KVM_*_SREGS2 ioctls are used, the PDPTRs are part of the
migration state and are correctly restored by those ioctls, so KVM does
not need to reload them from guest memory.

Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
Message-Id: <20210607090203.133058-9-mlevitsk@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 158a48ecf7
parent 6dba940352
Author:    Maxim Levitsky <mlevitsk@redhat.com>  2021-06-07 12:02:03 +03:00
Committer: Paolo Bonzini <pbonzini@redhat.com>

4 changed files with 13 additions and 2 deletions
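
For context, a minimal userspace sketch of the intended flow (not part of this commit): the VMM saves vCPU state with KVM_GET_SREGS2 on the source and restores it with KVM_SET_SREGS2 on the destination, so the PDPTRs travel in the migration stream instead of being re-read from guest memory. vcpu_fd and the surrounding error handling are assumed, and the sketch presumes a host kernel that advertises KVM_CAP_SREGS2.

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Hedged sketch; vcpu_fd is an already-created KVM vCPU file descriptor. */
static int migrate_save_sregs2(int vcpu_fd, struct kvm_sregs2 *state)
{
	/* Source side: when the guest uses PAE paging, the PDPTRs come
	 * back in state->pdptrs[] and KVM_SREGS2_FLAGS_PDPTRS_VALID is
	 * set in state->flags. */
	return ioctl(vcpu_fd, KVM_GET_SREGS2, state);
}

static int migrate_restore_sregs2(int vcpu_fd, struct kvm_sregs2 *state)
{
	/* Destination side: the saved PDPTRs are installed directly,
	 * which is what sets vcpu->arch.pdptrs_from_userspace in KVM. */
	return ioctl(vcpu_fd, KVM_SET_SREGS2, state);
}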

arch/x86/include/asm/kvm_host.h

@@ -862,6 +862,12 @@ struct kvm_vcpu_arch {
 	/* Protected Guests */
 	bool guest_state_protected;
 
+	/*
+	 * Set when PDPTS were loaded directly by the userspace without
+	 * reading the guest memory
+	 */
+	bool pdptrs_from_userspace;
+
 #if IS_ENABLED(CONFIG_HYPERV)
 	hpa_t hv_root_tdp;
 #endif
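
The new field only matters for guests in PAE paging mode, the one mode in which KVM caches four PDPTEs per vCPU. As a rough illustration (userspace-compilable C, not the kernel's is_pae_paging() itself), the condition boils down to:

#include <stdbool.h>
#include <stdint.h>

/* Sketch of the PAE-paging condition: paging on, PAE on, long mode off. */
static bool pae_paging_sketch(uint64_t cr0, uint64_t cr4, uint64_t efer)
{
	const uint64_t CR0_PG   = 1ULL << 31;	/* paging enabled */
	const uint64_t CR4_PAE  = 1ULL << 5;	/* PAE enabled */
	const uint64_t EFER_LMA = 1ULL << 10;	/* long mode active */

	return (cr0 & CR0_PG) && (cr4 & CR4_PAE) && !(efer & EFER_LMA);
}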

arch/x86/kvm/svm/nested.c

@@ -1367,7 +1367,8 @@ static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
 	if (WARN_ON(!is_guest_mode(vcpu)))
 		return true;
 
-	if (!nested_npt_enabled(svm) && is_pae_paging(vcpu))
+	if (!vcpu->arch.pdptrs_from_userspace &&
+	    !nested_npt_enabled(svm) && is_pae_paging(vcpu))
 		/*
 		 * Reload the guest's PDPTRs since after a migration
 		 * the guest CR3 might be restored prior to setting the nested

arch/x86/kvm/vmx/nested.c

@@ -3122,7 +3122,8 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
 		struct page *page;
 		u64 hpa;
 
-		if (!nested_cpu_has_ept(vmcs12) && is_pae_paging(vcpu)) {
+		if (!vcpu->arch.pdptrs_from_userspace &&
+		    !nested_cpu_has_ept(vmcs12) && is_pae_paging(vcpu)) {
 			/*
 			 * Reload the guest's PDPTRs since after a migration
 			 * the guest CR3 might be restored prior to setting the nested
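
The SVM hunk above and this VMX hunk add the same gate. A condensed, hypothetical restatement of that logic in plain C with simplified names (not the literal kernel code):

#include <stdbool.h>

struct vcpu_sketch {
	bool pdptrs_from_userspace;	/* set by KVM_SET_SREGS2 */
	bool nested_paging;		/* NPT (SVM) or EPT (VMX) in use */
	bool pae_paging;		/* CR0.PG && CR4.PAE && !EFER.LMA */
};

/* PDPTRs are re-read from guest memory only when userspace did not
 * already supply them and the guest actually uses them. */
static bool must_reload_pdptrs(const struct vcpu_sketch *v)
{
	if (v->pdptrs_from_userspace)
		return false;

	return !v->nested_paging && v->pae_paging;
}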

arch/x86/kvm/x86.c

@@ -820,6 +820,8 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
 	memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
 	kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR);
+	vcpu->arch.pdptrs_from_userspace = false;
+
 out:
 
 	return ret;
@@ -10265,6 +10267,7 @@ static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2)
 		kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR);
 		mmu_reset_needed = 1;
+		vcpu->arch.pdptrs_from_userspace = true;
 	}
 	if (mmu_reset_needed)
 		kvm_mmu_reset_context(vcpu);
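
Finally, a VMM would normally probe for the new ioctls before depending on this behaviour. A minimal, hypothetical check, where kvm_fd is an open /dev/kvm descriptor:

#include <linux/kvm.h>
#include <stdbool.h>
#include <sys/ioctl.h>

static bool have_sregs2(int kvm_fd)
{
	/* KVM_CAP_SREGS2 gates both KVM_GET_SREGS2 and KVM_SET_SREGS2. */
	return ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_SREGS2) > 0;
}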