KVM: x86/mmu: Don't bounce through page-track mechanism for guest PTEs
Don't use the generic page-track mechanism to handle writes to guest PTEs
in KVM's MMU.  KVM's MMU needs access to information that should not be
exposed to external page-track users, e.g. KVM needs (for some definitions
of "need") the vCPU to query the current paging mode, whereas external
users, i.e. KVMGT, have no ties to the current vCPU and so should never
need the vCPU.

Moving away from the page-track mechanism will allow dropping use of the
page-track mechanism for KVM's own MMU, and will also allow simplifying
and cleaning up the page-track APIs.

Reviewed-by: Yan Zhao <yan.y.zhao@intel.com>
Tested-by: Yongwei Ma <yongwei.ma@intel.com>
Link: https://lore.kernel.org/r/20230729013535.1070024-15-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent eeb87272a3
commit 932844462a
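For orientation before the diff, here is a minimal, self-contained C sketch of the write-notification flow after this change (an illustrative model, not the kernel code itself; the notifier structure and "_sketch" names are simplified stand-ins): external page-track users such as KVMGT are still invoked through registered notifier nodes, while KVM's own MMU handler becomes a direct call to kvm_mmu_track_write() instead of a registered node.

/* Simplified model of the post-change flow; names mirror the kernel but
 * this is an illustrative sketch, not the actual implementation. */
#include <stdio.h>

struct notifier_node {
	void (*track_write)(const char *owner);
	struct notifier_node *next;
};

/* External users (e.g. KVMGT) still register nodes on a notifier list. */
static struct notifier_node *external_notifiers;

/* KVM's MMU hook: now called directly, no notifier node registration. */
static void kvm_mmu_track_write_sketch(void)
{
	printf("MMU: handle guest PTE write directly\n");
}

static void kvm_page_track_write_sketch(void)
{
	struct notifier_node *n;

	/* Walk the external notifiers, as before. */
	for (n = external_notifiers; n; n = n->next)
		n->track_write("external user");

	/* Direct call replaces the old mmu_sp_tracker notifier node. */
	kvm_mmu_track_write_sketch();
}

int main(void)
{
	kvm_page_track_write_sketch();
	return 0;
}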
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1265,7 +1265,6 @@ struct kvm_arch {
 	 * create an NX huge page (without hanging the guest).
 	 */
 	struct list_head possible_nx_huge_pages;
-	struct kvm_page_track_notifier_node mmu_sp_tracker;
 	struct kvm_page_track_notifier_head track_notifier_head;
 	/*
 	 * Protects marking pages unsync during page faults, as TDP MMU page
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -121,6 +121,8 @@ void kvm_mmu_unload(struct kvm_vcpu *vcpu);
 void kvm_mmu_free_obsolete_roots(struct kvm_vcpu *vcpu);
 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
 void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu);
+void kvm_mmu_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
+			 int bytes);
 
 static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
 {
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -5635,9 +5635,8 @@ static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
 	return spte;
 }
 
-static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
-			      const u8 *new, int bytes,
-			      struct kvm_page_track_notifier_node *node)
+void kvm_mmu_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
+			 int bytes)
 {
 	gfn_t gfn = gpa >> PAGE_SHIFT;
 	struct kvm_mmu_page *sp;
@@ -6161,7 +6160,6 @@ static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
 
 int kvm_mmu_init_vm(struct kvm *kvm)
 {
-	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
 	int r;
 
 	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
@@ -6175,9 +6173,6 @@ int kvm_mmu_init_vm(struct kvm *kvm)
 			return r;
 	}
 
-	node->track_write = kvm_mmu_pte_write;
-	kvm_page_track_register_notifier(kvm, node);
-
 	kvm->arch.split_page_header_cache.kmem_cache = mmu_page_header_cache;
 	kvm->arch.split_page_header_cache.gfp_zero = __GFP_ZERO;
 
@@ -6198,10 +6193,6 @@ static void mmu_free_vm_memory_caches(struct kvm *kvm)
 
 void kvm_mmu_uninit_vm(struct kvm *kvm)
 {
-	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
-
-	kvm_page_track_unregister_notifier(kvm, node);
-
 	if (tdp_mmu_enabled)
 		kvm_mmu_uninit_tdp_mmu(kvm);
 
--- a/arch/x86/kvm/mmu/page_track.c
+++ b/arch/x86/kvm/mmu/page_track.c
@@ -274,6 +274,8 @@ void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
 		if (n->track_write)
 			n->track_write(vcpu, gpa, new, bytes, n);
 	srcu_read_unlock(&head->track_srcu, idx);
+
+	kvm_mmu_track_write(vcpu, gpa, new, bytes);
 }
 
 /*