KVM: Drop kvm_reload_remote_mmus(), open code request in x86 users
Remove the generic kvm_reload_remote_mmus() and open code its functionality into the two x86 callers. x86 is (obviously) the only architecture that uses the hook, and is also the only architecture that uses KVM_REQ_MMU_RELOAD in a way that's consistent with the name. That will change in a future patch, as when zapping a single shadow page, x86 doesn't actually _need_ to reload all vCPUs' MMUs; only MMUs whose root is being zapped actually need to be reloaded. s390 also uses KVM_REQ_MMU_RELOAD, but for a slightly different purpose. Drop the generic code in anticipation of implementing s390 and x86 arch specific requests, which will allow dropping KVM_REQ_MMU_RELOAD entirely. Opportunistically reword the x86 TDP MMU comment to avoid making references to functions (and requests!) when possible, and to remove the rather ambiguous "this". No functional change intended. Cc: Ben Gardon <bgardon@google.com> Signed-off-by: Sean Christopherson <seanjc@google.com> Reviewed-by: Ben Gardon <bgardon@google.com> Message-Id: <20220225182248.3812651-4-seanjc@google.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
f6d0a2521c
commit
2f6f66ccd2
|
@ -2353,7 +2353,7 @@ static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
|
|||
* treats invalid shadow pages as being obsolete.
|
||||
*/
|
||||
if (!is_obsolete_sp(kvm, sp))
|
||||
kvm_reload_remote_mmus(kvm);
|
||||
kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
|
||||
}
|
||||
|
||||
if (sp->lpage_disallowed)
|
||||
|
@ -5639,11 +5639,11 @@ static void kvm_mmu_zap_all_fast(struct kvm *kvm)
|
|||
*/
|
||||
kvm->arch.mmu_valid_gen = kvm->arch.mmu_valid_gen ? 0 : 1;
|
||||
|
||||
/* In order to ensure all threads see this change when
|
||||
* handling the MMU reload signal, this must happen in the
|
||||
* same critical section as kvm_reload_remote_mmus, and
|
||||
* before kvm_zap_obsolete_pages as kvm_zap_obsolete_pages
|
||||
* could drop the MMU lock and yield.
|
||||
/*
|
||||
* In order to ensure all vCPUs drop their soon-to-be invalid roots,
|
||||
* invalidating TDP MMU roots must be done while holding mmu_lock for
|
||||
* write and in the same critical section as making the reload request,
|
||||
* e.g. before kvm_zap_obsolete_pages() could drop mmu_lock and yield.
|
||||
*/
|
||||
if (is_tdp_mmu_enabled(kvm))
|
||||
kvm_tdp_mmu_invalidate_all_roots(kvm);
|
||||
|
@ -5656,7 +5656,7 @@ static void kvm_mmu_zap_all_fast(struct kvm *kvm)
|
|||
* Note: we need to do this under the protection of mmu_lock,
|
||||
* otherwise, vcpu would purge shadow page but miss tlb flush.
|
||||
*/
|
||||
kvm_reload_remote_mmus(kvm);
|
||||
kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
|
||||
|
||||
kvm_zap_obsolete_pages(kvm);
|
||||
|
||||
|
|
|
@ -1325,7 +1325,6 @@ int kvm_vcpu_yield_to(struct kvm_vcpu *target);
|
|||
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible);
|
||||
|
||||
void kvm_flush_remote_tlbs(struct kvm *kvm);
|
||||
void kvm_reload_remote_mmus(struct kvm *kvm);
|
||||
|
||||
#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
|
||||
int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min);
|
||||
|
|
|
@ -354,11 +354,6 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
|
|||
EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
|
||||
#endif
|
||||
|
||||
void kvm_reload_remote_mmus(struct kvm *kvm)
|
||||
{
|
||||
kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
|
||||
}
|
||||
|
||||
#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
|
||||
static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
|
||||
gfp_t gfp_flags)
|
||||
|
|
Loading…
Reference in New Issue