KVM: Replace old tlb flush function with new one to flush a specified range.
This patch is to replace kvm_flush_remote_tlbs() with kvm_flush_remote_tlbs_with_address() in some functions without logic change. Signed-off-by: Lan Tianyu <Tianyu.Lan@microsoft.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
40ef75a758
commit
c3134ce240
|
@@ -1485,8 +1485,12 @@ static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
|
||||||
|
|
||||||
static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
|
static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
|
||||||
{
|
{
|
||||||
if (__drop_large_spte(vcpu->kvm, sptep))
|
if (__drop_large_spte(vcpu->kvm, sptep)) {
|
||||||
kvm_flush_remote_tlbs(vcpu->kvm);
|
struct kvm_mmu_page *sp = page_header(__pa(sptep));
|
||||||
|
|
||||||
|
kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
|
||||||
|
KVM_PAGES_PER_HPAGE(sp->role.level));
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@@ -1954,7 +1958,8 @@ static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
|
||||||
rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
|
rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
|
||||||
|
|
||||||
kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, 0);
|
kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, 0);
|
||||||
kvm_flush_remote_tlbs(vcpu->kvm);
|
kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
|
||||||
|
KVM_PAGES_PER_HPAGE(sp->role.level));
|
||||||
}
|
}
|
||||||
|
|
||||||
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
|
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
|
||||||
|
@@ -2470,7 +2475,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
|
||||||
account_shadowed(vcpu->kvm, sp);
|
account_shadowed(vcpu->kvm, sp);
|
||||||
if (level == PT_PAGE_TABLE_LEVEL &&
|
if (level == PT_PAGE_TABLE_LEVEL &&
|
||||||
rmap_write_protect(vcpu, gfn))
|
rmap_write_protect(vcpu, gfn))
|
||||||
kvm_flush_remote_tlbs(vcpu->kvm);
|
kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1);
|
||||||
|
|
||||||
if (level > PT_PAGE_TABLE_LEVEL && need_sync)
|
if (level > PT_PAGE_TABLE_LEVEL && need_sync)
|
||||||
flush |= kvm_sync_pages(vcpu, gfn, &invalid_list);
|
flush |= kvm_sync_pages(vcpu, gfn, &invalid_list);
|
||||||
|
@@ -2590,7 +2595,7 @@ static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
|
||||||
return;
|
return;
|
||||||
|
|
||||||
drop_parent_pte(child, sptep);
|
drop_parent_pte(child, sptep);
|
||||||
kvm_flush_remote_tlbs(vcpu->kvm);
|
kvm_flush_remote_tlbs_with_address(vcpu->kvm, child->gfn, 1);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -3014,8 +3019,10 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
|
||||||
ret = RET_PF_EMULATE;
|
ret = RET_PF_EMULATE;
|
||||||
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
|
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH || flush)
|
if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH || flush)
|
||||||
kvm_flush_remote_tlbs(vcpu->kvm);
|
kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn,
|
||||||
|
KVM_PAGES_PER_HPAGE(level));
|
||||||
|
|
||||||
if (unlikely(is_mmio_spte(*sptep)))
|
if (unlikely(is_mmio_spte(*sptep)))
|
||||||
ret = RET_PF_EMULATE;
|
ret = RET_PF_EMULATE;
|
||||||
|
@@ -5672,7 +5679,8 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
|
||||||
* on PT_WRITABLE_MASK anymore.
|
* on PT_WRITABLE_MASK anymore.
|
||||||
*/
|
*/
|
||||||
if (flush)
|
if (flush)
|
||||||
kvm_flush_remote_tlbs(kvm);
|
kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
|
||||||
|
memslot->npages);
|
||||||
}
|
}
|
||||||
|
|
||||||
static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
|
static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
|
||||||
|
@@ -5742,7 +5750,8 @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
|
||||||
* dirty_bitmap.
|
* dirty_bitmap.
|
||||||
*/
|
*/
|
||||||
if (flush)
|
if (flush)
|
||||||
kvm_flush_remote_tlbs(kvm);
|
kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
|
||||||
|
memslot->npages);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(kvm_mmu_slot_leaf_clear_dirty);
|
EXPORT_SYMBOL_GPL(kvm_mmu_slot_leaf_clear_dirty);
|
||||||
|
|
||||||
|
@@ -5760,7 +5769,8 @@ void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
|
||||||
lockdep_assert_held(&kvm->slots_lock);
|
lockdep_assert_held(&kvm->slots_lock);
|
||||||
|
|
||||||
if (flush)
|
if (flush)
|
||||||
kvm_flush_remote_tlbs(kvm);
|
kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
|
||||||
|
memslot->npages);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(kvm_mmu_slot_largepage_remove_write_access);
|
EXPORT_SYMBOL_GPL(kvm_mmu_slot_largepage_remove_write_access);
|
||||||
|
|
||||||
|
@@ -5777,7 +5787,8 @@ void kvm_mmu_slot_set_dirty(struct kvm *kvm,
|
||||||
|
|
||||||
/* see kvm_mmu_slot_leaf_clear_dirty */
|
/* see kvm_mmu_slot_leaf_clear_dirty */
|
||||||
if (flush)
|
if (flush)
|
||||||
kvm_flush_remote_tlbs(kvm);
|
kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
|
||||||
|
memslot->npages);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty);
|
EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty);
|
||||||
|
|
||||||
|
|
|
@@ -894,7 +894,8 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
|
||||||
pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
|
pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
|
||||||
|
|
||||||
if (mmu_page_zap_pte(vcpu->kvm, sp, sptep))
|
if (mmu_page_zap_pte(vcpu->kvm, sp, sptep))
|
||||||
kvm_flush_remote_tlbs(vcpu->kvm);
|
kvm_flush_remote_tlbs_with_address(vcpu->kvm,
|
||||||
|
sp->gfn, KVM_PAGES_PER_HPAGE(sp->role.level));
|
||||||
|
|
||||||
if (!rmap_can_add(vcpu))
|
if (!rmap_can_add(vcpu))
|
||||||
break;
|
break;
|
||||||
|
|
Loading…
Reference in New Issue