Optimize TLB flush in kvm_mmu_slot_remove_write_access.
No TLB flush is needed when there's no valid rmap in memory slot. Signed-off-by: Kai Huang <kai.huang@linux.intel.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Parent commit: 0c55d6d931
This commit:  d91ffee9ec
@@ -4302,6 +4302,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 	struct kvm_memory_slot *memslot;
 	gfn_t last_gfn;
 	int i;
+	bool flush = false;

 	memslot = id_to_memslot(kvm->memslots, slot);
 	last_gfn = memslot->base_gfn + memslot->npages - 1;
@@ -4318,7 +4319,8 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)

 		for (index = 0; index <= last_index; ++index, ++rmapp) {
 			if (*rmapp)
-				__rmap_write_protect(kvm, rmapp, false);
+				flush |= __rmap_write_protect(kvm, rmapp,
+						false);

 			if (need_resched() || spin_needbreak(&kvm->mmu_lock))
 				cond_resched_lock(&kvm->mmu_lock);
@@ -4345,7 +4347,8 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 	 * instead of PT_WRITABLE_MASK, that means it does not depend
 	 * on PT_WRITABLE_MASK anymore.
 	 */
-	kvm_flush_remote_tlbs(kvm);
+	if (flush)
+		kvm_flush_remote_tlbs(kvm);
 }

 #define BATCH_ZAP_PAGES 10
Loading…
Reference in New Issue