KVM: Write protect the updated slot only when dirty logging is enabled
Calling kvm_mmu_slot_remove_write_access() for a deleted slot does nothing but search for non-existent mmu pages which have mappings to that deleted memory; this is safe, but a waste of time. Since we want to make the function rmap based in a later patch, in a manner which makes it unsafe to be called for a deleted slot, we make the caller check that the slot still has pages (npages is non-zero) and is being dirty logged.

Reviewed-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
commit c972f3b125
parent aa11e3a8a6
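For context on the two cases the commit message distinguishes: userspace toggles dirty logging per slot with the KVM_MEM_LOG_DIRTY_PAGES flag, and deletes a slot by setting its size to zero. A minimal, hypothetical userspace sketch (not part of this patch; error handling omitted, vm_fd and guest_mem are placeholders):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	void example(int vm_fd, void *guest_mem)
	{
		struct kvm_userspace_memory_region region;

		memset(&region, 0, sizeof(region));
		region.slot = 0;
		region.guest_phys_addr = 0;
		region.memory_size = 0x100000;	/* 1 MiB */
		region.userspace_addr = (unsigned long)guest_mem;

		/* Enable dirty logging: the new branch in the diff below fires. */
		region.flags = KVM_MEM_LOG_DIRTY_PAGES;
		ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);

		/*
		 * Delete the slot: memory_size == 0, so npages is 0 and the
		 * write-protection pass is now skipped entirely.
		 */
		region.flags = 0;
		region.memory_size = 0;
		ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
	}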
arch/x86/kvm/x86.c
@@ -6897,7 +6897,13 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 	spin_lock(&kvm->mmu_lock);
 	if (nr_mmu_pages)
 		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
-	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
+	/*
+	 * Write protect all pages for dirty logging.
+	 * Existing largepage mappings are destroyed here and new ones will
+	 * not be created until the end of the logging.
+	 */
+	if (npages && (mem->flags & KVM_MEM_LOG_DIRTY_PAGES))
+		kvm_mmu_slot_remove_write_access(kvm, mem->slot);
 	spin_unlock(&kvm->mmu_lock);
 	/*
 	 * If memory slot is created, or moved, we need to clear all
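Why the unconditional call was wasted work: at this point in history the function still walked the global list of active shadow pages rather than the slot's rmap, so a call for a deleted slot paid the full walk and found nothing to clear. A paraphrased, simplified sketch of that contemporary walk (from arch/x86/kvm/mmu.c of this era; not verbatim, large-spte handling omitted):

	/*
	 * Simplified paraphrase of the pre-rmap implementation.  The loop
	 * visits every active shadow page, even when the slot was just
	 * deleted and none of them can still map it.
	 */
	void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
	{
		struct kvm_mmu_page *sp;

		list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
			u64 *pt = sp->spt;
			int i;

			if (!test_bit(slot, sp->slot_bitmap))
				continue;	/* maps nothing in this slot */

			for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
				if (is_shadow_present_pte(pt[i]))
					pt[i] &= ~PT_WRITABLE_MASK;
		}
		kvm_flush_remote_tlbs(kvm);
	}

The later rmap-based rewrite the commit message refers to walks only the sptes reachable from the slot itself, which is exactly why it cannot be handed a deleted slot.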
virt/kvm/kvm_main.c
@@ -817,7 +817,6 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
 		if (kvm_create_dirty_bitmap(&new) < 0)
 			goto out_free;
-		/* destroy any largepage mappings for dirty tracking */
 	}
 
 	if (!npages || base_gfn != old.base_gfn) {
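For completeness, the bitmap allocated by kvm_create_dirty_bitmap() above (one bit per guest page in the slot) is what userspace later harvests, which is the "end of the logging" the x86 comment refers to. A hypothetical sketch of the retrieval side (buffer allocation and sizing are the caller's responsibility):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Fetch and clear the dirty bitmap for one memory slot. */
	int fetch_dirty_bitmap(int vm_fd, int slot, void *bitmap)
	{
		struct kvm_dirty_log log;

		memset(&log, 0, sizeof(log));
		log.slot = slot;
		log.dirty_bitmap = bitmap;	/* caller-allocated, one bit per page */

		return ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
	}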