KVM: arm64: Use "new" memslot instead of userspace memory region
Get the slot ID, hva, etc... from the "new" memslot instead of the
userspace memory region when preparing/committing a memory region.
This will allow a future commit to drop @mem from the prepare/commit
hooks once all architectures convert to using "new".

Opportunistically wait to get the hva begin+end until after filtering
out the DELETE case, in anticipation of a future commit passing NULL
for @new when deleting a memslot.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Maciej S. Szmigiero <maciej.szmigiero@oracle.com>
Message-Id: <c019d00c2531520c52e0b52dfda1be5aa898103c.1638817639.git.maciej.szmigiero@oracle.com>
parent 537a17b314
commit 509c594ca2
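For orientation before the diff: the two hunks below rely on the fact that a memslot carries the same information as the userspace memory region it was created from. Here is a minimal, runnable userspace sketch (illustrative only, not kernel code; `struct region` and `struct memslot` are trimmed stand-ins for struct kvm_userspace_memory_region and struct kvm_memory_slot, and PAGE_SHIFT is assumed to be 12) showing that the hva range derived from the "new" memslot is identical to the one previously derived from @mem, since a slot's npages is just its byte size shifted down by PAGE_SHIFT:

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (UINT64_C(1) << PAGE_SHIFT)

/* Trimmed stand-in for struct kvm_userspace_memory_region. */
struct region {
	uint64_t userspace_addr;
	uint64_t memory_size;	/* bytes, page aligned */
};

/* Trimmed stand-in for struct kvm_memory_slot. */
struct memslot {
	uint64_t userspace_addr;
	uint64_t npages;	/* memory_size >> PAGE_SHIFT */
};

int main(void)
{
	struct region mem = {
		.userspace_addr = UINT64_C(0x7f0000000000),
		.memory_size    = 64 * PAGE_SIZE,
	};
	struct memslot new = {
		.userspace_addr = mem.userspace_addr,
		.npages         = mem.memory_size >> PAGE_SHIFT,
	};

	/* Old derivation, from the userspace memory region (@mem). */
	uint64_t old_hva = mem.userspace_addr;
	uint64_t old_end = old_hva + mem.memory_size;

	/* New derivation, from the memslot (@new) alone. */
	uint64_t hva     = new.userspace_addr;
	uint64_t reg_end = hva + (new.npages << PAGE_SHIFT);

	assert(hva == old_hva && reg_end == old_end);
	printf("hva range: [0x%" PRIx64 ", 0x%" PRIx64 ")\n", hva, reg_end);
	return 0;
}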
@@ -1473,14 +1473,14 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 	 * allocated dirty_bitmap[], dirty pages will be tracked while the
 	 * memory slot is write protected.
 	 */
-	if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
+	if (change != KVM_MR_DELETE && new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
 		/*
 		 * If we're with initial-all-set, we don't need to write
 		 * protect any pages because they're all reported as dirty.
 		 * Huge pages and normal pages will be write protect gradually.
 		 */
 		if (!kvm_dirty_log_manual_protect_and_init_set(kvm)) {
-			kvm_mmu_wp_memory_region(kvm, mem->slot);
+			kvm_mmu_wp_memory_region(kvm, new->id);
 		}
 	}
 }
@@ -1491,8 +1491,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 					struct kvm_memory_slot *new,
 					enum kvm_mr_change change)
 {
-	hva_t hva = mem->userspace_addr;
-	hva_t reg_end = hva + mem->memory_size;
+	hva_t hva, reg_end;
 	int ret = 0;
 
 	if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
@@ -1506,6 +1505,9 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 	if ((new->base_gfn + new->npages) > (kvm_phys_size(kvm) >> PAGE_SHIFT))
 		return -EFAULT;
 
+	hva = new->userspace_addr;
+	reg_end = hva + (new->npages << PAGE_SHIFT);
+
 	mmap_read_lock(current->mm);
 	/*
 	 * A memory region could potentially cover multiple VMAs, and any holes
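Per the changelog, the hva/reg_end assignment is deliberately placed after the early-out on the change type, so that a future commit can pass NULL for @new when deleting a memslot without this function dereferencing it. A toy, runnable userspace model of that ordering (the names prepare_region, mr_change, etc. are hypothetical stand-ins, not the kernel's):

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT 12

enum mr_change { MR_CREATE, MR_DELETE, MR_MOVE, MR_FLAGS_ONLY };

struct memslot {
	uint64_t userspace_addr;
	uint64_t npages;
};

static int prepare_region(const struct memslot *new, enum mr_change change)
{
	uint64_t hva, reg_end;

	/* Filter out DELETE (and any other no-op case) before touching @new. */
	if (change != MR_CREATE && change != MR_MOVE && change != MR_FLAGS_ONLY)
		return 0;

	/* Only reached when @new is guaranteed to be non-NULL. */
	hva = new->userspace_addr;
	reg_end = hva + (new->npages << PAGE_SHIFT);
	printf("validating [0x%llx, 0x%llx)\n",
	       (unsigned long long)hva, (unsigned long long)reg_end);
	return 0;
}

int main(void)
{
	struct memslot slot = {
		.userspace_addr = 0x7f0000000000ULL,
		.npages = 16,
	};

	prepare_region(&slot, MR_CREATE);	/* validates the hva range */
	prepare_region(NULL, MR_DELETE);	/* safe: returns before reading @new */
	return 0;
}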