KVM: PPC: Book3S: Replace current->mm by kvm->mm
Given that kvm_create_vm() contains:

	kvm->mm = current->mm;

and that every kvm_*_ioctl performs the check:

	if (kvm->mm != current->mm)
		return -EIO;

I see no reason to keep using current->mm instead of kvm->mm. By doing so, we reduce the use of 'global' variables in the code, relying more on the contents of the kvm struct.

Signed-off-by: Leonardo Bras <leonardo@linux.ibm.com>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
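The invariant behind this change can be modelled outside the kernel. The following stand-alone C sketch (hypothetical, simplified types and names; not the actual kernel source) shows how the creation-time assignment plus the per-ioctl guard keep kvm->mm and current->mm identical on every ioctl-driven path this patch touches:

	#include <stdio.h>
	#include <errno.h>

	struct mm_struct { int id; };
	struct task { struct mm_struct *mm; };
	struct kvm  { struct mm_struct *mm; };

	static struct task *current;	/* stand-in for the kernel's current */

	static void kvm_create_vm(struct kvm *kvm)
	{
		kvm->mm = current->mm;	/* the VM is bound to its creator's mm */
	}

	static int kvm_vm_ioctl(struct kvm *kvm)
	{
		if (kvm->mm != current->mm)	/* guard present on every kvm_*_ioctl */
			return -EIO;
		/* past this point kvm->mm == current->mm, so either may be used */
		return 0;
	}

	int main(void)
	{
		struct mm_struct mm_a = { 1 }, mm_b = { 2 };
		struct task owner = { &mm_a }, stranger = { &mm_b };
		struct kvm vm;

		current = &owner;
		kvm_create_vm(&vm);
		printf("owner ioctl:    %d\n", kvm_vm_ioctl(&vm));	/* prints 0 */

		current = &stranger;
		printf("stranger ioctl: %d\n", kvm_vm_ioctl(&vm));	/* prints -EIO */
		return 0;
	}

Compiled as plain C, the owner's call returns 0 while the stranger's call returns -EIO, mirroring the guard quoted above from the generic kvm_*_ioctl handlers.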
parent 4de0a83554
commit 8a9c892514
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -284,7 +284,7 @@ static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
 	/* Protect linux PTE lookup from page table destruction */
 	rcu_read_lock_sched();	/* this disables preemption too */
 	ret = kvmppc_do_h_enter(kvm, flags, pte_index, pteh, ptel,
-				current->mm->pgd, false, pte_idx_ret);
+				kvm->mm->pgd, false, pte_idx_ret);
 	rcu_read_unlock_sched();
 	if (ret == H_TOO_HARD) {
 		/* this can't happen */
@@ -573,7 +573,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	is_ci = false;
 	pfn = 0;
 	page = NULL;
-	mm = current->mm;
+	mm = kvm->mm;
 	pte_size = PAGE_SIZE;
 	writing = (dsisr & DSISR_ISSTORE) != 0;
 	/* If writing != 0, then the HPTE must allow writing, if we get here */

--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -253,10 +253,11 @@ static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
 		}
 	}
 
+	account_locked_vm(kvm->mm,
+		kvmppc_stt_pages(kvmppc_tce_pages(stt->size)), false);
+
 	kvm_put_kvm(stt->kvm);
 
-	account_locked_vm(current->mm,
-		kvmppc_stt_pages(kvmppc_tce_pages(stt->size)), false);
 	call_rcu(&stt->rcu, release_spapr_tce_table);
 
 	return 0;
@@ -272,6 +273,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
 {
 	struct kvmppc_spapr_tce_table *stt = NULL;
 	struct kvmppc_spapr_tce_table *siter;
+	struct mm_struct *mm = kvm->mm;
 	unsigned long npages, size = args->size;
 	int ret = -ENOMEM;
 
@@ -280,7 +282,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
 		return -EINVAL;
 
 	npages = kvmppc_tce_pages(size);
-	ret = account_locked_vm(current->mm, kvmppc_stt_pages(npages), true);
+	ret = account_locked_vm(mm, kvmppc_stt_pages(npages), true);
 	if (ret)
 		return ret;
 
@@ -326,7 +328,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
 
 	kfree(stt);
  fail_acct:
-	account_locked_vm(current->mm, kvmppc_stt_pages(npages), false);
+	account_locked_vm(mm, kvmppc_stt_pages(npages), false);
 	return ret;
 }
 

--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -4285,7 +4285,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	user_vrsave = mfspr(SPRN_VRSAVE);
 
 	vcpu->arch.wqp = &vcpu->arch.vcore->wq;
-	vcpu->arch.pgdir = current->mm->pgd;
+	vcpu->arch.pgdir = kvm->mm->pgd;
 	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
 
 	do {
@@ -4640,14 +4640,14 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 
 	/* Look up the VMA for the start of this memory slot */
 	hva = memslot->userspace_addr;
-	down_read(&current->mm->mmap_sem);
-	vma = find_vma(current->mm, hva);
+	down_read(&kvm->mm->mmap_sem);
+	vma = find_vma(kvm->mm, hva);
 	if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO))
 		goto up_out;
 
 	psize = vma_kernel_pagesize(vma);
 
-	up_read(&current->mm->mmap_sem);
+	up_read(&kvm->mm->mmap_sem);
 
 	/* We can handle 4k, 64k or 16M pages in the VRMA */
 	if (psize >= 0x1000000)
@@ -4680,7 +4680,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 	return err;
 
  up_out:
-	up_read(&current->mm->mmap_sem);
+	up_read(&kvm->mm->mmap_sem);
 	goto out_srcu;
 }