KVM/MIPS32: Wrap calls to gfn_to_pfn() with srcu_read_lock/unlock()

- As suggested by Gleb, wrap calls to gfn_to_pfn() with srcu_read_lock/unlock().
  Memory slots should be accessed from within an SRCU read section.
- kvm_mips_map_page() now returns an error code to its callers instead of
  calling panic() if it cannot find a mapping for a particular gfn (see the
  sketch after this list).
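
For orientation only, the combined effect of the two points above looks roughly like the sketch below. It is modeled on the hunks further down rather than being the literal patch: kvm_mips_gfn_to_pfn() and kvm_mips_is_error_pfn() are the MIPS wrappers already used in this file, while example_map_gfn() and out_pfn are made-up names for illustration.

#include <linux/kvm_host.h>
#include <linux/srcu.h>

/* Sketch only: look up the host pfn for a guest frame under the SRCU
 * read lock that protects the memslot array, and report failure to the
 * caller instead of panicking.
 */
static int example_map_gfn(struct kvm *kvm, gfn_t gfn, pfn_t *out_pfn)
{
        int srcu_idx, err = 0;
        pfn_t pfn;

        srcu_idx = srcu_read_lock(&kvm->srcu);  /* memslots are SRCU-protected */
        pfn = kvm_mips_gfn_to_pfn(kvm, gfn);    /* memslot walk happens here */
        if (kvm_mips_is_error_pfn(pfn))
                err = -EFAULT;                  /* let the caller handle it */
        else
                *out_pfn = pfn;
        srcu_read_unlock(&kvm->srcu, srcu_idx);

        return err;
}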

Signed-off-by: Sanjay Lal <sanjayl@kymasys.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Authored by Sanjay Lal on 2013-05-18 06:54:24 -07:00; committed by Gleb Natapov
parent ba86e4dda7
commit 6d17c0d1e8
1 changed file with 26 additions and 9 deletions

@@ -17,6 +17,8 @@
 #include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/kvm_host.h>
+#include <linux/srcu.h>
+
 
 #include <asm/cpu.h>
 #include <asm/bootinfo.h>
@@ -169,21 +171,27 @@ void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu)
         }
 }
 
-static void kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
+static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
 {
+        int srcu_idx, err = 0;
         pfn_t pfn;
 
         if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
-                return;
+                return 0;
 
+        srcu_idx = srcu_read_lock(&kvm->srcu);
         pfn = kvm_mips_gfn_to_pfn(kvm, gfn);
 
         if (kvm_mips_is_error_pfn(pfn)) {
-                panic("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
+                kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
+                err = -EFAULT;
+                goto out;
         }
 
         kvm->arch.guest_pmap[gfn] = pfn;
-        return;
+out:
+        srcu_read_unlock(&kvm->srcu, srcu_idx);
+        return err;
 }
 
 /* Translate guest KSEG0 addresses to Host PA */
@@ -207,7 +215,10 @@ unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
                         gva);
                 return KVM_INVALID_PAGE;
         }
-        kvm_mips_map_page(vcpu->kvm, gfn);
+
+        if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
+                return KVM_INVALID_ADDR;
+
         return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
 }
 
@@ -310,8 +321,11 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
         even = !(gfn & 0x1);
         vaddr = badvaddr & (PAGE_MASK << 1);
 
-        kvm_mips_map_page(vcpu->kvm, gfn);
-        kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1);
+        if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
+                return -1;
+
+        if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
+                return -1;
 
         if (even) {
                 pfn0 = kvm->arch.guest_pmap[gfn];
@@ -389,8 +403,11 @@ kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
                 pfn0 = 0;
                 pfn1 = 0;
         } else {
-                kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT);
-                kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT);
+                if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT) < 0)
+                        return -1;
+
+                if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT) < 0)
+                        return -1;
 
                 pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT];
                 pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT];