MIPS: KVM: Move commpage so 0x0 is unmapped

The comm page, which is mapped into the guest kernel address space at
0x0, has the unfortunate side effect of allowing guest kernel NULL
pointer dereferences to succeed. The only constraint on this address is
that it must be within 32KiB of 0x0, so that single lw/sw instructions
(which have 16-bit signed offset fields) can be used to access it, using
the zero register as a base.

So let's move the comm page as high as possible within that constraint so
that 0x0 can be left unmapped, at least for page sizes < 32KiB.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 42aa12e74e (parent 0510870952)
Author: James Hogan <james.hogan@imgtec.com>, 2016-06-15 19:29:57 +01:00
Committed by: Paolo Bonzini <pbonzini@redhat.com>
4 changed files with 20 additions and 14 deletions
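
The constraint described in the commit message can be made concrete with a small standalone C sketch (illustrative only, not part of the patch): lw/sw encode a 16-bit signed offset, so with the zero register as base only the first 32 KiB (0x0000-0x7fff) is reachable with a non-negative immediate, and any commpage base has to keep the whole page below 0x8000.

/*
 * Standalone sketch (not kernel code) of the lw/sw addressing constraint:
 * the 16-bit signed offset, used with the zero register as base, reaches
 * only addresses 0x0000..0x7fff without sign extension.
 */
#include <stdint.h>
#include <stdio.h>

/* Can every byte of a page-sized commpage at 'base' be addressed as
 * lw/sw rt, imm($zero) with a non-negative 16-bit immediate?
 */
static int commpage_reachable(uint32_t base, uint32_t page_size)
{
	return base + page_size - 1 <= 0x7fff;
}

int main(void)
{
	const uint32_t page_size = 0x1000;	/* assuming 4 KiB pages */

	printf("base 0x0000: %d\n", commpage_reachable(0x0000, page_size));	/* prints 1 */
	printf("base 0x7000: %d\n", commpage_reachable(0x7000, page_size));	/* prints 1 */
	printf("base 0x8000: %d\n", commpage_reachable(0x8000, page_size));	/* prints 0 */
	return 0;
}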

arch/mips/include/asm/kvm_host.h

@@ -74,8 +74,14 @@
-/* Special address that contains the comm page, used for reducing # of traps */
-#define KVM_GUEST_COMMPAGE_ADDR	0x0
+/*
+ * Special address that contains the comm page, used for reducing # of traps
+ * This needs to be within 32Kb of 0x0 (so the zero register can be used), but
+ * preferably not at 0x0 so that most kernel NULL pointer dereferences can be
+ * caught.
+ */
+#define KVM_GUEST_COMMPAGE_ADDR	((PAGE_SIZE > 0x8000) ? 0 : \
+					 (0x8000 - PAGE_SIZE))
 #define KVM_GUEST_KERNEL_MODE(vcpu)	((kvm_read_c0_guest_status(vcpu->arch.cop0) & (ST0_EXL | ST0_ERL)) || \
 					((kvm_read_c0_guest_status(vcpu->arch.cop0) & KSU_USER) == 0))

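For reference, a minimal sketch of what the new expression evaluates to for common MIPS page sizes (the real macro uses the kernel's PAGE_SIZE; the sizes below are only the usual configurations):

/* Standalone illustration of the new KVM_GUEST_COMMPAGE_ADDR expression. */
#include <stdio.h>

#define COMMPAGE_ADDR(page_size) \
	(((page_size) > 0x8000) ? 0 : (0x8000 - (page_size)))

int main(void)
{
	unsigned long sizes[] = { 0x1000, 0x4000, 0x8000, 0x10000 };
	unsigned int i;

	for (i = 0; i < 4; i++)
		printf("PAGE_SIZE 0x%lx -> commpage @ 0x%lx\n",
		       sizes[i], (unsigned long)COMMPAGE_ADDR(sizes[i]));
	/*
	 * 0x1000  (4 KiB)  -> 0x7000  (page 0 left unmapped)
	 * 0x4000  (16 KiB) -> 0x4000  (page 0 left unmapped)
	 * 0x8000  (32 KiB) -> 0x0     (page 0 still holds the commpage)
	 * 0x10000 (64 KiB) -> 0x0     (page 0 still holds the commpage)
	 */
	return 0;
}
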
arch/mips/kvm/commpage.c

@@ -4,7 +4,7 @@
  * for more details.
  *
  * commpage, currently used for Virtual COP0 registers.
- * Mapped into the guest kernel @ 0x0.
+ * Mapped into the guest kernel @ KVM_GUEST_COMMPAGE_ADDR.
  *
  * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
  * Authors: Sanjay Lal <sanjayl@kymasys.com>

arch/mips/kvm/dyntrans.c

@@ -93,7 +93,7 @@ int kvm_mips_trans_mfc0(union mips_instruction inst, u32 *opc,
 	} else {
 		mfc0_inst.i_format.opcode = lw_op;
 		mfc0_inst.i_format.rt = inst.c0r_format.rt;
-		mfc0_inst.i_format.simmediate =
+		mfc0_inst.i_format.simmediate = KVM_GUEST_COMMPAGE_ADDR |
 			offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]);
 	}
@@ -111,7 +111,7 @@ int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc,
 	mtc0_inst.i_format.opcode = sw_op;
 	mtc0_inst.i_format.rt = inst.c0r_format.rt;
-	mtc0_inst.i_format.simmediate =
+	mtc0_inst.i_format.simmediate = KVM_GUEST_COMMPAGE_ADDR |
 		offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]);
 	return kvm_mips_trans_replace(vcpu, opc, mtc0_inst);

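The translated lw/sw above now carry KVM_GUEST_COMMPAGE_ADDR ORed into their 16-bit signed immediate. A small sketch of why that is safe (the page size and offsets below are illustrative, not the real struct kvm_mips_commpage layout): the base is page-aligned and every field offset is smaller than PAGE_SIZE, so the OR behaves like addition and the result stays within the non-negative half of the immediate.

#include <assert.h>
#include <stdint.h>

#define PAGE_SIZE_ILLUSTRATIVE	0x1000u					/* assuming 4 KiB pages */
#define COMMPAGE_BASE		(0x8000u - PAGE_SIZE_ILLUSTRATIVE)	/* 0x7000 */

int main(void)
{
	uint32_t off;

	/* Stand-in for offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]). */
	for (off = 0; off < PAGE_SIZE_ILLUSTRATIVE; off += 4) {
		uint32_t imm = COMMPAGE_BASE | off;

		assert(imm == COMMPAGE_BASE + off);	/* OR == ADD: no overlapping bits */
		assert(imm <= 0x7fff);			/* fits a non-negative lw/sw immediate */
	}
	return 0;
}
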
arch/mips/kvm/tlb.c

@@ -171,23 +171,23 @@ EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_write);
 int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
 	struct kvm_vcpu *vcpu)
 {
-	kvm_pfn_t pfn0, pfn1;
+	kvm_pfn_t pfn;
 	unsigned long flags, old_entryhi = 0, vaddr = 0;
-	unsigned long entrylo0 = 0, entrylo1 = 0;
+	unsigned long entrylo[2] = { 0, 0 };
+	unsigned int pair_idx;
 
-	pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
-	pfn1 = 0;
-	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) |
+	pfn = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
+	pair_idx = (badvaddr >> PAGE_SHIFT) & 1;
+	entrylo[pair_idx] = mips3_paddr_to_tlbpfn(pfn << PAGE_SHIFT) |
 		(0x3 << ENTRYLO_C_SHIFT) | ENTRYLO_D | ENTRYLO_V;
-	entrylo1 = 0;
 
 	local_irq_save(flags);
 
 	old_entryhi = read_c0_entryhi();
 	vaddr = badvaddr & (PAGE_MASK << 1);
 	write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
-	write_c0_entrylo0(entrylo0);
-	write_c0_entrylo1(entrylo1);
+	write_c0_entrylo0(entrylo[0]);
+	write_c0_entrylo1(entrylo[1]);
 	write_c0_index(kvm_mips_get_commpage_asid(vcpu));
 	mtc0_tlbw_hazard();
 	tlb_write_indexed();
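
The tlb.c change accounts for the MIPS TLB mapping virtual pages in even/odd pairs: EntryLo0 covers the even page of a pair and EntryLo1 the odd page. Since the commpage is no longer at 0x0 it may land on the odd half of its pair, so the handler now picks the EntryLo slot from bit PAGE_SHIFT of the faulting address. A small sketch under an assumed 4 KiB page size (PAGE_SHIFT of 12):

#include <stdio.h>

#define PAGE_SHIFT_ILLUSTRATIVE	12	/* assuming 4 KiB pages */

int main(void)
{
	unsigned long badvaddr = 0x7000;	/* faulting access to the commpage */
	unsigned long entrylo[2] = { 0, 0 };
	unsigned int pair_idx = (badvaddr >> PAGE_SHIFT_ILLUSTRATIVE) & 1;

	entrylo[pair_idx] = 1;	/* stand-in for the pfn/cache/dirty/valid bits */

	/* Prints "pair_idx = 1: entrylo[0] = 0, entrylo[1] = 1". */
	printf("pair_idx = %u: entrylo[0] = %lu, entrylo[1] = %lu\n",
	       pair_idx, entrylo[0], entrylo[1]);
	return 0;
}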