MIPS: KVM: Remove ifdef DEBUG around kvm_debug

kvm_debug() uses pr_debug() which is already compiled out in the absence
of a DEBUG define, so remove the unnecessary ifdef DEBUG lines around
kvm_debug() calls which are littered around arch/mips/kvm/.

As well as generally cleaning up, this prevents future bit-rot due to
DEBUG not being commonly used.
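
For reference, here is a minimal sketch of why the #ifdefs are redundant, loosely based on
the kvm_debug()/pr_debug() macro definitions in include/linux/kvm_host.h and
include/linux/printk.h. It is simplified (the real pr_debug() also has a
CONFIG_DYNAMIC_DEBUG variant and goes through pr_fmt()), so treat it as illustrative
rather than the verbatim kernel code:

    /* Simplified illustration -- not the verbatim kernel definitions. */

    /* kvm_debug() is just a thin wrapper around pr_debug(): */
    #define kvm_debug(fmt, ...) \
            pr_debug("kvm: " fmt, ## __VA_ARGS__)

    /*
     * Without DEBUG (or dynamic debug), pr_debug() falls back to no_printk(),
     * which only type-checks its arguments and emits no code:
     */
    #ifdef DEBUG
    #define pr_debug(fmt, ...) \
            printk(KERN_DEBUG fmt, ##__VA_ARGS__)
    #else
    #define pr_debug(fmt, ...) \
            no_printk(KERN_DEBUG fmt, ##__VA_ARGS__)
    #endif

So a bare kvm_debug() call already compiles to nothing in a non-DEBUG build; the
surrounding #ifdef DEBUG / #endif pairs only duplicate that check while leaving the
wrapped calls unbuilt, and therefore prone to bit-rot, in the common configuration.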

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Gleb Natapov <gleb@kernel.org>
Cc: kvm@vger.kernel.org
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: Sanjay Lal <sanjayl@kymasys.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Authored by James Hogan on 2014-05-29 10:16:42 +01:00, committed by Paolo Bonzini
parent 3d65483371
commit d5c704d525
3 changed files with 0 additions and 28 deletions


@@ -2319,11 +2319,9 @@ kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
 			er = EMULATE_FAIL;
 		}
 	} else {
-#ifdef DEBUG
 		kvm_debug
 		    ("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
 		     tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
-#endif
 		/* OK we have a Guest TLB entry, now inject it into the shadow host TLB */
 		kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
 						     NULL);


@@ -232,11 +232,9 @@ kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
 	tlb_write_indexed();
 	tlbw_use_hazard();
-#ifdef DEBUG
 	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
 		  vcpu->arch.pc, idx, read_c0_entryhi(),
 		  read_c0_entrylo0(), read_c0_entrylo1());
-#endif
 	/* Flush D-cache */
 	if (flush_dcache_mask) {
@@ -343,11 +341,9 @@ int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
 	mtc0_tlbw_hazard();
 	tlbw_use_hazard();
-#ifdef DEBUG
 	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n",
 		  vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
 		  read_c0_entrylo0(), read_c0_entrylo1());
-#endif
 	/* Restore old ASID */
 	write_c0_entryhi(old_entryhi);
@@ -395,10 +391,8 @@ kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
 	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
 		   (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);
-#ifdef DEBUG
 	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
 		  tlb->tlb_lo0, tlb->tlb_lo1);
-#endif
 	return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
 				       tlb->tlb_mask);
@@ -419,10 +413,8 @@ int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
 		}
 	}
-#ifdef DEBUG
 	kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
 		  __func__, entryhi, index, tlb[i].tlb_lo0, tlb[i].tlb_lo1);
-#endif
 	return index;
 }
@@ -456,9 +448,7 @@ int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
 	local_irq_restore(flags);
-#ifdef DEBUG
 	kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);
-#endif
 	return idx;
 }
@@ -503,11 +493,9 @@ int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
 	local_irq_restore(flags);
-#ifdef DEBUG
 	if (idx > 0)
 		kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
 			  (va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu), idx);
-#endif
 	return 0;
 }
@@ -675,9 +663,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	unsigned long flags;
 	int newasid = 0;
-#ifdef DEBUG
 	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
-#endif
 	/* Alocate new kernel and user ASIDs if needed */


@@ -32,9 +32,7 @@ static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
 		gpa = KVM_INVALID_ADDR;
 	}
-#ifdef DEBUG
 	kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);
-#endif
 	return gpa;
 }
@@ -85,11 +83,9 @@ static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
 	if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
 	    || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
-#ifdef DEBUG
 		kvm_debug
 		    ("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
 		     cause, opc, badvaddr);
-#endif
 		er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);
 		if (er == EMULATE_DONE)
@@ -138,11 +134,9 @@ static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
 		}
 	} else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
 		   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
-#ifdef DEBUG
 		kvm_debug
 		    ("USER ADDR TLB LD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
 		     cause, opc, badvaddr);
-#endif
 		er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
 		if (er == EMULATE_DONE)
 			ret = RESUME_GUEST;
@@ -188,10 +182,8 @@ static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
 		}
 	} else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
 		   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
-#ifdef DEBUG
 		kvm_debug("USER ADDR TLB ST fault: PC: %#lx, BadVaddr: %#lx\n",
 			  vcpu->arch.pc, badvaddr);
-#endif
 		/* User Address (UA) fault, this could happen if
 		 * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
@@ -236,9 +228,7 @@ static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
 	if (KVM_GUEST_KERNEL_MODE(vcpu)
 	    && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
-#ifdef DEBUG
 		kvm_debug("Emulate Store to MMIO space\n");
-#endif
 		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
 		if (er == EMULATE_FAIL) {
 			printk("Emulate Store to MMIO space failed\n");
@@ -268,9 +258,7 @@ static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
 	int ret = RESUME_GUEST;
 	if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
-#ifdef DEBUG
 		kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr);
-#endif
 		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
 		if (er == EMULATE_FAIL) {
 			printk("Emulate Load from MMIO space failed\n");