KVM: PPC: booke: Improve timer register emulation
Decrementers are now properly driven by TCR/TSR, and the guest has full
read/write access to these registers.

The decrementer keeps ticking (and setting the TSR bit) regardless of
whether the interrupts are enabled with TCR.

The decrementer stops at zero, rather than going negative.

Decrementers (and FITs, once implemented) are delivered as
level-triggered interrupts -- dequeued when the TSR bit is cleared, not
on delivery.

Signed-off-by: Liu Yu <yu.liu@freescale.com>
[scottwood@freescale.com: significant changes]
Signed-off-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
commit dfd4d47e9a
parent b59049720d
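For orientation, the sketch below models the level-triggered gating the commit message describes: the decrementer interrupt line is asserted only while both TCR[DIE] and TSR[DIS] are set, the TSR bit is set on timer expiry regardless of TCR, and the interrupt is dequeued when the TSR bit is cleared rather than on delivery. This is an illustrative stand-alone program, not the kernel code; names such as struct timers, dec_irq_pending, decrementer_expired, and clr_tsr_bits are invented stand-ins for the patch's update_timer_ints()/kvmppc_set_tsr_bits()/kvmppc_clr_tsr_bits() shown in the diff, and the bit values are the usual Book E positions.

/* Illustrative sketch only -- not the kernel implementation. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TCR_DIE 0x04000000u	/* Decrementer Interrupt Enable (Book E) */
#define TSR_DIS 0x08000000u	/* Decrementer Interrupt Status (Book E) */

struct timers {
	uint32_t tcr;
	uint32_t tsr;
	bool dec_irq_pending;	/* stands in for queue_dec/dequeue_dec */
};

/* Re-evaluate the interrupt line whenever TCR or TSR changes. */
static void update_timer_ints(struct timers *t)
{
	t->dec_irq_pending = (t->tcr & TCR_DIE) && (t->tsr & TSR_DIS);
}

/* Timer expiry: the TSR bit is set even if TCR[DIE] is clear. */
static void decrementer_expired(struct timers *t)
{
	t->tsr |= TSR_DIS;
	update_timer_ints(t);
}

/* Clearing TSR bits is what dequeues the level-triggered interrupt. */
static void clr_tsr_bits(struct timers *t, uint32_t bits)
{
	t->tsr &= ~bits;
	update_timer_ints(t);
}

int main(void)
{
	struct timers t = { .tcr = TCR_DIE };

	decrementer_expired(&t);
	printf("pending after expiry: %d\n", t.dec_irq_pending);	/* 1 */
	clr_tsr_bits(&t, TSR_DIS);
	printf("pending after TSR clear: %d\n", t.dec_irq_pending);	/* 0 */
	return 0;
}

The actual patch follows; the hunks below are reconstructed from the commit's diff view.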
@@ -330,7 +330,7 @@ struct kvm_vcpu_arch {
 	u32 tbl;
 	u32 tbu;
 	u32 tcr;
-	u32 tsr;
+	ulong tsr; /* we need to perform set/clr_bits() which requires ulong */
 	u32 ivor[64];
 	ulong ivpr;
 	u32 pvr;
@@ -66,6 +66,7 @@ extern int kvmppc_emulate_instruction(struct kvm_run *run,
 extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
 extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
 extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
+extern void kvmppc_decrementer_func(unsigned long data);
 extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);

 /* Core-specific hooks */
@@ -515,3 +515,11 @@ out:
 	mutex_unlock(&kvm->slots_lock);
 	return r;
 }
+
+void kvmppc_decrementer_func(unsigned long data)
+{
+	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
+
+	kvmppc_core_queue_dec(vcpu);
+	kvm_vcpu_kick(vcpu);
+}
@@ -252,9 +252,11 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
 		allowed = vcpu->arch.shared->msr & MSR_ME;
 		msr_mask = 0;
 		break;
-	case BOOKE_IRQPRIO_EXTERNAL:
 	case BOOKE_IRQPRIO_DECREMENTER:
 	case BOOKE_IRQPRIO_FIT:
+		keep_irq = true;
+		/* fall through */
+	case BOOKE_IRQPRIO_EXTERNAL:
 		allowed = vcpu->arch.shared->msr & MSR_EE;
 		allowed = allowed && !crit;
 		msr_mask = MSR_CE|MSR_ME|MSR_DE;
@@ -282,11 +284,26 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
 	return allowed;
 }

+static void update_timer_ints(struct kvm_vcpu *vcpu)
+{
+	if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
+		kvmppc_core_queue_dec(vcpu);
+	else
+		kvmppc_core_dequeue_dec(vcpu);
+}
+
 static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
 {
 	unsigned long *pending = &vcpu->arch.pending_exceptions;
 	unsigned int priority;

+	if (vcpu->requests) {
+		if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu)) {
+			smp_mb();
+			update_timer_ints(vcpu);
+		}
+	}
+
 	priority = __ffs(*pending);
 	while (priority <= BOOKE_IRQPRIO_MAX) {
 		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
@@ -749,25 +766,16 @@ static int set_sregs_base(struct kvm_vcpu *vcpu,
 	vcpu->arch.shared->esr = sregs->u.e.esr;
 	vcpu->arch.shared->dar = sregs->u.e.dear;
 	vcpu->arch.vrsave = sregs->u.e.vrsave;
-	vcpu->arch.tcr = sregs->u.e.tcr;
+	kvmppc_set_tcr(vcpu, sregs->u.e.tcr);

-	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC)
+	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) {
 		vcpu->arch.dec = sregs->u.e.dec;
-
-	kvmppc_emulate_dec(vcpu);
+		kvmppc_emulate_dec(vcpu);
+	}

 	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR) {
-		/*
-		 * FIXME: existing KVM timer handling is incomplete.
-		 * TSR cannot be read by the guest, and its value in
-		 * vcpu->arch is always zero. For now, just handle
-		 * the case where the caller is trying to inject a
-		 * decrementer interrupt.
-		 */
-
-		if ((sregs->u.e.tsr & TSR_DIS) &&
-		    (vcpu->arch.tcr & TCR_DIE))
-			kvmppc_core_queue_dec(vcpu);
+		vcpu->arch.tsr = sregs->u.e.tsr;
+		update_timer_ints(vcpu);
 	}

 	return 0;
@@ -923,6 +931,33 @@ void kvmppc_core_destroy_vm(struct kvm *kvm)
 {
 }

+void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
+{
+	vcpu->arch.tcr = new_tcr;
+	update_timer_ints(vcpu);
+}
+
+void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
+{
+	set_bits(tsr_bits, &vcpu->arch.tsr);
+	smp_wmb();
+	kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
+	kvm_vcpu_kick(vcpu);
+}
+
+void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
+{
+	clear_bits(tsr_bits, &vcpu->arch.tsr);
+	update_timer_ints(vcpu);
+}
+
+void kvmppc_decrementer_func(unsigned long data)
+{
+	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
+
+	kvmppc_set_tsr_bits(vcpu, TSR_DIS);
+}
+
 int __init kvmppc_booke_init(void)
 {
 	unsigned long ivor[16];
@@ -55,6 +55,10 @@ extern unsigned long kvmppc_booke_handlers;
 void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr);
 void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr);

+void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr);
+void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits);
+void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits);
+
 int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                             unsigned int inst, int *advance);
 int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt);
@@ -13,6 +13,7 @@
  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  *
  * Copyright IBM Corp. 2008
+ * Copyright 2011 Freescale Semiconductor, Inc.
  *
  * Authors: Hollis Blanchard <hollisb@us.ibm.com>
  */
@@ -115,10 +116,10 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
 	case SPRN_DBSR:
 		vcpu->arch.dbsr &= ~spr_val; break;
 	case SPRN_TSR:
-		vcpu->arch.tsr &= ~spr_val; break;
+		kvmppc_clr_tsr_bits(vcpu, spr_val);
+		break;
 	case SPRN_TCR:
-		vcpu->arch.tcr = spr_val;
-		kvmppc_emulate_dec(vcpu);
+		kvmppc_set_tcr(vcpu, spr_val);
 		break;

 	/* Note: SPRG4-7 are user-readable. These values are
@@ -209,6 +210,10 @@ int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
 		kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr1); break;
 	case SPRN_DBSR:
 		kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbsr); break;
+	case SPRN_TSR:
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.tsr); break;
+	case SPRN_TCR:
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.tcr); break;

 	case SPRN_IVOR0:
 		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]);
@@ -13,6 +13,7 @@
  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  *
  * Copyright IBM Corp. 2007
+ * Copyright 2011 Freescale Semiconductor, Inc.
  *
  * Authors: Hollis Blanchard <hollisb@us.ibm.com>
  */
@@ -69,42 +70,37 @@
 #define OP_STH  44
 #define OP_STHU 45

-#ifdef CONFIG_PPC_BOOK3S
-static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu)
-{
-	return 1;
-}
-#else
-static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu)
-{
-	/* On BOOKE, DEC = 0 is as good as decrementer not enabled */
-	return (vcpu->arch.tcr & TCR_DIE) && vcpu->arch.dec;
-}
-#endif
-
 void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
 {
 	unsigned long dec_nsec;
 	unsigned long long dec_time;

 	pr_debug("mtDEC: %x\n", vcpu->arch.dec);
+	hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
+
 #ifdef CONFIG_PPC_BOOK3S
 	/* mtdec lowers the interrupt line when positive. */
 	kvmppc_core_dequeue_dec(vcpu);

 	/* POWER4+ triggers a dec interrupt if the value is < 0 */
 	if (vcpu->arch.dec & 0x80000000) {
-		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
 		kvmppc_core_queue_dec(vcpu);
 		return;
 	}
 #endif
-	if (kvmppc_dec_enabled(vcpu)) {
-		/* The decrementer ticks at the same rate as the timebase, so
-		 * that's how we convert the guest DEC value to the number of
-		 * host ticks. */

-		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
+#ifdef CONFIG_BOOKE
+	/* On BOOKE, DEC = 0 is as good as decrementer not enabled */
+	if (vcpu->arch.dec == 0)
+		return;
+#endif
+
+	/*
+	 * The decrementer ticks at the same rate as the timebase, so
+	 * that's how we convert the guest DEC value to the number of
+	 * host ticks.
+	 */
+
 	dec_time = vcpu->arch.dec;
 	dec_time *= 1000;
 	do_div(dec_time, tb_ticks_per_usec);
@@ -112,14 +108,17 @@ void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
 	hrtimer_start(&vcpu->arch.dec_timer,
 			ktime_set(dec_time, dec_nsec), HRTIMER_MODE_REL);
 	vcpu->arch.dec_jiffies = get_tb();
-	} else {
-		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
-	}
 }

 u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb)
 {
 	u64 jd = tb - vcpu->arch.dec_jiffies;
+
+#ifdef CONFIG_BOOKE
+	if (vcpu->arch.dec < jd)
+		return 0;
+#endif
+
 	return vcpu->arch.dec - jd;
 }

@@ -39,7 +39,8 @@
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 {
 	return !(v->arch.shared->msr & MSR_WE) ||
-	       !!(v->arch.pending_exceptions);
+	       !!(v->arch.pending_exceptions) ||
+	       v->requests;
 }

 int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
@@ -311,18 +312,6 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 	return kvmppc_core_pending_dec(vcpu);
 }

-static void kvmppc_decrementer_func(unsigned long data)
-{
-	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
-
-	kvmppc_core_queue_dec(vcpu);
-
-	if (waitqueue_active(vcpu->arch.wqp)) {
-		wake_up_interruptible(vcpu->arch.wqp);
-		vcpu->stat.halt_wakeup++;
-	}
-}
-
 /*
  * low level hrtimer wake routine. Because this runs in hardirq context
  * we schedule a tasklet to do the real work.
@@ -567,6 +556,16 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	return r;
 }

+void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
+{
+	if (waitqueue_active(&vcpu->wq)) {
+		wake_up_interruptible(vcpu->arch.wqp);
+		vcpu->stat.halt_wakeup++;
+	} else if (vcpu->cpu != -1) {
+		smp_send_reschedule(vcpu->cpu);
+	}
+}
+
 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
 {
 	if (irq->irq == KVM_INTERRUPT_UNSET) {
@@ -575,13 +574,7 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
 	}

 	kvmppc_core_queue_external(vcpu, irq);
-
-	if (waitqueue_active(vcpu->arch.wqp)) {
-		wake_up_interruptible(vcpu->arch.wqp);
-		vcpu->stat.halt_wakeup++;
-	} else if (vcpu->cpu != -1) {
-		smp_send_reschedule(vcpu->cpu);
-	}
+	kvm_vcpu_kick(vcpu);

 	return 0;
 }