KVM: arm64: timers: Convert per-vcpu virtual offset to a global value
Having a per-vcpu virtual offset is a pain. It needs to be synchronized
on each update, and expands badly to a setup where different timers can
have different offsets, or have composite offsets (as with NV).

So let's start by replacing the use of the CNTVOFF_EL2 shadow register
(which we want to reclaim for NV anyway), and make the virtual timer
carry a pointer to a VM-wide offset. This simplifies the code
significantly. It also addresses two terrible bugs:

- The use of CNTVOFF_EL2 leads to some nice offset corruption
  when the sysreg gets reset, as reported by Joey.

- The kvm mutex is taken from a vcpu ioctl, which goes against
  the locking rules...

Reported-by: Joey Gouly <joey.gouly@arm.com>
Reviewed-by: Reiji Watanabe <reijiw@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20230224173915.GA17407@e124191.cambridge.arm.com
Tested-by: Joey Gouly <joey.gouly@arm.com>
Link: https://lore.kernel.org/r/20230224191640.3396734-1-maz@kernel.org
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
commit 47053904e1
parent fe15c26ee2
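A minimal user-space sketch of the idea the patch implements, before the diff
itself (illustration only, not kernel code: the struct and function names
below are invented for the example). Each timer context carries a pointer to
a single VM-wide offset, and the guest-visible virtual counter is the
physical counter minus that offset (CNTVCT = CNTPCT - CNTVOFF). Updating the
one shared value is immediately visible to every context that points at it,
which is what removes the per-vcpu synchronization loop and the kvm->lock
usage in the hunks below.

/*
 * Illustration only -- not kernel code. Models the VM-wide offset
 * indirection: every vcpu's virtual timer context points at the same
 * u64, and a NULL pointer simply means "zero offset".
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct vm_data {
	uint64_t voffset;		/* one offset shared by the whole VM */
};

struct timer_ctx {
	uint64_t *vm_offset;		/* NULL => no offset applies */
};

static uint64_t ctx_get_offset(const struct timer_ctx *ctx)
{
	return ctx->vm_offset ? *ctx->vm_offset : 0;
}

static uint64_t read_virtual_counter(uint64_t physical_counter,
				     const struct timer_ctx *ctx)
{
	/* Architectural relation: CNTVCT = CNTPCT - CNTVOFF */
	return physical_counter - ctx_get_offset(ctx);
}

int main(void)
{
	struct vm_data vm = { .voffset = 1000 };
	struct timer_ctx vtimer = { .vm_offset = &vm.voffset };
	struct timer_ctx ptimer = { .vm_offset = NULL };

	/* A single write to vm.voffset is seen by every context that
	 * points at it -- no per-vcpu update loop is needed. */
	printf("virt counter: %llu\n",
	       (unsigned long long)read_virtual_counter(5000, &vtimer));
	printf("phys counter: %llu\n",
	       (unsigned long long)read_virtual_counter(5000, &ptimer));
	return 0;
}

The NULL check mirrors the reworked timer_get_offset() in the diff: a context
without a vm_offset pointer (the physical timer here) behaves as if the
offset were zero.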
@@ -193,6 +193,9 @@ struct kvm_arch {
 	/* Interrupt controller */
 	struct vgic_dist	vgic;
 
+	/* Timers */
+	struct arch_timer_vm_data timer_data;
+
 	/* Mandated version of PSCI */
 	u32 psci_version;
 
@@ -84,14 +84,10 @@ u64 timer_get_cval(struct arch_timer_context *ctxt)
 
 static u64 timer_get_offset(struct arch_timer_context *ctxt)
 {
-	struct kvm_vcpu *vcpu = ctxt->vcpu;
+	if (ctxt->offset.vm_offset)
+		return *ctxt->offset.vm_offset;
 
-	switch(arch_timer_ctx_index(ctxt)) {
-	case TIMER_VTIMER:
-		return __vcpu_sys_reg(vcpu, CNTVOFF_EL2);
-	default:
-		return 0;
-	}
+	return 0;
 }
 
 static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl)
@@ -128,15 +124,12 @@ static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval)
 
 static void timer_set_offset(struct arch_timer_context *ctxt, u64 offset)
 {
-	struct kvm_vcpu *vcpu = ctxt->vcpu;
-
-	switch(arch_timer_ctx_index(ctxt)) {
-	case TIMER_VTIMER:
-		__vcpu_sys_reg(vcpu, CNTVOFF_EL2) = offset;
-		break;
-	default:
+	if (!ctxt->offset.vm_offset) {
 		WARN(offset, "timer %ld\n", arch_timer_ctx_index(ctxt));
+		return;
 	}
+
+	WRITE_ONCE(*ctxt->offset.vm_offset, offset);
 }
 
 u64 kvm_phys_timer_read(void)
@@ -765,25 +758,6 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-/* Make the updates of cntvoff for all vtimer contexts atomic */
-static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff)
-{
-	unsigned long i;
-	struct kvm *kvm = vcpu->kvm;
-	struct kvm_vcpu *tmp;
-
-	mutex_lock(&kvm->lock);
-	kvm_for_each_vcpu(i, tmp, kvm)
-		timer_set_offset(vcpu_vtimer(tmp), cntvoff);
-
-	/*
-	 * When called from the vcpu create path, the CPU being created is not
-	 * included in the loop above, so we just set it here as well.
-	 */
-	timer_set_offset(vcpu_vtimer(vcpu), cntvoff);
-	mutex_unlock(&kvm->lock);
-}
-
 void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
@@ -791,10 +765,11 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
 	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
 
 	vtimer->vcpu = vcpu;
+	vtimer->offset.vm_offset = &vcpu->kvm->arch.timer_data.voffset;
 	ptimer->vcpu = vcpu;
 
 	/* Synchronize cntvoff across all vtimers of a VM. */
-	update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
+	timer_set_offset(vtimer, kvm_phys_timer_read());
 	timer_set_offset(ptimer, 0);
 
 	hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
@@ -840,7 +815,7 @@ int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
 		break;
 	case KVM_REG_ARM_TIMER_CNT:
 		timer = vcpu_vtimer(vcpu);
-		update_vtimer_cntvoff(vcpu, kvm_phys_timer_read() - value);
+		timer_set_offset(timer, kvm_phys_timer_read() - value);
 		break;
 	case KVM_REG_ARM_TIMER_CVAL:
 		timer = vcpu_vtimer(vcpu);
@@ -44,7 +44,7 @@ static void kvm_ptp_get_time(struct kvm_vcpu *vcpu, u64 *val)
 	feature = smccc_get_arg1(vcpu);
 	switch (feature) {
 	case KVM_PTP_VIRT_COUNTER:
-		cycles = systime_snapshot.cycles - vcpu_read_sys_reg(vcpu, CNTVOFF_EL2);
+		cycles = systime_snapshot.cycles - vcpu->kvm->arch.timer_data.voffset;
 		break;
 	case KVM_PTP_PHYS_COUNTER:
 		cycles = systime_snapshot.cycles;
@@ -23,6 +23,19 @@ enum kvm_arch_timer_regs {
 	TIMER_REG_CTL,
 };
 
+struct arch_timer_offset {
+	/*
+	 * If set, pointer to one of the offsets in the kvm's offset
+	 * structure. If NULL, assume a zero offset.
+	 */
+	u64	*vm_offset;
+};
+
+struct arch_timer_vm_data {
+	/* Offset applied to the virtual timer/counter */
+	u64	voffset;
+};
+
 struct arch_timer_context {
 	struct kvm_vcpu			*vcpu;
 
@@ -32,6 +45,8 @@ struct arch_timer_context {
 	/* Emulated Timer (may be unused) */
 	struct hrtimer			hrtimer;
 
+	/* Offset for this counter/timer */
+	struct arch_timer_offset	offset;
 	/*
 	 * We have multiple paths which can save/restore the timer state onto
 	 * the hardware, so we need some way of keeping track of where the