KVM: arm/arm64: Remove arch timer workqueue

The use of a workqueue in the hrtimer expiry function for the bg_timer
is a leftover from the time when we would inject interrupts when the
bg_timer expired.

Since we are no longer doing that, we can instead call
kvm_vcpu_wake_up() directly from the hrtimer function and remove all
workqueue functionality from the arch timer code.

Signed-off-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Author:    Christoffer Dall, 2018-11-27 13:48:08 +01:00 (committed by Marc Zyngier)
Commit:    8a411b060f, parent 71a7e47f39
2 changed files with 7 additions and 31 deletions
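
To make the shape of the change concrete before the hunks: after this patch, cancelling a soft timer only has to cancel the hrtimer, and the background-timer callback wakes the vcpu itself. The sketch below is assembled from the diff context rather than quoted from the tree; the container_of chain is reconstructed from the header hunk and the removed work function, and the unchanged early-expiry check is only summarised in a comment.

static void soft_timer_cancel(struct hrtimer *hrt)
{
        hrtimer_cancel(hrt);
}

static enum hrtimer_restart kvm_bg_timer_expire(struct hrtimer *hrt)
{
        struct arch_timer_cpu *timer;
        struct kvm_vcpu *vcpu;

        /* Resolve the vcpu owning this bg_timer (reconstructed, see above). */
        timer = container_of(hrt, struct arch_timer_cpu, bg_timer);
        vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);

        /*
         * The pre-existing "did the timer really fire early?" check is
         * unchanged and elided here; when it trips, the hrtimer is re-armed
         * and HRTIMER_RESTART is returned (see the hunk at -188,7 below).
         */

        /* Wake the blocked vcpu directly; no work item is scheduled any more. */
        kvm_vcpu_wake_up(vcpu);
        return HRTIMER_NORESTART;
}

Callers correspondingly drop the second argument, e.g. soft_timer_cancel(&timer->bg_timer).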

include/kvm/arm_arch_timer.h

@@ -21,7 +21,6 @@
 #include <linux/clocksource.h>
 #include <linux/hrtimer.h>
-#include <linux/workqueue.h>
 
 struct arch_timer_context {
         /* Registers: control register, timer value */
@@ -52,9 +51,6 @@ struct arch_timer_cpu {
         /* Background timer used when the guest is not running */
         struct hrtimer bg_timer;
 
-        /* Work queued with the above timer expires */
-        struct work_struct expired;
-
         /* Physical timer emulation */
         struct hrtimer phys_timer;

virt/kvm/arm/arch_timer.c

@@ -70,11 +70,9 @@ static void soft_timer_start(struct hrtimer *hrt, u64 ns)
                       HRTIMER_MODE_ABS);
 }
 
-static void soft_timer_cancel(struct hrtimer *hrt, struct work_struct *work)
+static void soft_timer_cancel(struct hrtimer *hrt)
 {
         hrtimer_cancel(hrt);
-        if (work)
-                cancel_work_sync(work);
 }
 
 static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
@@ -102,23 +100,6 @@ static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
         return IRQ_HANDLED;
 }
 
-/*
- * Work function for handling the backup timer that we schedule when a vcpu is
- * no longer running, but had a timer programmed to fire in the future.
- */
-static void kvm_timer_inject_irq_work(struct work_struct *work)
-{
-        struct kvm_vcpu *vcpu;
-
-        vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
-
-        /*
-         * If the vcpu is blocked we want to wake it up so that it will see
-         * the timer has expired when entering the guest.
-         */
-        kvm_vcpu_wake_up(vcpu);
-}
-
 static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
 {
         u64 cval, now;
@@ -188,7 +169,7 @@ static enum hrtimer_restart kvm_bg_timer_expire(struct hrtimer *hrt)
                 return HRTIMER_RESTART;
         }
 
-        schedule_work(&timer->expired);
+        kvm_vcpu_wake_up(vcpu);
         return HRTIMER_NORESTART;
 }
@@ -300,7 +281,7 @@ static void phys_timer_emulate(struct kvm_vcpu *vcpu)
          * then we also don't need a soft timer.
          */
         if (kvm_timer_should_fire(ptimer) || !kvm_timer_irq_can_fire(ptimer)) {
-                soft_timer_cancel(&timer->phys_timer, NULL);
+                soft_timer_cancel(&timer->phys_timer);
                 return;
         }
@@ -426,7 +407,7 @@ void kvm_timer_unschedule(struct kvm_vcpu *vcpu)
         vtimer_restore_state(vcpu);
 
-        soft_timer_cancel(&timer->bg_timer, &timer->expired);
+        soft_timer_cancel(&timer->bg_timer);
 }
 
 static void set_cntvoff(u64 cntvoff)
@@ -544,7 +525,7 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
          * In any case, we re-schedule the hrtimer for the physical timer when
          * coming back to the VCPU thread in kvm_timer_vcpu_load().
          */
-        soft_timer_cancel(&timer->phys_timer, NULL);
+        soft_timer_cancel(&timer->phys_timer);
 
         /*
          * The kernel may decide to run userspace after calling vcpu_put, so
@@ -637,7 +618,6 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
         update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
         vcpu_ptimer(vcpu)->cntvoff = 0;
 
-        INIT_WORK(&timer->expired, kvm_timer_inject_irq_work);
         hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
         timer->bg_timer.function = kvm_bg_timer_expire;
@@ -794,8 +774,8 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
         struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
         struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 
-        soft_timer_cancel(&timer->bg_timer, &timer->expired);
-        soft_timer_cancel(&timer->phys_timer, NULL);
+        soft_timer_cancel(&timer->bg_timer);
+        soft_timer_cancel(&timer->phys_timer);
         kvm_vgic_unmap_phys_irq(vcpu, vtimer->irq.irq);
 }