OpenCloudOS-Kernel/arch/arm64/kvm/arch_timer.c

// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 ARM Ltd.
* Author: Marc Zyngier <marc.zyngier@arm.com>
*/
#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/uaccess.h>
#include <clocksource/arm_arch_timer.h>
#include <asm/arch_timer.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
#include "trace.h"
static struct timecounter *timecounter;
static unsigned int host_vtimer_irq;
static unsigned int host_ptimer_irq;
static u32 host_vtimer_irq_flags;
static u32 host_ptimer_irq_flags;
static DEFINE_STATIC_KEY_FALSE(has_gic_active_state);
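
/*
 * Default PPIs for the EL1 timers: INTID 30 is the architectural non-secure
 * physical timer and INTID 27 the virtual timer. These can be reconfigured
 * from userspace, so treat them as defaults rather than fixed wiring.
 */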
static const struct kvm_irq_level default_ptimer_irq = {
	.irq = 30,
	.level = 1,
};

static const struct kvm_irq_level default_vtimer_irq = {
	.irq = 27,
	.level = 1,
};
static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx);
static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
				 struct arch_timer_context *timer_ctx);
static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx);
static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
				struct arch_timer_context *timer,
				enum kvm_arch_timer_regs treg,
				u64 val);
static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
			      struct arch_timer_context *timer,
			      enum kvm_arch_timer_regs treg);

u64 kvm_phys_timer_read(void)
{
	return timecounter->cc->read(timecounter->cc);
}
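
/*
 * Map the guest timers onto their backing contexts: with VHE both the EL1
 * virtual and physical timers can be handled directly by the hardware,
 * whereas without VHE the physical timer has to be emulated via a hrtimer.
 */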
static void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map)
{
	if (has_vhe()) {
		map->direct_vtimer = vcpu_vtimer(vcpu);
		map->direct_ptimer = vcpu_ptimer(vcpu);
		map->emul_ptimer = NULL;
	} else {
		map->direct_vtimer = vcpu_vtimer(vcpu);
		map->direct_ptimer = NULL;
		map->emul_ptimer = vcpu_ptimer(vcpu);
	}

	trace_kvm_get_timer_map(vcpu->vcpu_id, map);
}
static inline bool userspace_irqchip(struct kvm *kvm)
{
	return static_branch_unlikely(&userspace_irqchip_in_use) &&
		unlikely(!irqchip_in_kernel(kvm));
}

static void soft_timer_start(struct hrtimer *hrt, u64 ns)
{
	hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns),
		      HRTIMER_MODE_ABS_HARD);
}

static void soft_timer_cancel(struct hrtimer *hrt)
{
	hrtimer_cancel(hrt);
}
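
/*
 * Host handler for the timer PPIs: forward an asserted timer line to the
 * guest as a virtual interrupt and, when there is no GIC active state to
 * mask the line for us, disable the host IRQ so a userspace-irqchip VM is
 * not interrupted by a screaming timer.
 */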
static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
{
	struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
	struct arch_timer_context *ctx;
	struct timer_map map;

	/*
	 * We may see a timer interrupt after vcpu_put() has been called which
	 * sets the CPU's vcpu pointer to NULL, because even though the timer
	 * has been disabled in timer_save_state(), the hardware interrupt
	 * signal may not have been retired from the interrupt controller yet.
	 */
	if (!vcpu)
		return IRQ_HANDLED;

	get_timer_map(vcpu, &map);

	if (irq == host_vtimer_irq)
		ctx = map.direct_vtimer;
	else
		ctx = map.direct_ptimer;

	if (kvm_timer_should_fire(ctx))
		kvm_timer_update_irq(vcpu, true, ctx);

	if (userspace_irqchip(vcpu->kvm) &&
	    !static_branch_unlikely(&has_gic_active_state))
		disable_percpu_irq(host_vtimer_irq);

	return IRQ_HANDLED;
}
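
/*
 * Nanoseconds until this timer's compare value is reached from the guest's
 * point of view (i.e. with cntvoff applied), or 0 if it has already fired.
 */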
static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
{
	u64 cval, now;

	cval = timer_ctx->cnt_cval;
	now = kvm_phys_timer_read() - timer_ctx->cntvoff;

	if (now < cval) {
		u64 ns;

		ns = cyclecounter_cyc2ns(timecounter->cc,
					 cval - now,
					 timecounter->mask,
					 &timecounter->frac);
		return ns;
	}

	return 0;
}
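
/*
 * A timer can fire only if it is enabled and its interrupt is unmasked.
 * Callers must only ask this of an unloaded context, since the answer is
 * based on the saved copy of cnt_ctl.
 */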
static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
{
	WARN_ON(timer_ctx && timer_ctx->loaded);

	return timer_ctx &&
	       !(timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_IT_MASK) &&
	       (timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_ENABLE);
}

/*
 * Returns the earliest expiration time in ns among guest timers.
 * Note that it will return 0 if none of the timers can fire.
 */
static u64 kvm_timer_earliest_exp(struct kvm_vcpu *vcpu)
{
	u64 min_delta = ULLONG_MAX;
	int i;

	for (i = 0; i < NR_KVM_TIMERS; i++) {
		struct arch_timer_context *ctx = &vcpu->arch.timer_cpu.timers[i];

		WARN(ctx->loaded, "timer %d loaded\n", i);
		if (kvm_timer_irq_can_fire(ctx))
			min_delta = min(min_delta, kvm_timer_compute_delta(ctx));
	}

	/* If none of the timers can fire, then return 0 */
	if (min_delta == ULLONG_MAX)
		return 0;

	return min_delta;
}
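
/*
 * The background timer runs while the vcpu is blocked in kvm_vcpu_block():
 * wake the vcpu up once the earliest guest timer has genuinely expired.
 */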
static enum hrtimer_restart kvm_bg_timer_expire(struct hrtimer *hrt)
{
	struct arch_timer_cpu *timer;
	struct kvm_vcpu *vcpu;
	u64 ns;

	timer = container_of(hrt, struct arch_timer_cpu, bg_timer);
	vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);

	/*
	 * Check that the timer has really expired from the guest's
	 * PoV (NTP on the host may have forced it to expire
	 * early). If we should have slept longer, restart it.
	 */
	ns = kvm_timer_earliest_exp(vcpu);
	if (unlikely(ns)) {
		hrtimer_forward_now(hrt, ns_to_ktime(ns));
		return HRTIMER_RESTART;
	}

	kvm_vcpu_wake_up(vcpu);
	return HRTIMER_NORESTART;
}
static enum hrtimer_restart kvm_hrtimer_expire(struct hrtimer *hrt)
{
	struct arch_timer_context *ctx;
	struct kvm_vcpu *vcpu;
	u64 ns;

	ctx = container_of(hrt, struct arch_timer_context, hrtimer);
	vcpu = ctx->vcpu;

	trace_kvm_timer_hrtimer_expire(ctx);

	/*
	 * Check that the timer has really expired from the guest's
	 * PoV (NTP on the host may have forced it to expire
	 * early). If not ready, schedule for a later time.
	 */
	ns = kvm_timer_compute_delta(ctx);
	if (unlikely(ns)) {
		hrtimer_forward_now(hrt, ns_to_ktime(ns));
		return HRTIMER_RESTART;
	}

	kvm_timer_update_irq(vcpu, true, ctx);
	return HRTIMER_NORESTART;
}
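
/*
 * Compute whether the timer output line is (or would be) asserted: read the
 * live CNT{V,P}_CTL register while the context is loaded on this CPU,
 * otherwise derive the state from the saved cval and the current counter.
 */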
static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
{
	enum kvm_arch_timers index;
	u64 cval, now;

	if (!timer_ctx)
		return false;

	index = arch_timer_ctx_index(timer_ctx);

	if (timer_ctx->loaded) {
		u32 cnt_ctl = 0;

		switch (index) {
		case TIMER_VTIMER:
			cnt_ctl = read_sysreg_el0(SYS_CNTV_CTL);
			break;
		case TIMER_PTIMER:
			cnt_ctl = read_sysreg_el0(SYS_CNTP_CTL);
			break;
		case NR_KVM_TIMERS:
			/* GCC is braindead */
			cnt_ctl = 0;
			break;
		}

		return (cnt_ctl & ARCH_TIMER_CTRL_ENABLE) &&
		       (cnt_ctl & ARCH_TIMER_CTRL_IT_STAT) &&
		       !(cnt_ctl & ARCH_TIMER_CTRL_IT_MASK);
	}

	if (!kvm_timer_irq_can_fire(timer_ctx))
		return false;

	cval = timer_ctx->cnt_cval;
	now = kvm_phys_timer_read() - timer_ctx->cntvoff;

	return cval <= now;
}
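
/* True if any of this vcpu's timers should be asserting its line now. */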
bool kvm_timer_is_pending(struct kvm_vcpu *vcpu)
{
	struct timer_map map;

	get_timer_map(vcpu, &map);

	return kvm_timer_should_fire(map.direct_vtimer) ||
	       kvm_timer_should_fire(map.direct_ptimer) ||
	       kvm_timer_should_fire(map.emul_ptimer);
}
/*
 * Reflect the timer output level into the kvm_run structure
 */
void kvm_timer_update_run(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
	struct kvm_sync_regs *regs = &vcpu->run->s.regs;

	/* Populate the device bitmap with the timer states */
	regs->device_irq_level &= ~(KVM_ARM_DEV_EL1_VTIMER |
				    KVM_ARM_DEV_EL1_PTIMER);
	if (kvm_timer_should_fire(vtimer))
		regs->device_irq_level |= KVM_ARM_DEV_EL1_VTIMER;

	if (kvm_timer_should_fire(ptimer))
		regs->device_irq_level |= KVM_ARM_DEV_EL1_PTIMER;
}
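
/*
 * Latch the new line level and, for an in-kernel irqchip, inject it into
 * the vgic; with a userspace irqchip the level is instead published to
 * userspace through kvm_timer_update_run().
 */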
static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
				 struct arch_timer_context *timer_ctx)
{
	int ret;

	timer_ctx->irq.level = new_level;
	trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq,
				   timer_ctx->irq.level);

	if (!userspace_irqchip(vcpu->kvm)) {
		ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
					  timer_ctx->irq.irq,
					  timer_ctx->irq.level,
					  timer_ctx);
		WARN_ON(ret);
	}
}
/* Only called for a fully emulated timer */
static void timer_emulate(struct arch_timer_context *ctx)
{
	bool should_fire = kvm_timer_should_fire(ctx);

	trace_kvm_timer_emulate(ctx, should_fire);

	if (should_fire != ctx->irq.level) {
		kvm_timer_update_irq(ctx->vcpu, should_fire, ctx);
		return;
	}

	/*
	 * If the timer can fire now, we don't need to have a soft timer
	 * scheduled for the future. If the timer cannot fire at all,
	 * then we also don't need a soft timer.
	 */
	if (!kvm_timer_irq_can_fire(ctx)) {
		soft_timer_cancel(&ctx->hrtimer);
		return;
	}

	soft_timer_start(&ctx->hrtimer, kvm_timer_compute_delta(ctx));
}
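
/*
 * Save the state of a loaded timer context back into its shadow copy, so
 * that it survives the vcpu being scheduled out or migrated to another
 * physical CPU.
 */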
static void timer_save_state(struct arch_timer_context *ctx)
{
struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
unsigned long flags;
if (!timer->enabled)
return;
local_irq_save(flags);
if (!ctx->loaded)
goto out;
switch (index) {
case TIMER_VTIMER:
ctx->cnt_ctl = read_sysreg_el0(SYS_CNTV_CTL);
ctx->cnt_cval = read_sysreg_el0(SYS_CNTV_CVAL);
/* Disable the timer */
write_sysreg_el0(0, SYS_CNTV_CTL);
isb();
break;
case TIMER_PTIMER:
ctx->cnt_ctl = read_sysreg_el0(SYS_CNTP_CTL);
ctx->cnt_cval = read_sysreg_el0(SYS_CNTP_CVAL);
/* Disable the timer */
write_sysreg_el0(0, SYS_CNTP_CTL);
isb();
break;
case NR_KVM_TIMERS:
BUG();
}
trace_kvm_timer_save_state(ctx);
ctx->loaded = false;
out:
local_irq_restore(flags);
}
/*
* Schedule the background timer before calling kvm_vcpu_block, so that this
* thread is removed from its waitqueue and made runnable when there's a timer
* interrupt to handle.
*/
static void kvm_timer_blocking(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = vcpu_timer(vcpu);
struct timer_map map;
get_timer_map(vcpu, &map);
/*
* If no timers are capable of raising interrupts (disabled or
* masked), then there's no more work for us to do.
*/
if (!kvm_timer_irq_can_fire(map.direct_vtimer) &&
!kvm_timer_irq_can_fire(map.direct_ptimer) &&
!kvm_timer_irq_can_fire(map.emul_ptimer))
return;
/*
* At least one guest timer will expire. Schedule a background timer.
* Set the earliest expiration time among the guest timers.
*/
soft_timer_start(&timer->bg_timer, kvm_timer_earliest_exp(vcpu));
}
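/* The vcpu is about to run again; the background timer can go. */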
static void kvm_timer_unblocking(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = vcpu_timer(vcpu);
soft_timer_cancel(&timer->bg_timer);
}
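/*
 * Mirror of timer_save_state(): write CVAL before CTL, with an ISB in
 * between, so the compare value is in place before the enable bit can
 * take effect, then mark the context as loaded again.
 */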
static void timer_restore_state(struct arch_timer_context *ctx)
{
struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
unsigned long flags;
if (!timer->enabled)
return;
local_irq_save(flags);
if (ctx->loaded)
goto out;
switch (index) {
case TIMER_VTIMER:
write_sysreg_el0(ctx->cnt_cval, SYS_CNTV_CVAL);
isb();
write_sysreg_el0(ctx->cnt_ctl, SYS_CNTV_CTL);
break;
case TIMER_PTIMER:
write_sysreg_el0(ctx->cnt_cval, SYS_CNTP_CVAL);
isb();
write_sysreg_el0(ctx->cnt_ctl, SYS_CNTP_CTL);
break;
case NR_KVM_TIMERS:
BUG();
}
trace_kvm_timer_restore_state(ctx);
ctx->loaded = true;
out:
local_irq_restore(flags);
}
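/* Propagate the virtual counter offset to the hyp code. */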
static void set_cntvoff(u64 cntvoff)
{
kvm_call_hyp(__kvm_timer_set_cntvoff, cntvoff);
}
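/*
 * Drive the active state of the timer PPI on the physical GIC. This is
 * not expected to fail for a valid percpu interrupt, hence the WARN_ON.
 */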
static inline void set_timer_irq_phys_active(struct arch_timer_context *ctx, bool active)
{
int r;
r = irq_set_irqchip_state(ctx->host_timer_irq, IRQCHIP_STATE_ACTIVE, active);
WARN_ON(r);
}
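/*
 * With an in-kernel irqchip, loading a vcpu means re-evaluating the
 * timer output and replicating the virtual interrupt's active state
 * onto the physical distributor, so that a guest deactivation also
 * reaches the hardware interrupt.
 */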
static void kvm_timer_vcpu_load_gic(struct arch_timer_context *ctx)
{
struct kvm_vcpu *vcpu = ctx->vcpu;
bool phys_active = false;
/*
* Update the timer output so that it is likely to match the
* state we're about to restore. If the timer expires between
* this point and the register restoration, we'll take the
* interrupt anyway.
*/
kvm_timer_update_irq(ctx->vcpu, kvm_timer_should_fire(ctx), ctx);
if (irqchip_in_kernel(vcpu->kvm))
phys_active = kvm_vgic_map_is_active(vcpu, ctx->irq.irq);
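/*
 * If the timer output is asserted, flag the interrupt as active on
 * the physical side too, so we don't take (and replay) a spurious
 * timer exit straight after entering the guest.
 */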
phys_active |= ctx->irq.level;
set_timer_irq_phys_active(ctx, phys_active);
KVM: arm/arm64: Avoid timer save/restore in vcpu entry/exit We don't need to save and restore the hardware timer state and examine if it generates interrupts on on every entry/exit to the guest. The timer hardware is perfectly capable of telling us when it has expired by signaling interrupts. When taking a vtimer interrupt in the host, we don't want to mess with the timer configuration, we just want to forward the physical interrupt to the guest as a virtual interrupt. We can use the split priority drop and deactivate feature of the GIC to do this, which leaves an EOI'ed interrupt active on the physical distributor, making sure we don't keep taking timer interrupts which would prevent the guest from running. We can then forward the physical interrupt to the VM using the HW bit in the LR of the GIC, like we do already, which lets the guest directly deactivate both the physical and virtual timer simultaneously, allowing the timer hardware to exit the VM and generate a new physical interrupt when the timer output is again asserted later on. We do need to capture this state when migrating VCPUs between physical CPUs, however, which we use the vcpu put/load functions for, which are called through preempt notifiers whenever the thread is scheduled away from the CPU or called directly if we return from the ioctl to userspace. One caveat is that we have to save and restore the timer state in both kvm_timer_vcpu_[put/load] and kvm_timer_[schedule/unschedule], because we can have the following flows: 1. kvm_vcpu_block 2. kvm_timer_schedule 3. schedule 4. kvm_timer_vcpu_put (preempt notifier) 5. schedule (vcpu thread gets scheduled back) 6. kvm_timer_vcpu_load (preempt notifier) 7. kvm_timer_unschedule And a version where we don't actually call schedule: 1. kvm_vcpu_block 2. kvm_timer_schedule 7. kvm_timer_unschedule Since kvm_timer_[schedule/unschedule] may not be followed by put/load, but put/load also may be called independently, we call the timer save/restore functions from both paths. Since they rely on the loaded flag to never save/restore when unnecessary, this doesn't cause any harm, and we ensure that all invokations of either set of functions work as intended. An added benefit beyond not having to read and write the timer sysregs on every entry and exit is that we no longer have to actively write the active state to the physical distributor, because we configured the irq for the vtimer to only get a priority drop when handling the interrupt in the GIC driver (we called irq_set_vcpu_affinity()), and the interrupt stays active after firing on the host. Reviewed-by: Marc Zyngier <marc.zyngier@arm.com> Signed-off-by: Christoffer Dall <cdall@linaro.org>
2016-10-17 02:30:38 +08:00
}
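/*
 * Userspace-irqchip counterpart of kvm_timer_vcpu_load_gic(): with no
 * GIC active state to hide the timer signal behind, the host interrupt
 * is masked/unmasked directly instead.
 */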
static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
{
struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
/*
* Update the timer output so that it is likely to match the
* state we're about to restore. If the timer expires between
* this point and the register restoration, we'll take the
* interrupt anyway.
*/
kvm_timer_update_irq(vcpu, kvm_timer_should_fire(vtimer), vtimer);
/*
* When using a userspace irqchip with the architected timers and a
* host interrupt controller that doesn't support an active state, we
* must still prevent continuously exiting from the guest, and
* therefore mask the physical interrupt by disabling it on the host
* interrupt controller when the virtual level is high, such that the
* guest can make forward progress. Once we detect the output level
* being de-asserted, we unmask the interrupt again so that we exit
* from the guest when the timer fires.
*/
if (vtimer->irq.level)
disable_percpu_irq(host_vtimer_irq);
else
enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
}
void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = vcpu_timer(vcpu);
struct timer_map map;
if (unlikely(!timer->enabled))
return;
get_timer_map(vcpu, &map);
if (static_branch_likely(&has_gic_active_state)) {
kvm_timer_vcpu_load_gic(map.direct_vtimer);
if (map.direct_ptimer)
kvm_timer_vcpu_load_gic(map.direct_ptimer);
} else {
kvm_timer_vcpu_load_nogic(vcpu);
}
set_cntvoff(map.direct_vtimer->cntvoff);
kvm_timer_unblocking(vcpu);
timer_restore_state(map.direct_vtimer);
if (map.direct_ptimer)
timer_restore_state(map.direct_ptimer);
if (map.emul_ptimer)
timer_emulate(map.emul_ptimer);
}
bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu)
{
struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
bool vlevel, plevel;
if (likely(irqchip_in_kernel(vcpu->kvm)))
return false;
vlevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_VTIMER;
plevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_PTIMER;
return kvm_timer_should_fire(vtimer) != vlevel ||
kvm_timer_should_fire(ptimer) != plevel;
}
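/*
 * Illustrative userspace counterpart (a sketch, not kernel code): when
 * the comparison above makes KVM_RUN return, a userspace irqchip samples
 * the new levels from the sync regs and drives its emulated interrupt
 * controller, e.g.:
 *
 *	bool vline = run->s.regs.device_irq_level & KVM_ARM_DEV_EL1_VTIMER;
 *	bool pline = run->s.regs.device_irq_level & KVM_ARM_DEV_EL1_PTIMER;
 *	emulated_gic_set_ppi(cpu, 27, vline);
 *	emulated_gic_set_ppi(cpu, 30, pline);
 *
 * emulated_gic_set_ppi() is a hypothetical helper; 27 and 30 are the
 * default vtimer/ptimer PPIs.
 */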
void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = vcpu_timer(vcpu);
struct timer_map map;
struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
if (unlikely(!timer->enabled))
return;
get_timer_map(vcpu, &map);
timer_save_state(map.direct_vtimer);
if (map.direct_ptimer)
timer_save_state(map.direct_ptimer);
/*
* Cancel soft timer emulation, because the only case where we
* need it after a vcpu_put is in the context of a sleeping VCPU, and
* in that case we already factor in the deadline for the physical
* timer when scheduling the bg_timer.
*
* In any case, we re-schedule the hrtimer for the physical timer when
* coming back to the VCPU thread in kvm_timer_vcpu_load().
*/
if (map.emul_ptimer)
soft_timer_cancel(&map.emul_ptimer->hrtimer);
if (rcuwait_active(wait))
kvm_timer_blocking(vcpu);
/*
* The kernel may decide to run userspace after calling vcpu_put, so
* we reset cntvoff to 0 to ensure a consistent read between user
* accesses to the virtual counter and kernel access to the physical
* counter in the non-VHE case. For VHE, the virtual counter uses a fixed
* virtual offset of zero, so no need to zero CNTVOFF_EL2 register.
*/
set_cntvoff(0);
}
/*
* With a userspace irqchip we have to check if the guest de-asserted the
* timer and if so, unmask the timer irq signal on the host interrupt
* controller to ensure that we see future timer signals.
*/
static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu)
{
struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
if (!kvm_timer_should_fire(vtimer)) {
kvm_timer_update_irq(vcpu, false, vtimer);
if (static_branch_likely(&has_gic_active_state))
set_timer_irq_phys_active(vtimer, false);
else
enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
}
}
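/*
 * Runs on the exit path from the guest. With an in-kernel irqchip the
 * vgic tracks the timer line using the HW bit in the LR, so only the
 * userspace-irqchip case needs fixing up here.
 */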
void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = vcpu_timer(vcpu);
if (unlikely(!timer->enabled))
return;
if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
unmask_vtimer_irq_user(vcpu);
}
int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = vcpu_timer(vcpu);
struct timer_map map;
get_timer_map(vcpu, &map);
/*
* The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8
* and to 0 for ARMv7. We provide an implementation that always
* resets the timer to be disabled and unmasked and is compliant with
* the ARMv7 architecture.
*/
vcpu_vtimer(vcpu)->cnt_ctl = 0;
vcpu_ptimer(vcpu)->cnt_ctl = 0;
if (timer->enabled) {
kvm_timer_update_irq(vcpu, false, vcpu_vtimer(vcpu));
kvm_timer_update_irq(vcpu, false, vcpu_ptimer(vcpu));
if (irqchip_in_kernel(vcpu->kvm)) {
kvm_vgic_reset_mapped_irq(vcpu, map.direct_vtimer->irq.irq);
if (map.direct_ptimer)
kvm_vgic_reset_mapped_irq(vcpu, map.direct_ptimer->irq.irq);
}
}
if (map.emul_ptimer)
soft_timer_cancel(&map.emul_ptimer->hrtimer);
return 0;
}
/* Make the updates of cntvoff for all vtimer contexts atomic */
static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff)
{
int i;
struct kvm *kvm = vcpu->kvm;
struct kvm_vcpu *tmp;
mutex_lock(&kvm->lock);
kvm_for_each_vcpu(i, tmp, kvm)
vcpu_vtimer(tmp)->cntvoff = cntvoff;
/*
* When called from the vcpu create path, the CPU being created is not
* included in the loop above, so we just set it here as well.
*/
vcpu_vtimer(vcpu)->cntvoff = cntvoff;
mutex_unlock(&kvm->lock);
}
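/*
 * The architected relation behind cntvoff is:
 *
 *	CNTVCT = CNTPCT - CNTVOFF
 *
 * so restoring a guest virtual counter value 'val' amounts to programming
 * cntvoff = kvm_phys_timer_read() - val, which is exactly what the
 * KVM_REG_ARM_TIMER_CNT case in kvm_arm_timer_set_reg() below does.
 */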
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = vcpu_timer(vcpu);
struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
/* Synchronize cntvoff across all vtimers of a VM. */
update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
ptimer->cntvoff = 0;
hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
timer->bg_timer.function = kvm_bg_timer_expire;
hrtimer_init(&vtimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
hrtimer_init(&ptimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
vtimer->hrtimer.function = kvm_hrtimer_expire;
ptimer->hrtimer.function = kvm_hrtimer_expire;
vtimer->irq.irq = default_vtimer_irq.irq;
ptimer->irq.irq = default_ptimer_irq.irq;
vtimer->host_timer_irq = host_vtimer_irq;
ptimer->host_timer_irq = host_ptimer_irq;
vtimer->host_timer_irq_flags = host_vtimer_irq_flags;
ptimer->host_timer_irq_flags = host_ptimer_irq_flags;
vtimer->vcpu = vcpu;
ptimer->vcpu = vcpu;
}
static void kvm_timer_init_interrupt(void *info)
{
enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
enable_percpu_irq(host_ptimer_irq, host_ptimer_irq_flags);
}
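/*
 * kvm_timer_init_interrupt() is invoked on each CPU through the hotplug
 * callback below (kvm_timer_starting_cpu()), so both percpu timer IRQs
 * get unmasked on every CPU that comes online.
 */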
int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
{
struct arch_timer_context *timer;
switch (regid) {
case KVM_REG_ARM_TIMER_CTL:
timer = vcpu_vtimer(vcpu);
kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
break;
case KVM_REG_ARM_TIMER_CNT:
timer = vcpu_vtimer(vcpu);
update_vtimer_cntvoff(vcpu, kvm_phys_timer_read() - value);
break;
case KVM_REG_ARM_TIMER_CVAL:
timer = vcpu_vtimer(vcpu);
kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
break;
case KVM_REG_ARM_PTIMER_CTL:
timer = vcpu_ptimer(vcpu);
kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
break;
case KVM_REG_ARM_PTIMER_CVAL:
timer = vcpu_ptimer(vcpu);
kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
break;
default:
return -1;
}
return 0;
}
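/*
 * Illustrative userspace usage (a sketch assuming the standard ONE_REG
 * interface; vcpu_fd and saved_guest_cntvct are the caller's): restoring
 * the virtual counter lands in the KVM_REG_ARM_TIMER_CNT case above:
 *
 *	__u64 cnt = saved_guest_cntvct;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_ARM_TIMER_CNT,
 *		.addr = (__u64)&cnt,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
 */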
static u64 read_timer_ctl(struct arch_timer_context *timer)
{
/*
* Set the ISTATUS bit if the timer has expired.
* Note that according to the ARMv8 ARM (issue A.k), the ISTATUS bit is
* UNKNOWN when the ENABLE bit is 0, so we chose to set the ISTATUS bit
* regardless of the ENABLE bit, for implementation convenience.
*/
if (!kvm_timer_compute_delta(timer))
return timer->cnt_ctl | ARCH_TIMER_CTRL_IT_STAT;
else
return timer->cnt_ctl;
}
u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
{
switch (regid) {
case KVM_REG_ARM_TIMER_CTL:
return kvm_arm_timer_read(vcpu,
vcpu_vtimer(vcpu), TIMER_REG_CTL);
case KVM_REG_ARM_TIMER_CNT:
return kvm_arm_timer_read(vcpu,
vcpu_vtimer(vcpu), TIMER_REG_CNT);
case KVM_REG_ARM_TIMER_CVAL:
return kvm_arm_timer_read(vcpu,
vcpu_vtimer(vcpu), TIMER_REG_CVAL);
case KVM_REG_ARM_PTIMER_CTL:
return kvm_arm_timer_read(vcpu,
vcpu_ptimer(vcpu), TIMER_REG_CTL);
case KVM_REG_ARM_PTIMER_CNT:
return kvm_arm_timer_read(vcpu,
vcpu_ptimer(vcpu), TIMER_REG_CNT);
case KVM_REG_ARM_PTIMER_CVAL:
return kvm_arm_timer_read(vcpu,
vcpu_ptimer(vcpu), TIMER_REG_CVAL);
}
return (u64)-1;
}
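
/*
 * Read a timer register from the in-memory (vcpu-put) copy of the
 * timer context. TVAL is not stored directly: it is derived from CVAL
 * and the current counter, truncated to the architectural 32 bits.
 */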
static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
struct arch_timer_context *timer,
enum kvm_arch_timer_regs treg)
{
u64 val;
switch (treg) {
case TIMER_REG_TVAL:
val = timer->cnt_cval - kvm_phys_timer_read() + timer->cntvoff;
		val = lower_32_bits(val);
break;
case TIMER_REG_CTL:
val = read_timer_ctl(timer);
break;
case TIMER_REG_CVAL:
val = timer->cnt_cval;
break;
case TIMER_REG_CNT:
val = kvm_phys_timer_read() - timer->cntvoff;
break;
default:
BUG();
}
return val;
}
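
/*
 * While a vcpu is loaded, its timer state may live in hardware.
 * Temporarily put the vcpu so the in-memory copy is up to date, read
 * from that copy, then load the state back before re-enabling
 * preemption.
 */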
u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu,
enum kvm_arch_timers tmr,
enum kvm_arch_timer_regs treg)
{
u64 val;
preempt_disable();
kvm_timer_vcpu_put(vcpu);
val = kvm_arm_timer_read(vcpu, vcpu_get_timer(vcpu, tmr), treg);
kvm_timer_vcpu_load(vcpu);
preempt_enable();
return val;
}
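
/*
 * Write a timer register into the in-memory copy of the timer
 * context. TVAL writes are converted to the equivalent CVAL, and the
 * read-only ISTATUS bit is masked out of CTL writes.
 */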
static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
struct arch_timer_context *timer,
enum kvm_arch_timer_regs treg,
u64 val)
{
switch (treg) {
case TIMER_REG_TVAL:
timer->cnt_cval = kvm_phys_timer_read() - timer->cntvoff + (s32)val;
break;
case TIMER_REG_CTL:
timer->cnt_ctl = val & ~ARCH_TIMER_CTRL_IT_STAT;
break;
case TIMER_REG_CVAL:
timer->cnt_cval = val;
break;
default:
BUG();
}
}
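
/* The same put/write/load dance as kvm_arm_timer_read_sysreg() above. */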
void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu,
enum kvm_arch_timers tmr,
enum kvm_arch_timer_regs treg,
u64 val)
{
preempt_disable();
kvm_timer_vcpu_put(vcpu);
kvm_arm_timer_write(vcpu, vcpu_get_timer(vcpu, tmr), treg, val);
kvm_timer_vcpu_load(vcpu);
preempt_enable();
}
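
/*
 * CPU hotplug callbacks: enable the host timer interrupts when a CPU
 * comes online, and disable the vtimer interrupt when it goes down.
 */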
static int kvm_timer_starting_cpu(unsigned int cpu)
{
kvm_timer_init_interrupt(NULL);
return 0;
}
static int kvm_timer_dying_cpu(unsigned int cpu)
{
disable_percpu_irq(host_vtimer_irq);
return 0;
}
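
/*
 * One-time, system-wide initialization at KVM init: pick up the timer
 * IRQs discovered by the arch timer driver, request them, and register
 * the CPU hotplug callbacks above.
 */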
int kvm_timer_hyp_init(bool has_gic)
{
struct arch_timer_kvm_info *info;
int err;
info = arch_timer_get_kvm_info();
timecounter = &info->timecounter;
if (!timecounter->cc) {
kvm_err("kvm_arch_timer: uninitialized timecounter\n");
return -ENODEV;
}
/* First, do the virtual EL1 timer irq */
if (info->virtual_irq <= 0) {
kvm_err("kvm_arch_timer: invalid virtual timer IRQ: %d\n",
info->virtual_irq);
return -ENODEV;
}
host_vtimer_irq = info->virtual_irq;
host_vtimer_irq_flags = irq_get_trigger_type(host_vtimer_irq);
if (host_vtimer_irq_flags != IRQF_TRIGGER_HIGH &&
host_vtimer_irq_flags != IRQF_TRIGGER_LOW) {
kvm_err("Invalid trigger for vtimer IRQ%d, assuming level low\n",
host_vtimer_irq);
host_vtimer_irq_flags = IRQF_TRIGGER_LOW;
}
err = request_percpu_irq(host_vtimer_irq, kvm_arch_timer_handler,
"kvm guest vtimer", kvm_get_running_vcpus());
if (err) {
kvm_err("kvm_arch_timer: can't request vtimer interrupt %d (%d)\n",
host_vtimer_irq, err);
return err;
}
if (has_gic) {
err = irq_set_vcpu_affinity(host_vtimer_irq,
kvm_get_running_vcpus());
if (err) {
kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
goto out_free_irq;
}
static_branch_enable(&has_gic_active_state);
}
kvm_debug("virtual timer IRQ%d\n", host_vtimer_irq);
/* Now let's do the physical EL1 timer irq */
if (info->physical_irq > 0) {
host_ptimer_irq = info->physical_irq;
host_ptimer_irq_flags = irq_get_trigger_type(host_ptimer_irq);
if (host_ptimer_irq_flags != IRQF_TRIGGER_HIGH &&
host_ptimer_irq_flags != IRQF_TRIGGER_LOW) {
kvm_err("Invalid trigger for ptimer IRQ%d, assuming level low\n",
host_ptimer_irq);
host_ptimer_irq_flags = IRQF_TRIGGER_LOW;
}
err = request_percpu_irq(host_ptimer_irq, kvm_arch_timer_handler,
"kvm guest ptimer", kvm_get_running_vcpus());
if (err) {
kvm_err("kvm_arch_timer: can't request ptimer interrupt %d (%d)\n",
host_ptimer_irq, err);
return err;
}
if (has_gic) {
err = irq_set_vcpu_affinity(host_ptimer_irq,
kvm_get_running_vcpus());
if (err) {
kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
goto out_free_irq;
}
}
kvm_debug("physical timer IRQ%d\n", host_ptimer_irq);
} else if (has_vhe()) {
kvm_err("kvm_arch_timer: invalid physical timer IRQ: %d\n",
info->physical_irq);
err = -ENODEV;
goto out_free_irq;
}
cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING,
"kvm/arm/timer:starting", kvm_timer_starting_cpu,
kvm_timer_dying_cpu);
return 0;
out_free_irq:
free_percpu_irq(host_vtimer_irq, kvm_get_running_vcpus());
return err;
}
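
/* Cancel any background software timer left over on vcpu teardown. */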
void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = vcpu_timer(vcpu);
soft_timer_cancel(&timer->bg_timer);
}
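
/*
 * The timer PPIs must be claimable by the timer code (i.e. not owned
 * by another in-kernel user of those lines), and every vcpu in the VM
 * must be configured with the same vtimer and ptimer IRQ numbers.
 */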
static bool timer_irqs_are_valid(struct kvm_vcpu *vcpu)
{
int vtimer_irq, ptimer_irq;
int i, ret;
vtimer_irq = vcpu_vtimer(vcpu)->irq.irq;
ret = kvm_vgic_set_owner(vcpu, vtimer_irq, vcpu_vtimer(vcpu));
if (ret)
return false;
ptimer_irq = vcpu_ptimer(vcpu)->irq.irq;
ret = kvm_vgic_set_owner(vcpu, ptimer_irq, vcpu_ptimer(vcpu));
if (ret)
return false;
kvm_for_each_vcpu(i, vcpu, vcpu->kvm) {
if (vcpu_vtimer(vcpu)->irq.irq != vtimer_irq ||
vcpu_ptimer(vcpu)->irq.irq != ptimer_irq)
return false;
}
return true;
}
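
/*
 * Callback handed to the vgic (see kvm_vgic_map_phys_irq() below) so
 * it can sample the current level of a timer interrupt line.
 */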
bool kvm_arch_timer_get_input_level(int vintid)
{
struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
struct arch_timer_context *timer;
if (vintid == vcpu_vtimer(vcpu)->irq.irq)
timer = vcpu_vtimer(vcpu);
else if (vintid == vcpu_ptimer(vcpu)->irq.irq)
timer = vcpu_ptimer(vcpu);
else
BUG();
return kvm_timer_should_fire(timer);
}
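
/*
 * Final per-vcpu timer setup before the vcpu first runs: with an
 * in-kernel irqchip, validate the configured PPIs and map the
 * directly-injected timer interrupts to their host IRQs.
 */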
int kvm_timer_enable(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = vcpu_timer(vcpu);
struct timer_map map;
int ret;
if (timer->enabled)
return 0;
/* Without a VGIC we do not map virtual IRQs to physical IRQs */
if (!irqchip_in_kernel(vcpu->kvm))
goto no_vgic;
if (!vgic_initialized(vcpu->kvm))
return -ENODEV;
if (!timer_irqs_are_valid(vcpu)) {
kvm_debug("incorrectly configured timer irqs\n");
return -EINVAL;
}
get_timer_map(vcpu, &map);
ret = kvm_vgic_map_phys_irq(vcpu,
map.direct_vtimer->host_timer_irq,
map.direct_vtimer->irq.irq,
kvm_arch_timer_get_input_level);
if (ret)
return ret;
if (map.direct_ptimer) {
ret = kvm_vgic_map_phys_irq(vcpu,
map.direct_ptimer->host_timer_irq,
map.direct_ptimer->irq.irq,
kvm_arch_timer_get_input_level);
}
if (ret)
return ret;
no_vgic:
timer->enabled = 1;
return 0;
}
/*
 * On a VHE system we only need to configure the EL2 timer trap
 * register once, not on every world switch.
 * The host kernel runs at EL2 with HCR_EL2.TGE == 1, so these bits
 * have no effect on host kernel execution.
 */
void kvm_timer_init_vhe(void)
{
	/* When HCR_EL2.E2H == 1, EL1PCEN and EL1PCTEN are shifted by 10 */
u32 cnthctl_shift = 10;
u64 val;
/*
* VHE systems allow the guest direct access to the EL1 physical
* timer/counter.
*/
val = read_sysreg(cnthctl_el2);
val |= (CNTHCTL_EL1PCEN << cnthctl_shift);
val |= (CNTHCTL_EL1PCTEN << cnthctl_shift);
write_sysreg(val, cnthctl_el2);
}
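
/* The timer PPI numbers are a per-VM setting: apply them to every vcpu. */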
static void set_timer_irqs(struct kvm *kvm, int vtimer_irq, int ptimer_irq)
{
struct kvm_vcpu *vcpu;
int i;
kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu_vtimer(vcpu)->irq.irq = vtimer_irq;
vcpu_ptimer(vcpu)->irq.irq = ptimer_irq;
}
}
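
/*
 * Device attribute interface for configuring the timer PPIs from
 * userspace; rejected once the timer has been initialized.
 *
 * A hypothetical userspace sketch (illustrative only, not part of
 * this file), moving the vtimer to PPI 28 before the first vcpu run:
 *
 *	int ppi = 28;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_ARM_VCPU_TIMER_CTRL,
 *		.attr  = KVM_ARM_VCPU_TIMER_IRQ_VTIMER,
 *		.addr  = (__u64)&ppi,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
 */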
int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
int __user *uaddr = (int __user *)(long)attr->addr;
struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
int irq;
if (!irqchip_in_kernel(vcpu->kvm))
return -EINVAL;
if (get_user(irq, uaddr))
return -EFAULT;
	if (!irq_is_ppi(irq))
return -EINVAL;
if (vcpu->arch.timer_cpu.enabled)
return -EBUSY;
switch (attr->attr) {
case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
set_timer_irqs(vcpu->kvm, irq, ptimer->irq.irq);
break;
case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
set_timer_irqs(vcpu->kvm, vtimer->irq.irq, irq);
break;
default:
return -ENXIO;
}
return 0;
}
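
/* Read back which PPI a given timer is currently wired to. */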
int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
int __user *uaddr = (int __user *)(long)attr->addr;
struct arch_timer_context *timer;
int irq;
switch (attr->attr) {
case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
timer = vcpu_vtimer(vcpu);
break;
case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
timer = vcpu_ptimer(vcpu);
break;
default:
return -ENXIO;
}
irq = timer->irq.irq;
return put_user(irq, uaddr);
}
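
/* Report which timer attributes this kernel supports. */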
int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
switch (attr->attr) {
case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
return 0;
}
return -ENXIO;
}