KVM: Remove internal timer abstraction

kvm_timer_fn(), the sole inhabitant of timer.c, is only used by lapic.c. Move
it there to make it easier to hack on it.

struct kvm_timer is a thin wrapper around hrtimer, and only adds obfuscation.
Move it near its two users (with different names) to prepare for simplification.

Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Author: Avi Kivity, 2012-07-26 18:01:50 +03:00 (committed by Marcelo Tosatti)
parent 4a4541a40e
commit e9d90d472d
7 changed files with 67 additions and 73 deletions
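
Both timer callbacks in the diff below, pit_timer_fn() and apic_timer_fn(), get from the embedded hrtimer back to their wrapper struct via container_of(). The following is a minimal, compilable userspace sketch of that pattern only; fake_hrtimer, demo_timer and demo_timer_fn are made-up stand-ins for illustration, not KVM types.

#include <stddef.h>
#include <stdio.h>

/* Same idea as the kernel's container_of(): recover the enclosing
 * struct from a pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_hrtimer { int armed; };	/* stand-in for struct hrtimer */

struct demo_timer {			/* stand-in for the per-user wrapper */
	struct fake_hrtimer timer;	/* embedded member */
	long period_ns;
};

/* The callback is handed only the embedded member, just as the real
 * callbacks are handed a struct hrtimer *. */
static void demo_timer_fn(struct fake_hrtimer *data)
{
	struct demo_timer *t = container_of(data, struct demo_timer, timer);

	printf("period = %ld ns\n", t->period_ns);
}

int main(void)
{
	struct demo_timer t = { .period_ns = 1000000 };

	demo_timer_fn(&t.timer);	/* pass only the embedded member */
	return 0;
}

Because each wrapper embeds the hrtimer and recovers itself this way, lapic.c and i8254.c can each keep a private wrapper type, which is what allows the shared kvm_timer.h / timer.c abstraction to be dropped.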

arch/x86/kvm/Makefile

@@ -12,7 +12,7 @@ kvm-$(CONFIG_IOMMU_API) += $(addprefix ../../../virt/kvm/, iommu.o)
kvm-$(CONFIG_KVM_ASYNC_PF) += $(addprefix ../../../virt/kvm/, async_pf.o)
kvm-y += x86.o mmu.o emulate.o i8259.o irq.o lapic.o \
-i8254.o timer.o cpuid.o pmu.o
+i8254.o cpuid.o pmu.o
kvm-intel-y += vmx.o
kvm-amd-y += svm.o

arch/x86/kvm/i8254.c

@@ -272,14 +272,14 @@ static void destroy_pit_timer(struct kvm_pit *pit)
flush_kthread_work(&pit->expired);
}
-static bool kpit_is_periodic(struct kvm_timer *ktimer)
+static bool kpit_is_periodic(struct kvm_pit_timer *ktimer)
{
struct kvm_kpit_state *ps = container_of(ktimer, struct kvm_kpit_state,
pit_timer);
return ps->is_periodic;
}
-static struct kvm_timer_ops kpit_ops = {
+static struct kvm_pit_timer_ops kpit_ops = {
.is_periodic = kpit_is_periodic,
};
@@ -322,7 +322,7 @@ static void pit_do_work(struct kthread_work *work)
static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
{
-struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
+struct kvm_pit_timer *ktimer = container_of(data, struct kvm_pit_timer, timer);
struct kvm_pit *pt = ktimer->kvm->arch.vpit;
if (ktimer->reinject || !atomic_read(&ktimer->pending)) {
@@ -340,7 +340,7 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
static void create_pit_timer(struct kvm *kvm, u32 val, int is_period)
{
struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;
-struct kvm_timer *pt = &ps->pit_timer;
+struct kvm_pit_timer *pt = &ps->pit_timer;
s64 interval;
if (!irqchip_in_kernel(kvm) || ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)

arch/x86/kvm/i8254.h

@@ -21,10 +21,26 @@ struct kvm_kpit_channel_state {
ktime_t count_load_time;
};
+struct kvm_pit_timer {
+	struct hrtimer timer;
+	s64 period; /* unit: ns */
+	u32 timer_mode_mask;
+	u64 tscdeadline;
+	atomic_t pending; /* accumulated triggered timers */
+	bool reinject;
+	struct kvm_pit_timer_ops *t_ops;
+	struct kvm *kvm;
+	struct kvm_vcpu *vcpu;
+};
+
+struct kvm_pit_timer_ops {
+	bool (*is_periodic)(struct kvm_pit_timer *);
+};
+
struct kvm_kpit_state {
struct kvm_kpit_channel_state channels[3];
u32 flags;
-struct kvm_timer pit_timer;
+struct kvm_pit_timer pit_timer;
bool is_periodic;
u32 speaker_data_on;
struct mutex lock;

arch/x86/kvm/kvm_timer.h (deleted)

@@ -1,18 +0,0 @@
-struct kvm_timer {
-	struct hrtimer timer;
-	s64 period; /* unit: ns */
-	u32 timer_mode_mask;
-	u64 tscdeadline;
-	atomic_t pending; /* accumulated triggered timers */
-	bool reinject;
-	struct kvm_timer_ops *t_ops;
-	struct kvm *kvm;
-	struct kvm_vcpu *vcpu;
-};
-
-struct kvm_timer_ops {
-	bool (*is_periodic)(struct kvm_timer *);
-};
-
-enum hrtimer_restart kvm_timer_fn(struct hrtimer *data);

arch/x86/kvm/lapic.c

@@ -1262,6 +1262,34 @@ static const struct kvm_io_device_ops apic_mmio_ops = {
.write = apic_mmio_write,
};
+static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
+{
+	struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
+	struct kvm_vcpu *vcpu = ktimer->vcpu;
+	wait_queue_head_t *q = &vcpu->wq;
+
+	/*
+	 * There is a race window between reading and incrementing, but we do
+	 * not care about potentially losing timer events in the !reinject
+	 * case anyway. Note: KVM_REQ_PENDING_TIMER is implicitly checked
+	 * in vcpu_enter_guest.
+	 */
+	if (ktimer->reinject || !atomic_read(&ktimer->pending)) {
+		atomic_inc(&ktimer->pending);
+		/* FIXME: this code should not know anything about vcpus */
+		kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
+	}
+
+	if (waitqueue_active(q))
+		wake_up_interruptible(q);
+
+	if (ktimer->t_ops->is_periodic(ktimer)) {
+		hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
+		return HRTIMER_RESTART;
+	} else
+		return HRTIMER_NORESTART;
+}
+
int kvm_create_lapic(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic;
@@ -1285,7 +1313,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
HRTIMER_MODE_ABS);
-apic->lapic_timer.timer.function = kvm_timer_fn;
+apic->lapic_timer.timer.function = apic_timer_fn;
apic->lapic_timer.t_ops = &lapic_timer_ops;
apic->lapic_timer.kvm = vcpu->kvm;
apic->lapic_timer.vcpu = vcpu;

arch/x86/kvm/lapic.h

@@ -2,10 +2,25 @@
#define __KVM_X86_LAPIC_H
#include "iodev.h"
#include "kvm_timer.h"
#include <linux/kvm_host.h>
+struct kvm_timer {
+	struct hrtimer timer;
+	s64 period; /* unit: ns */
+	u32 timer_mode_mask;
+	u64 tscdeadline;
+	atomic_t pending; /* accumulated triggered timers */
+	bool reinject;
+	struct kvm_timer_ops *t_ops;
+	struct kvm *kvm;
+	struct kvm_vcpu *vcpu;
+};
+
+struct kvm_timer_ops {
+	bool (*is_periodic)(struct kvm_timer *);
+};
+
struct kvm_lapic {
unsigned long base_address;
struct kvm_io_device dev;

arch/x86/kvm/timer.c (deleted)

@@ -1,47 +0,0 @@
-/*
- * Kernel-based Virtual Machine driver for Linux
- *
- * This module enables machines with Intel VT-x extensions to run virtual
- * machines without emulation or binary translation.
- *
- * timer support
- *
- * Copyright 2010 Red Hat, Inc. and/or its affiliates.
- *
- * This work is licensed under the terms of the GNU GPL, version 2. See
- * the COPYING file in the top-level directory.
- */
-
-#include <linux/kvm_host.h>
-#include <linux/kvm.h>
-#include <linux/hrtimer.h>
-#include <linux/atomic.h>
-#include "kvm_timer.h"
-
-enum hrtimer_restart kvm_timer_fn(struct hrtimer *data)
-{
-	struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
-	struct kvm_vcpu *vcpu = ktimer->vcpu;
-	wait_queue_head_t *q = &vcpu->wq;
-
-	/*
-	 * There is a race window between reading and incrementing, but we do
-	 * not care about potentially losing timer events in the !reinject
-	 * case anyway. Note: KVM_REQ_PENDING_TIMER is implicitly checked
-	 * in vcpu_enter_guest.
-	 */
-	if (ktimer->reinject || !atomic_read(&ktimer->pending)) {
-		atomic_inc(&ktimer->pending);
-		/* FIXME: this code should not know anything about vcpus */
-		kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
-	}
-
-	if (waitqueue_active(q))
-		wake_up_interruptible(q);
-
-	if (ktimer->t_ops->is_periodic(ktimer)) {
-		hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
-		return HRTIMER_RESTART;
-	} else
-		return HRTIMER_NORESTART;
-}