Merge branch 'kvm-updates-2.6.27' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm
* 'kvm-updates-2.6.27' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm: (70 commits)
  KVM: Adjust smp_call_function_mask() callers to new requirements
  KVM: MMU: Fix potential race setting upper shadow ptes on nonpae hosts
  KVM: x86 emulator: emulate clflush
  KVM: MMU: improve invalid shadow root page handling
  KVM: MMU: nuke shadowed pgtable pages and ptes on memslot destruction
  KVM: Prefix some x86 low level function with kvm_, to avoid namespace issues
  KVM: check injected pic irq within valid pic irqs
  KVM: x86 emulator: Fix HLT instruction
  KVM: Apply the kernel sigmask to vcpus blocked due to being uninitialized
  KVM: VMX: Add ept_sync_context in flush_tlb
  KVM: mmu_shrink: kvm_mmu_zap_page requires slots_lock to be held
  x86: KVM guest: make kvm_smp_prepare_boot_cpu() static
  KVM: SVM: fix suspend/resume support
  KVM: s390: rename private structures
  KVM: s390: Set guest storage limit and offset to sane values
  KVM: Fix memory leak on guest exit
  KVM: s390: dont allocate dirty bitmap
  KVM: move slots_lock acquision down to vapic_exit
  KVM: VMX: Fake emulate Intel perfctr MSRs
  KVM: VMX: Fix a wrong usage of vmcs_config
  ...
commit f076ab8d04
@@ -43,7 +43,8 @@ $(obj)/$(offsets-file): arch/ia64/kvm/asm-offsets.s
 EXTRA_CFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/
 EXTRA_AFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/
 
-common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o)
+common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \
+		coalesced_mmio.o)
 
 kvm-objs := $(common-objs) kvm-ia64.o kvm_fw.o
 obj-$(CONFIG_KVM) += kvm.o

@@ -187,6 +187,9 @@ int kvm_dev_ioctl_check_extension(long ext)
 
 		r = 1;
 		break;
+	case KVM_CAP_COALESCED_MMIO:
+		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
+		break;
 	default:
 		r = 0;
 	}
@@ -195,11 +198,11 @@ int kvm_dev_ioctl_check_extension(long ext)
 }
 
 static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
-					gpa_t addr)
+					gpa_t addr, int len, int is_write)
 {
 	struct kvm_io_device *dev;
 
-	dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr);
+	dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr, len, is_write);
 
 	return dev;
 }
@@ -231,7 +234,7 @@ static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	kvm_run->exit_reason = KVM_EXIT_MMIO;
 	return 0;
 mmio:
-	mmio_dev = vcpu_find_mmio_dev(vcpu, p->addr);
+	mmio_dev = vcpu_find_mmio_dev(vcpu, p->addr, p->size, !p->dir);
 	if (mmio_dev) {
 		if (!p->dir)
 			kvm_iodevice_write(mmio_dev, p->addr, p->size,
@@ -1035,14 +1038,6 @@ static void kvm_free_vmm_area(void)
 	}
 }
 
-/*
- * Make sure that a cpu that is being hot-unplugged does not have any vcpus
- * cached on it. Leave it as blank for IA64.
- */
-void decache_vcpus_on_cpu(int cpu)
-{
-}
-
 static void vti_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 }
@@ -1460,6 +1455,9 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 	return 0;
 }
 
+void kvm_arch_flush_shadow(struct kvm *kvm)
+{
+}
 
 long kvm_arch_dev_ioctl(struct file *filp,
 		unsigned int ioctl, unsigned long arg)

@@ -4,7 +4,7 @@
 
 EXTRA_CFLAGS += -Ivirt/kvm -Iarch/powerpc/kvm
 
-common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o)
+common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
 
 kvm-objs := $(common-objs) powerpc.o emulate.o booke_guest.o
 obj-$(CONFIG_KVM) += kvm.o

@@ -145,6 +145,9 @@ int kvm_dev_ioctl_check_extension(long ext)
 	case KVM_CAP_USER_MEMORY:
 		r = 1;
 		break;
+	case KVM_CAP_COALESCED_MMIO:
+		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
+		break;
 	default:
 		r = 0;
 		break;
@@ -167,6 +170,10 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 	return 0;
 }
 
+void kvm_arch_flush_shadow(struct kvm *kvm)
+{
+}
+
 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 {
 	struct kvm_vcpu *vcpu;
@@ -240,10 +247,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
 }
 
-void decache_vcpus_on_cpu(int cpu)
-{
-}
-
 int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
 				    struct kvm_debug_guest *dbg)
 {

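The KVM_CAP_COALESCED_MMIO hunks above (ia64 and powerpc here, x86 further down) make the capability check return the page offset of the coalesced-MMIO ring instead of a plain boolean. A minimal userspace sketch of how that return value would be consumed; map_coalesced_ring() is a hypothetical helper, the fds are assumed to be opened elsewhere, and the mmap layout (kvm_run followed by the ring page) matches KVM of this period:

    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <unistd.h>
    #include <linux/kvm.h>

    /* Sketch: query the coalesced-MMIO capability on the /dev/kvm fd and
     * locate the ring inside the vcpu mmap area. The non-zero return of
     * KVM_CHECK_EXTENSION is KVM_COALESCED_MMIO_PAGE_OFFSET, in pages,
     * exactly as returned by the hunks above. */
    static struct kvm_coalesced_mmio_ring *
    map_coalesced_ring(int kvm_fd, int vcpu_fd)
    {
            int pgoff = ioctl(kvm_fd, KVM_CHECK_EXTENSION,
                              KVM_CAP_COALESCED_MMIO);
            long size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
            void *run;

            if (pgoff <= 0 || size < 0)
                    return NULL;            /* capability absent */
            run = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                       vcpu_fd, 0);
            if (run == MAP_FAILED)
                    return NULL;
            /* the ring page sits pgoff pages into the vcpu mapping */
            return (struct kvm_coalesced_mmio_ring *)
                    ((char *)run + (long)pgoff * getpagesize());
    }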
@@ -31,7 +31,7 @@ static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
 }
 
 static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
-				      struct interrupt_info *inti)
+				      struct kvm_s390_interrupt_info *inti)
 {
 	switch (inti->type) {
 	case KVM_S390_INT_EMERGENCY:
@@ -91,7 +91,7 @@ static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
 }
 
 static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
-				      struct interrupt_info *inti)
+				      struct kvm_s390_interrupt_info *inti)
 {
 	switch (inti->type) {
 	case KVM_S390_INT_EMERGENCY:
@@ -111,7 +111,7 @@ static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
 }
 
 static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
-				   struct interrupt_info *inti)
+				   struct kvm_s390_interrupt_info *inti)
 {
 	const unsigned short table[] = { 2, 4, 4, 6 };
 	int rc, exception = 0;
@@ -290,9 +290,9 @@ static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
 
 int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
 {
-	struct local_interrupt *li = &vcpu->arch.local_int;
-	struct float_interrupt *fi = vcpu->arch.local_int.float_int;
-	struct interrupt_info *inti;
+	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
+	struct kvm_s390_interrupt_info *inti;
 	int rc = 0;
 
 	if (atomic_read(&li->active)) {
@@ -408,9 +408,9 @@ void kvm_s390_idle_wakeup(unsigned long data)
 
 void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
 {
-	struct local_interrupt *li = &vcpu->arch.local_int;
-	struct float_interrupt *fi = vcpu->arch.local_int.float_int;
-	struct interrupt_info *n, *inti = NULL;
+	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
+	struct kvm_s390_interrupt_info *n, *inti = NULL;
 	int deliver;
 
 	__reset_intercept_indicators(vcpu);
@@ -465,8 +465,8 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
 
 int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
 {
-	struct local_interrupt *li = &vcpu->arch.local_int;
-	struct interrupt_info *inti;
+	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+	struct kvm_s390_interrupt_info *inti;
 
 	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
 	if (!inti)
@@ -487,9 +487,9 @@ int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
 int kvm_s390_inject_vm(struct kvm *kvm,
 		       struct kvm_s390_interrupt *s390int)
 {
-	struct local_interrupt *li;
-	struct float_interrupt *fi;
-	struct interrupt_info *inti;
+	struct kvm_s390_local_interrupt *li;
+	struct kvm_s390_float_interrupt *fi;
+	struct kvm_s390_interrupt_info *inti;
 	int sigcpu;
 
 	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
@@ -544,8 +544,8 @@ int kvm_s390_inject_vm(struct kvm *kvm,
 int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
 			 struct kvm_s390_interrupt *s390int)
 {
-	struct local_interrupt *li;
-	struct interrupt_info *inti;
+	struct kvm_s390_local_interrupt *li;
+	struct kvm_s390_interrupt_info *inti;
 
 	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
 	if (!inti)

@@ -79,10 +79,6 @@ void kvm_arch_hardware_disable(void *garbage)
 {
 }
 
-void decache_vcpus_on_cpu(int cpu)
-{
-}
-
 int kvm_arch_hardware_setup(void)
 {
 	return 0;
@@ -198,6 +194,7 @@ out_nokvm:
 void kvm_arch_destroy_vm(struct kvm *kvm)
 {
 	debug_unregister(kvm->arch.dbf);
+	kvm_free_physmem(kvm);
 	free_page((unsigned long)(kvm->arch.sca));
 	kfree(kvm);
 	module_put(THIS_MODULE);
@@ -250,11 +247,16 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
 	vcpu->arch.sie_block->gbea = 1;
 }
 
+/* The current code can have up to 256 pages for virtio */
+#define VIRTIODESCSPACE (256ul * 4096ul)
+
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
 	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
-	vcpu->arch.sie_block->gmslm = 0xffffffffffUL;
-	vcpu->arch.sie_block->gmsor = 0x000000000000;
+	vcpu->arch.sie_block->gmslm = vcpu->kvm->arch.guest_memsize +
+				      vcpu->kvm->arch.guest_origin +
+				      VIRTIODESCSPACE - 1ul;
+	vcpu->arch.sie_block->gmsor = vcpu->kvm->arch.guest_origin;
 	vcpu->arch.sie_block->ecb  = 2;
 	vcpu->arch.sie_block->eca  = 0xC1002001U;
 	setup_timer(&vcpu->arch.ckc_timer, kvm_s390_idle_wakeup,
@@ -273,7 +275,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 	if (!vcpu)
 		goto out_nomem;
 
-	vcpu->arch.sie_block = (struct sie_block *) get_zeroed_page(GFP_KERNEL);
+	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
+					get_zeroed_page(GFP_KERNEL);
 
 	if (!vcpu->arch.sie_block)
 		goto out_free_cpu;
@@ -672,6 +675,10 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 	return 0;
 }
 
+void kvm_arch_flush_shadow(struct kvm *kvm)
+{
+}
+
 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
 {
 	return gfn;

@@ -199,7 +199,7 @@ out:
 
 static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
 {
-	struct float_interrupt *fi = &vcpu->kvm->arch.float_int;
+	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
 	int cpus = 0;
 	int n;
 

@@ -45,7 +45,7 @@
 
 static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr, u64 *reg)
 {
-	struct float_interrupt *fi = &vcpu->kvm->arch.float_int;
+	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
 	int rc;
 
 	if (cpu_addr >= KVM_MAX_VCPUS)
@@ -71,9 +71,9 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr, u64 *reg)
 
 static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
 {
-	struct float_interrupt *fi = &vcpu->kvm->arch.float_int;
-	struct local_interrupt *li;
-	struct interrupt_info *inti;
+	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
+	struct kvm_s390_local_interrupt *li;
+	struct kvm_s390_interrupt_info *inti;
 	int rc;
 
 	if (cpu_addr >= KVM_MAX_VCPUS)
@@ -108,9 +108,9 @@ unlock:
 
 static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int store)
 {
-	struct float_interrupt *fi = &vcpu->kvm->arch.float_int;
-	struct local_interrupt *li;
-	struct interrupt_info *inti;
+	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
+	struct kvm_s390_local_interrupt *li;
+	struct kvm_s390_interrupt_info *inti;
 	int rc;
 
 	if (cpu_addr >= KVM_MAX_VCPUS)
@@ -169,9 +169,9 @@ static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
 static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
 			     u64 *reg)
 {
-	struct float_interrupt *fi = &vcpu->kvm->arch.float_int;
-	struct local_interrupt *li;
-	struct interrupt_info *inti;
+	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
+	struct kvm_s390_local_interrupt *li;
+	struct kvm_s390_interrupt_info *inti;
 	int rc;
 	u8 tmp;
 

@@ -113,7 +113,7 @@ static void kvm_setup_secondary_clock(void)
 #endif
 
 #ifdef CONFIG_SMP
-void __init kvm_smp_prepare_boot_cpu(void)
+static void __init kvm_smp_prepare_boot_cpu(void)
 {
 	WARN_ON(kvm_register_clock("primary cpu clock"));
 	native_smp_prepare_boot_cpu();

@@ -2,7 +2,8 @@
 # Makefile for Kernel-based Virtual Machine module
 #
 
-common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o)
+common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \
+		coalesced_mmio.o)
 ifeq ($(CONFIG_KVM_TRACE),y)
 common-objs += $(addprefix ../../../virt/kvm/, kvm_trace.o)
 endif

@@ -91,7 +91,7 @@ static void pit_set_gate(struct kvm *kvm, int channel, u32 val)
 	c->gate = val;
 }
 
-int pit_get_gate(struct kvm *kvm, int channel)
+static int pit_get_gate(struct kvm *kvm, int channel)
 {
 	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
 
@@ -193,19 +193,16 @@ static void pit_latch_status(struct kvm *kvm, int channel)
 	}
 }
 
-int __pit_timer_fn(struct kvm_kpit_state *ps)
+static int __pit_timer_fn(struct kvm_kpit_state *ps)
 {
 	struct kvm_vcpu *vcpu0 = ps->pit->kvm->vcpus[0];
 	struct kvm_kpit_timer *pt = &ps->pit_timer;
 
-	atomic_inc(&pt->pending);
-	smp_mb__after_atomic_inc();
-	if (vcpu0) {
+	if (!atomic_inc_and_test(&pt->pending))
 		set_bit(KVM_REQ_PENDING_TIMER, &vcpu0->requests);
-		if (waitqueue_active(&vcpu0->wq)) {
-			vcpu0->arch.mp_state = KVM_MP_STATE_RUNNABLE;
-			wake_up_interruptible(&vcpu0->wq);
-		}
+	if (vcpu0 && waitqueue_active(&vcpu0->wq)) {
+		vcpu0->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+		wake_up_interruptible(&vcpu0->wq);
 	}
 
 	pt->timer.expires = ktime_add_ns(pt->timer.expires, pt->period);
@@ -308,6 +305,7 @@ static void pit_load_count(struct kvm *kvm, int channel, u32 val)
 		create_pit_timer(&ps->pit_timer, val, 0);
 		break;
 	case 2:
 	case 3:
 		create_pit_timer(&ps->pit_timer, val, 1);
+		break;
 	default:
@@ -459,7 +457,8 @@ static void pit_ioport_read(struct kvm_io_device *this,
 	mutex_unlock(&pit_state->lock);
 }
 
-static int pit_in_range(struct kvm_io_device *this, gpa_t addr)
+static int pit_in_range(struct kvm_io_device *this, gpa_t addr,
+			int len, int is_write)
 {
 	return ((addr >= KVM_PIT_BASE_ADDRESS) &&
 		(addr < KVM_PIT_BASE_ADDRESS + KVM_PIT_MEM_LENGTH));
@@ -500,7 +499,8 @@ static void speaker_ioport_read(struct kvm_io_device *this,
 	mutex_unlock(&pit_state->lock);
 }
 
-static int speaker_in_range(struct kvm_io_device *this, gpa_t addr)
+static int speaker_in_range(struct kvm_io_device *this, gpa_t addr,
+			    int len, int is_write)
 {
 	return (addr == KVM_SPEAKER_BASE_ADDRESS);
 }
@@ -575,7 +575,7 @@ void kvm_free_pit(struct kvm *kvm)
 	}
 }
 
-void __inject_pit_timer_intr(struct kvm *kvm)
+static void __inject_pit_timer_intr(struct kvm *kvm)
 {
 	mutex_lock(&kvm->lock);
 	kvm_ioapic_set_irq(kvm->arch.vioapic, 0, 1);

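The in_range() changes in this file (and in the PIC, APIC, and ia64 hunks around it) thread the access length and direction down to each emulated device, so a device can claim an access based on all three. A minimal sketch of the new callback contract; my_dev_in_range() and the MY_DEV_* constants are illustrative placeholders, not names from the diff, and struct kvm_io_device/gpa_t come from virt/kvm/iodev.h of this period:

    #include "iodev.h"      /* struct kvm_io_device, gpa_t (in-tree header) */

    #define MY_DEV_BASE 0xfee01000ull   /* placeholder MMIO window */
    #define MY_DEV_LEN  0x1000ull

    /* Sketch of the extended contract: the bus scan passes size and
     * direction, and the device accepts only accesses it can handle;
     * here, a read-only range of fixed length. */
    static int my_dev_in_range(struct kvm_io_device *this, gpa_t addr,
                               int len, int is_write)
    {
            if (is_write)
                    return 0;
            return addr >= MY_DEV_BASE &&
                   addr + len <= MY_DEV_BASE + MY_DEV_LEN;
    }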
@@ -130,8 +130,10 @@ void kvm_pic_set_irq(void *opaque, int irq, int level)
 {
 	struct kvm_pic *s = opaque;
 
-	pic_set_irq1(&s->pics[irq >> 3], irq & 7, level);
-	pic_update_irq(s);
+	if (irq >= 0 && irq < PIC_NUM_PINS) {
+		pic_set_irq1(&s->pics[irq >> 3], irq & 7, level);
+		pic_update_irq(s);
+	}
 }
 
 /*
@@ -346,7 +348,8 @@ static u32 elcr_ioport_read(void *opaque, u32 addr1)
 	return s->elcr;
 }
 
-static int picdev_in_range(struct kvm_io_device *this, gpa_t addr)
+static int picdev_in_range(struct kvm_io_device *this, gpa_t addr,
+			   int len, int is_write)
 {
 	switch (addr) {
 	case 0x20:

@@ -30,6 +30,8 @@
 #include "ioapic.h"
 #include "lapic.h"
 
+#define PIC_NUM_PINS 16
+
 struct kvm;
 struct kvm_vcpu;
 

@@ -356,8 +356,9 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 	case APIC_DM_SMI:
 		printk(KERN_DEBUG "Ignoring guest SMI\n");
 		break;
+
 	case APIC_DM_NMI:
-		printk(KERN_DEBUG "Ignoring guest NMI\n");
+		kvm_inject_nmi(vcpu);
 		break;
 
 	case APIC_DM_INIT:
@@ -572,6 +573,8 @@ static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
 {
 	u32 val = 0;
 
+	KVMTRACE_1D(APIC_ACCESS, apic->vcpu, (u32)offset, handler);
+
 	if (offset >= LAPIC_MMIO_LENGTH)
 		return 0;
 
@@ -695,6 +698,8 @@ static void apic_mmio_write(struct kvm_io_device *this,
 
 	offset &= 0xff0;
 
+	KVMTRACE_1D(APIC_ACCESS, apic->vcpu, (u32)offset, handler);
+
 	switch (offset) {
 	case APIC_ID:	/* Local APIC ID */
 		apic_set_reg(apic, APIC_ID, val);
@@ -780,7 +785,8 @@ static void apic_mmio_write(struct kvm_io_device *this,
 
 }
 
-static int apic_mmio_range(struct kvm_io_device *this, gpa_t addr)
+static int apic_mmio_range(struct kvm_io_device *this, gpa_t addr,
+			   int len, int size)
 {
 	struct kvm_lapic *apic = (struct kvm_lapic *)this->private;
 	int ret = 0;
@@ -939,8 +945,8 @@ static int __apic_timer_fn(struct kvm_lapic *apic)
 	int result = 0;
 	wait_queue_head_t *q = &apic->vcpu->wq;
 
-	atomic_inc(&apic->timer.pending);
-	set_bit(KVM_REQ_PENDING_TIMER, &apic->vcpu->requests);
+	if(!atomic_inc_and_test(&apic->timer.pending))
+		set_bit(KVM_REQ_PENDING_TIMER, &apic->vcpu->requests);
 	if (waitqueue_active(q)) {
 		apic->vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 		wake_up_interruptible(q);

@@ -31,6 +31,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu);
 u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu);
 void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8);
 void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value);
+u64 kvm_lapic_get_base(struct kvm_vcpu *vcpu);
 
 int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest);
 int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda);

@@ -66,7 +66,8 @@ static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
 #endif
 
 #if defined(MMU_DEBUG) || defined(AUDIT)
-static int dbg = 1;
+static int dbg = 0;
+module_param(dbg, bool, 0644);
 #endif
 
 #ifndef MMU_DEBUG
@@ -776,6 +777,15 @@ static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
 	BUG();
 }
 
+static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
+				    struct kvm_mmu_page *sp)
+{
+	int i;
+
+	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
+		sp->spt[i] = shadow_trap_nonpresent_pte;
+}
+
 static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
 {
 	unsigned index;
@@ -841,7 +851,10 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	hlist_add_head(&sp->hash_link, bucket);
 	if (!metaphysical)
 		rmap_write_protect(vcpu->kvm, gfn);
-	vcpu->arch.mmu.prefetch_page(vcpu, sp);
+	if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte)
+		vcpu->arch.mmu.prefetch_page(vcpu, sp);
+	else
+		nonpaging_prefetch_page(vcpu, sp);
 	return sp;
 }
 
@@ -917,14 +930,17 @@ static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 	}
 	kvm_mmu_page_unlink_children(kvm, sp);
 	if (!sp->root_count) {
-		if (!sp->role.metaphysical)
+		if (!sp->role.metaphysical && !sp->role.invalid)
 			unaccount_shadowed(kvm, sp->gfn);
 		hlist_del(&sp->hash_link);
 		kvm_mmu_free_page(kvm, sp);
 	} else {
+		int invalid = sp->role.invalid;
 		list_move(&sp->link, &kvm->arch.active_mmu_pages);
 		sp->role.invalid = 1;
 		kvm_reload_remote_mmus(kvm);
+		if (!sp->role.metaphysical && !invalid)
+			unaccount_shadowed(kvm, sp->gfn);
 	}
 	kvm_mmu_reset_last_pte_updated(kvm);
 }
@@ -1103,7 +1119,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 		mark_page_dirty(vcpu->kvm, gfn);
 
 	pgprintk("%s: setting spte %llx\n", __func__, spte);
-	pgprintk("instantiating %s PTE (%s) at %d (%llx) addr %llx\n",
+	pgprintk("instantiating %s PTE (%s) at %ld (%llx) addr %p\n",
 		 (spte&PT_PAGE_SIZE_MASK)? "2MB" : "4kB",
 		 (spte&PT_WRITABLE_MASK)?"RW":"R", gfn, spte, shadow_pte);
 	set_shadow_pte(shadow_pte, spte);
@@ -1122,8 +1138,10 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 		else
 			kvm_release_pfn_clean(pfn);
 	}
-	if (!ptwrite || !*ptwrite)
+	if (speculative) {
 		vcpu->arch.last_pte_updated = shadow_pte;
+		vcpu->arch.last_pte_gfn = gfn;
+	}
 }
 
 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
@@ -1171,9 +1189,10 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
 				return -ENOMEM;
 			}
 
-			table[index] = __pa(new_table->spt)
-				| PT_PRESENT_MASK | PT_WRITABLE_MASK
-				| shadow_user_mask | shadow_x_mask;
+			set_shadow_pte(&table[index],
+				       __pa(new_table->spt)
+				       | PT_PRESENT_MASK | PT_WRITABLE_MASK
+				       | shadow_user_mask | shadow_x_mask);
 		}
 		table_addr = table[index] & PT64_BASE_ADDR_MASK;
 	}
@@ -1211,15 +1230,6 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 }
 
 
-static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
-				   struct kvm_mmu_page *sp)
-{
-	int i;
-
-	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
-		sp->spt[i] = shadow_trap_nonpresent_pte;
-}
-
 static void mmu_free_roots(struct kvm_vcpu *vcpu)
 {
 	int i;
 
@@ -1671,6 +1681,18 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	vcpu->arch.update_pte.pfn = pfn;
 }
 
+static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+	u64 *spte = vcpu->arch.last_pte_updated;
+
+	if (spte
+	    && vcpu->arch.last_pte_gfn == gfn
+	    && shadow_accessed_mask
+	    && !(*spte & shadow_accessed_mask)
+	    && is_shadow_present_pte(*spte))
+		set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
+}
+
 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		       const u8 *new, int bytes)
 {
@@ -1694,6 +1716,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
 	mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
 	spin_lock(&vcpu->kvm->mmu_lock);
+	kvm_mmu_access_page(vcpu, gfn);
 	kvm_mmu_free_some_pages(vcpu);
 	++vcpu->kvm->stat.mmu_pte_write;
 	kvm_mmu_audit(vcpu, "pre pte write");
@@ -1948,7 +1971,7 @@ void kvm_mmu_zap_all(struct kvm *kvm)
 	kvm_flush_remote_tlbs(kvm);
 }
 
-void kvm_mmu_remove_one_alloc_mmu_page(struct kvm *kvm)
+static void kvm_mmu_remove_one_alloc_mmu_page(struct kvm *kvm)
 {
 	struct kvm_mmu_page *page;
 
@@ -1968,6 +1991,8 @@ static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		int npages;
 
+		if (!down_read_trylock(&kvm->slots_lock))
+			continue;
 		spin_lock(&kvm->mmu_lock);
 		npages = kvm->arch.n_alloc_mmu_pages -
 			 kvm->arch.n_free_mmu_pages;
@@ -1980,6 +2005,7 @@ static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
 			nr_to_scan--;
 
 		spin_unlock(&kvm->mmu_lock);
+		up_read(&kvm->slots_lock);
 	}
 	if (kvm_freed)
 		list_move_tail(&kvm_freed->vm_list, &vm_list);

@@ -15,7 +15,8 @@
 #define PT_USER_MASK (1ULL << 2)
 #define PT_PWT_MASK (1ULL << 3)
 #define PT_PCD_MASK (1ULL << 4)
-#define PT_ACCESSED_MASK (1ULL << 5)
+#define PT_ACCESSED_SHIFT 5
+#define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
 #define PT_DIRTY_MASK (1ULL << 6)
 #define PT_PAGE_SIZE_MASK (1ULL << 7)
 #define PT_PAT_MASK (1ULL << 7)

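The PT_ACCESSED_SHIFT addition above pairs with kvm_mmu_access_page() in the mmu.c hunks: expressing the accessed bit as a bit number lets the MMU set it atomically with set_bit() on a live shadow pte instead of a racy read-modify-write of the mask. A self-contained sketch of that idiom (the define mirrors the hunk above; the helper name is illustrative):

    #include <linux/bitops.h>
    #include <linux/types.h>

    #define PT_ACCESSED_SHIFT 5     /* as defined in the hunk above */

    /* Sketch: mark a shadow pte accessed without clobbering concurrent
     * hardware/software updates to the other bits. The cast matches the
     * one used by kvm_mmu_access_page() in this merge. */
    static inline void mark_spte_accessed(u64 *spte)
    {
            set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
    }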
@@ -460,8 +460,9 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
 static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
 				 struct kvm_mmu_page *sp)
 {
-	int i, offset = 0, r = 0;
-	pt_element_t pt;
+	int i, j, offset, r;
+	pt_element_t pt[256 / sizeof(pt_element_t)];
+	gpa_t pte_gpa;
 
 	if (sp->role.metaphysical
 	    || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) {
@@ -469,19 +470,20 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
 		return;
 	}
 
-	if (PTTYPE == 32)
+	pte_gpa = gfn_to_gpa(sp->gfn);
+	if (PTTYPE == 32) {
 		offset = sp->role.quadrant << PT64_LEVEL_BITS;
+		pte_gpa += offset * sizeof(pt_element_t);
+	}
 
-	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
-		gpa_t pte_gpa = gfn_to_gpa(sp->gfn);
-		pte_gpa += (i+offset) * sizeof(pt_element_t);
-
-		r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &pt,
-					  sizeof(pt_element_t));
-		if (r || is_present_pte(pt))
-			sp->spt[i] = shadow_trap_nonpresent_pte;
-		else
-			sp->spt[i] = shadow_notrap_nonpresent_pte;
+	for (i = 0; i < PT64_ENT_PER_PAGE; i += ARRAY_SIZE(pt)) {
+		r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, pt, sizeof pt);
+		pte_gpa += ARRAY_SIZE(pt) * sizeof(pt_element_t);
+		for (j = 0; j < ARRAY_SIZE(pt); ++j)
+			if (r || is_present_pte(pt[j]))
+				sp->spt[i+j] = shadow_trap_nonpresent_pte;
+			else
+				sp->spt[i+j] = shadow_notrap_nonpresent_pte;
 	}
 }
 

@@ -27,6 +27,8 @@
 
 #include <asm/desc.h>
 
+#define __ex(x) __kvm_handle_fault_on_reboot(x)
+
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");
 
@@ -129,17 +131,17 @@ static inline void push_irq(struct kvm_vcpu *vcpu, u8 irq)
 
 static inline void clgi(void)
 {
-	asm volatile (SVM_CLGI);
+	asm volatile (__ex(SVM_CLGI));
 }
 
 static inline void stgi(void)
 {
-	asm volatile (SVM_STGI);
+	asm volatile (__ex(SVM_STGI));
 }
 
 static inline void invlpga(unsigned long addr, u32 asid)
 {
-	asm volatile (SVM_INVLPGA :: "a"(addr), "c"(asid));
+	asm volatile (__ex(SVM_INVLPGA) :: "a"(addr), "c"(asid));
 }
 
 static inline unsigned long kvm_read_cr2(void)
@@ -270,19 +272,11 @@ static int has_svm(void)
 
 static void svm_hardware_disable(void *garbage)
 {
-	struct svm_cpu_data *svm_data
-		= per_cpu(svm_data, raw_smp_processor_id());
+	uint64_t efer;
 
-	if (svm_data) {
-		uint64_t efer;
-
-		wrmsrl(MSR_VM_HSAVE_PA, 0);
-		rdmsrl(MSR_EFER, efer);
-		wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK);
-		per_cpu(svm_data, raw_smp_processor_id()) = NULL;
-		__free_page(svm_data->save_area);
-		kfree(svm_data);
-	}
+	wrmsrl(MSR_VM_HSAVE_PA, 0);
+	rdmsrl(MSR_EFER, efer);
+	wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK);
 }
 
 static void svm_hardware_enable(void *garbage)
@@ -321,6 +315,19 @@ static void svm_hardware_enable(void *garbage)
 		       page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
 }
 
+static void svm_cpu_uninit(int cpu)
+{
+	struct svm_cpu_data *svm_data
+		= per_cpu(svm_data, raw_smp_processor_id());
+
+	if (!svm_data)
+		return;
+
+	per_cpu(svm_data, raw_smp_processor_id()) = NULL;
+	__free_page(svm_data->save_area);
+	kfree(svm_data);
+}
+
 static int svm_cpu_init(int cpu)
 {
 	struct svm_cpu_data *svm_data;
@@ -458,6 +465,11 @@ err:
 
 static __exit void svm_hardware_unsetup(void)
 {
+	int cpu;
+
+	for_each_online_cpu(cpu)
+		svm_cpu_uninit(cpu);
+
 	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
 	iopm_base = 0;
 }
@@ -707,10 +719,6 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu)
 	rdtscll(vcpu->arch.host_tsc);
 }
 
-static void svm_vcpu_decache(struct kvm_vcpu *vcpu)
-{
-}
-
 static void svm_cache_regs(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -949,7 +957,9 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data)
 
 static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
 {
-	return to_svm(vcpu)->db_regs[dr];
+	unsigned long val = to_svm(vcpu)->db_regs[dr];
+	KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler);
+	return val;
 }
 
 static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
@@ -1004,6 +1014,16 @@ static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 
 	fault_address  = svm->vmcb->control.exit_info_2;
 	error_code = svm->vmcb->control.exit_info_1;
+
+	if (!npt_enabled)
+		KVMTRACE_3D(PAGE_FAULT, &svm->vcpu, error_code,
+			    (u32)fault_address, (u32)(fault_address >> 32),
+			    handler);
+	else
+		KVMTRACE_3D(TDP_FAULT, &svm->vcpu, error_code,
+			    (u32)fault_address, (u32)(fault_address >> 32),
+			    handler);
+
 	return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
 }
 
@@ -1081,6 +1101,19 @@ static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	return kvm_emulate_pio(&svm->vcpu, kvm_run, in, size, port);
 }
 
+static int nmi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
+	KVMTRACE_0D(NMI, &svm->vcpu, handler);
+	return 1;
+}
+
+static int intr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
+	++svm->vcpu.stat.irq_exits;
+	KVMTRACE_0D(INTR, &svm->vcpu, handler);
+	return 1;
+}
+
 static int nop_on_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
 	return 1;
@@ -1219,6 +1252,9 @@ static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	if (svm_get_msr(&svm->vcpu, ecx, &data))
 		kvm_inject_gp(&svm->vcpu, 0);
 	else {
+		KVMTRACE_3D(MSR_READ, &svm->vcpu, ecx, (u32)data,
+			    (u32)(data >> 32), handler);
+
 		svm->vmcb->save.rax = data & 0xffffffff;
 		svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
 		svm->next_rip = svm->vmcb->save.rip + 2;
@@ -1284,16 +1320,19 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
 	case MSR_K7_EVNTSEL1:
 	case MSR_K7_EVNTSEL2:
 	case MSR_K7_EVNTSEL3:
+	case MSR_K7_PERFCTR0:
+	case MSR_K7_PERFCTR1:
+	case MSR_K7_PERFCTR2:
+	case MSR_K7_PERFCTR3:
 		/*
-		 * only support writing 0 to the performance counters for now
-		 * to make Windows happy. Should be replaced by a real
-		 * performance counter emulation later.
+		 * Just discard all writes to the performance counters; this
+		 * should keep both older linux and windows 64-bit guests
+		 * happy
 		 */
-		if (data != 0)
-			goto unhandled;
+		pr_unimpl(vcpu, "unimplemented perfctr wrmsr: 0x%x data 0x%llx\n", ecx, data);
+
 		break;
 	default:
-	unhandled:
 		return kvm_set_msr_common(vcpu, ecx, data);
 	}
 	return 0;
@@ -1304,6 +1343,10 @@ static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
 	u64 data = (svm->vmcb->save.rax & -1u)
 		| ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);
+
+	KVMTRACE_3D(MSR_WRITE, &svm->vcpu, ecx, (u32)data, (u32)(data >> 32),
+		    handler);
+
 	svm->next_rip = svm->vmcb->save.rip + 2;
 	if (svm_set_msr(&svm->vcpu, ecx, data))
 		kvm_inject_gp(&svm->vcpu, 0);
@@ -1323,6 +1366,8 @@ static int msr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 static int interrupt_window_interception(struct vcpu_svm *svm,
 				   struct kvm_run *kvm_run)
 {
+	KVMTRACE_0D(PEND_INTR, &svm->vcpu, handler);
+
 	svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR);
 	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
 	/*
@@ -1364,8 +1409,8 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
 	[SVM_EXIT_EXCP_BASE + PF_VECTOR] 	= pf_interception,
 	[SVM_EXIT_EXCP_BASE + NM_VECTOR] 	= nm_interception,
 	[SVM_EXIT_EXCP_BASE + MC_VECTOR] 	= mc_interception,
-	[SVM_EXIT_INTR]				= nop_on_interception,
-	[SVM_EXIT_NMI]				= nop_on_interception,
+	[SVM_EXIT_INTR] 			= intr_interception,
+	[SVM_EXIT_NMI]				= nmi_interception,
 	[SVM_EXIT_SMI]				= nop_on_interception,
 	[SVM_EXIT_INIT]				= nop_on_interception,
 	[SVM_EXIT_VINTR]			= interrupt_window_interception,
@@ -1397,6 +1442,9 @@ static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	struct vcpu_svm *svm = to_svm(vcpu);
 	u32 exit_code = svm->vmcb->control.exit_code;
 
+	KVMTRACE_3D(VMEXIT, vcpu, exit_code, (u32)svm->vmcb->save.rip,
+		    (u32)((u64)svm->vmcb->save.rip >> 32), entryexit);
+
 	if (npt_enabled) {
 		int mmu_reload = 0;
 		if ((vcpu->arch.cr0 ^ svm->vmcb->save.cr0) & X86_CR0_PG) {
@@ -1470,6 +1518,8 @@ static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
 {
 	struct vmcb_control_area *control;
 
+	KVMTRACE_1D(INJ_VIRQ, &svm->vcpu, (u32)irq, handler);
+
 	control = &svm->vmcb->control;
 	control->int_vector = irq;
 	control->int_ctl &= ~V_INTR_PRIO_MASK;
@@ -1660,9 +1710,9 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	sync_lapic_to_cr8(vcpu);
 
 	save_host_msrs(vcpu);
-	fs_selector = read_fs();
-	gs_selector = read_gs();
-	ldt_selector = read_ldt();
+	fs_selector = kvm_read_fs();
+	gs_selector = kvm_read_gs();
+	ldt_selector = kvm_read_ldt();
	svm->host_cr2 = kvm_read_cr2();
 	svm->host_dr6 = read_dr6();
 	svm->host_dr7 = read_dr7();
@@ -1716,17 +1766,17 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		/* Enter guest mode */
 		"push %%rax \n\t"
 		"mov %c[vmcb](%[svm]), %%rax \n\t"
-		SVM_VMLOAD "\n\t"
-		SVM_VMRUN "\n\t"
-		SVM_VMSAVE "\n\t"
+		__ex(SVM_VMLOAD) "\n\t"
+		__ex(SVM_VMRUN) "\n\t"
+		__ex(SVM_VMSAVE) "\n\t"
 		"pop %%rax \n\t"
 #else
 		/* Enter guest mode */
 		"push %%eax \n\t"
 		"mov %c[vmcb](%[svm]), %%eax \n\t"
-		SVM_VMLOAD "\n\t"
-		SVM_VMRUN "\n\t"
-		SVM_VMSAVE "\n\t"
+		__ex(SVM_VMLOAD) "\n\t"
+		__ex(SVM_VMRUN) "\n\t"
+		__ex(SVM_VMSAVE) "\n\t"
 		"pop %%eax \n\t"
 #endif
 
@@ -1795,9 +1845,9 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	write_dr7(svm->host_dr7);
 	kvm_write_cr2(svm->host_cr2);
 
-	load_fs(fs_selector);
-	load_gs(gs_selector);
-	load_ldt(ldt_selector);
+	kvm_load_fs(fs_selector);
+	kvm_load_gs(gs_selector);
+	kvm_load_ldt(ldt_selector);
 	load_host_msrs(vcpu);
 
 	reload_tss(vcpu);
@@ -1889,7 +1939,6 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.prepare_guest_switch = svm_prepare_guest_switch,
 	.vcpu_load = svm_vcpu_load,
 	.vcpu_put = svm_vcpu_put,
-	.vcpu_decache = svm_vcpu_decache,
 
 	.set_guest_debug = svm_guest_debug,
 	.get_msr = svm_get_msr,

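Both the SVM hunks above and the VMX hunks below wrap every raw virtualization instruction in __ex(), i.e. __kvm_handle_fault_on_reboot(): an exception-table fixup so that a VMX/SVM instruction executed after virtualization has been torn down (for example across kexec or emergency reboot) traps into a handler instead of oopsing the host. A simplified schematic of the pattern — this is a sketch of the idiom, not the exact kernel macro text, and the handler name and section directives follow the usual kernel convention:

    /* Sketch: emit the instruction at a numbered label; if it faults,
     * the __ex_table entry redirects execution to a fixup stub that
     * reports the fault instead of letting the CPU oops. */
    #define ____handle_fault_on_reboot(insn)            \
            "666: " insn "\n\t"                         \
            ".pushsection .fixup, \"ax\"\n"             \
            "667: jmp kvm_handle_fault_on_reboot\n\t"   \
            ".popsection\n\t"                           \
            ".pushsection __ex_table, \"a\"\n\t"        \
            _ASM_PTR " 666b, 667b\n\t"                  \
            ".popsection"

The design point is that the wrapped asm stays a single instruction on the hot path; the cost is only the (cold) exception-table entry.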
@ -30,6 +30,8 @@
|
|||
#include <asm/io.h>
|
||||
#include <asm/desc.h>
|
||||
|
||||
#define __ex(x) __kvm_handle_fault_on_reboot(x)
|
||||
|
||||
MODULE_AUTHOR("Qumranet");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
||||
|
@ -53,6 +55,7 @@ struct vmcs {
|
|||
|
||||
struct vcpu_vmx {
|
||||
struct kvm_vcpu vcpu;
|
||||
struct list_head local_vcpus_link;
|
||||
int launched;
|
||||
u8 fail;
|
||||
u32 idt_vectoring_info;
|
||||
|
@ -88,9 +91,11 @@ static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
|
|||
}
|
||||
|
||||
static int init_rmode(struct kvm *kvm);
|
||||
static u64 construct_eptp(unsigned long root_hpa);
|
||||
|
||||
static DEFINE_PER_CPU(struct vmcs *, vmxarea);
|
||||
static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
|
||||
static DEFINE_PER_CPU(struct list_head, vcpus_on_cpu);
|
||||
|
||||
static struct page *vmx_io_bitmap_a;
|
||||
static struct page *vmx_io_bitmap_b;
|
||||
|
@ -260,6 +265,11 @@ static inline int cpu_has_vmx_vpid(void)
|
|||
SECONDARY_EXEC_ENABLE_VPID);
|
||||
}
|
||||
|
||||
static inline int cpu_has_virtual_nmis(void)
|
||||
{
|
||||
return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
|
||||
}
|
||||
|
||||
static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
|
||||
{
|
||||
int i;
|
||||
|
@ -278,7 +288,7 @@ static inline void __invvpid(int ext, u16 vpid, gva_t gva)
|
|||
u64 gva;
|
||||
} operand = { vpid, 0, gva };
|
||||
|
||||
asm volatile (ASM_VMX_INVVPID
|
||||
asm volatile (__ex(ASM_VMX_INVVPID)
|
||||
/* CF==1 or ZF==1 --> rc = -1 */
|
||||
"; ja 1f ; ud2 ; 1:"
|
||||
: : "a"(&operand), "c"(ext) : "cc", "memory");
|
||||
|
@ -290,7 +300,7 @@ static inline void __invept(int ext, u64 eptp, gpa_t gpa)
|
|||
u64 eptp, gpa;
|
||||
} operand = {eptp, gpa};
|
||||
|
||||
asm volatile (ASM_VMX_INVEPT
|
||||
asm volatile (__ex(ASM_VMX_INVEPT)
|
||||
/* CF==1 or ZF==1 --> rc = -1 */
|
||||
"; ja 1f ; ud2 ; 1:\n"
|
||||
: : "a" (&operand), "c" (ext) : "cc", "memory");
|
||||
|
@ -311,7 +321,7 @@ static void vmcs_clear(struct vmcs *vmcs)
|
|||
u64 phys_addr = __pa(vmcs);
|
||||
u8 error;
|
||||
|
||||
asm volatile (ASM_VMX_VMCLEAR_RAX "; setna %0"
|
||||
asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0"
|
||||
: "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
|
||||
: "cc", "memory");
|
||||
if (error)
|
||||
|
@ -329,6 +339,9 @@ static void __vcpu_clear(void *arg)
|
|||
if (per_cpu(current_vmcs, cpu) == vmx->vmcs)
|
||||
per_cpu(current_vmcs, cpu) = NULL;
|
||||
rdtscll(vmx->vcpu.arch.host_tsc);
|
||||
list_del(&vmx->local_vcpus_link);
|
||||
vmx->vcpu.cpu = -1;
|
||||
vmx->launched = 0;
|
||||
}
|
||||
|
||||
static void vcpu_clear(struct vcpu_vmx *vmx)
|
||||
|
@ -336,7 +349,6 @@ static void vcpu_clear(struct vcpu_vmx *vmx)
|
|||
if (vmx->vcpu.cpu == -1)
|
||||
return;
|
||||
smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 1);
|
||||
vmx->launched = 0;
|
||||
}
|
||||
|
||||
static inline void vpid_sync_vcpu_all(struct vcpu_vmx *vmx)
|
||||
|
@ -378,7 +390,7 @@ static unsigned long vmcs_readl(unsigned long field)
|
|||
{
|
||||
unsigned long value;
|
||||
|
||||
asm volatile (ASM_VMX_VMREAD_RDX_RAX
|
||||
asm volatile (__ex(ASM_VMX_VMREAD_RDX_RAX)
|
||||
: "=a"(value) : "d"(field) : "cc");
|
||||
return value;
|
||||
}
|
||||
|
@ -413,7 +425,7 @@ static void vmcs_writel(unsigned long field, unsigned long value)
|
|||
{
|
||||
u8 error;
|
||||
|
||||
asm volatile (ASM_VMX_VMWRITE_RAX_RDX "; setna %0"
|
||||
asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) "; setna %0"
|
||||
: "=q"(error) : "a"(value), "d"(field) : "cc");
|
||||
if (unlikely(error))
|
||||
vmwrite_error(field, value);
|
||||
|
@ -431,10 +443,8 @@ static void vmcs_write32(unsigned long field, u32 value)
|
|||
|
||||
static void vmcs_write64(unsigned long field, u64 value)
|
||||
{
|
||||
#ifdef CONFIG_X86_64
|
||||
vmcs_writel(field, value);
|
||||
#else
|
||||
vmcs_writel(field, value);
|
||||
#ifndef CONFIG_X86_64
|
||||
asm volatile ("");
|
||||
vmcs_writel(field+1, value >> 32);
|
||||
#endif
|
||||
|
@ -474,7 +484,7 @@ static void reload_tss(void)
|
|||
struct descriptor_table gdt;
|
||||
struct desc_struct *descs;
|
||||
|
||||
get_gdt(&gdt);
|
||||
kvm_get_gdt(&gdt);
|
||||
descs = (void *)gdt.base;
|
||||
descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
|
||||
load_TR_desc();
|
||||
|
@ -530,9 +540,9 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
|
|||
* Set host fs and gs selectors. Unfortunately, 22.2.3 does not
|
||||
* allow segment selectors with cpl > 0 or ti == 1.
|
||||
*/
|
||||
vmx->host_state.ldt_sel = read_ldt();
|
||||
vmx->host_state.ldt_sel = kvm_read_ldt();
|
||||
vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
|
||||
vmx->host_state.fs_sel = read_fs();
|
||||
vmx->host_state.fs_sel = kvm_read_fs();
|
||||
if (!(vmx->host_state.fs_sel & 7)) {
|
||||
vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
|
||||
vmx->host_state.fs_reload_needed = 0;
|
||||
|
@ -540,7 +550,7 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
|
|||
vmcs_write16(HOST_FS_SELECTOR, 0);
|
||||
vmx->host_state.fs_reload_needed = 1;
|
||||
}
|
||||
vmx->host_state.gs_sel = read_gs();
|
||||
vmx->host_state.gs_sel = kvm_read_gs();
|
||||
if (!(vmx->host_state.gs_sel & 7))
|
||||
vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
|
||||
else {
|
||||
|
@ -576,15 +586,15 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
|
|||
++vmx->vcpu.stat.host_state_reload;
|
||||
vmx->host_state.loaded = 0;
|
||||
if (vmx->host_state.fs_reload_needed)
|
||||
load_fs(vmx->host_state.fs_sel);
|
||||
kvm_load_fs(vmx->host_state.fs_sel);
|
||||
if (vmx->host_state.gs_ldt_reload_needed) {
|
||||
load_ldt(vmx->host_state.ldt_sel);
|
||||
kvm_load_ldt(vmx->host_state.ldt_sel);
|
||||
/*
|
||||
* If we have to reload gs, we must take care to
|
||||
* preserve our gs base.
|
||||
*/
|
||||
local_irq_save(flags);
|
||||
load_gs(vmx->host_state.gs_sel);
|
||||
kvm_load_gs(vmx->host_state.gs_sel);
|
||||
#ifdef CONFIG_X86_64
|
||||
wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
|
||||
#endif
|
||||
|
@ -617,13 +627,17 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
|
|||
vcpu_clear(vmx);
|
||||
kvm_migrate_timers(vcpu);
|
||||
vpid_sync_vcpu_all(vmx);
|
||||
local_irq_disable();
|
||||
list_add(&vmx->local_vcpus_link,
|
||||
&per_cpu(vcpus_on_cpu, cpu));
|
||||
local_irq_enable();
|
||||
}
|
||||
|
||||
if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
|
||||
u8 error;
|
||||
|
||||
per_cpu(current_vmcs, cpu) = vmx->vmcs;
|
||||
asm volatile (ASM_VMX_VMPTRLD_RAX "; setna %0"
|
||||
asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
|
||||
: "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
|
||||
: "cc");
|
||||
if (error)
|
||||
|
@ -640,8 +654,8 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
|
|||
* Linux uses per-cpu TSS and GDT, so set these when switching
|
||||
* processors.
|
||||
*/
|
||||
vmcs_writel(HOST_TR_BASE, read_tr_base()); /* 22.2.4 */
|
||||
get_gdt(&dt);
|
||||
vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
|
||||
kvm_get_gdt(&dt);
|
||||
vmcs_writel(HOST_GDTR_BASE, dt.base); /* 22.2.4 */
|
||||
|
||||
rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
|
||||
|
@ -684,11 +698,6 @@ static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
|
|||
update_exception_bitmap(vcpu);
|
||||
}
|
||||
|
||||
static void vmx_vcpu_decache(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
vcpu_clear(to_vmx(vcpu));
|
||||
}
|
||||
|
||||
static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return vmcs_readl(GUEST_RFLAGS);
|
||||
|
@ -912,6 +921,18 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
|
|||
break;
|
||||
case MSR_IA32_TIME_STAMP_COUNTER:
|
||||
guest_write_tsc(data);
|
||||
break;
|
||||
case MSR_P6_PERFCTR0:
|
||||
case MSR_P6_PERFCTR1:
|
||||
case MSR_P6_EVNTSEL0:
|
||||
case MSR_P6_EVNTSEL1:
|
||||
/*
|
||||
* Just discard all writes to the performance counters; this
|
||||
* should keep both older linux and windows 64-bit guests
|
||||
* happy
|
||||
*/
|
||||
pr_unimpl(vcpu, "unimplemented perfctr wrmsr: 0x%x data 0x%llx\n", msr_index, data);
|
||||
|
||||
break;
|
||||
default:
|
||||
vmx_load_host_state(vmx);
|
||||
|
@ -1022,6 +1043,7 @@ static void hardware_enable(void *garbage)
|
|||
u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
|
||||
u64 old;
|
||||
|
||||
INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu));
|
||||
rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
|
||||
if ((old & (MSR_IA32_FEATURE_CONTROL_LOCKED |
|
||||
MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED))
|
||||
|
@ -1032,13 +1054,25 @@ static void hardware_enable(void *garbage)
|
|||
MSR_IA32_FEATURE_CONTROL_LOCKED |
|
||||
MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED);
|
||||
write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
|
||||
asm volatile (ASM_VMX_VMXON_RAX : : "a"(&phys_addr), "m"(phys_addr)
|
||||
asm volatile (ASM_VMX_VMXON_RAX
|
||||
: : "a"(&phys_addr), "m"(phys_addr)
|
||||
: "memory", "cc");
|
||||
}
|
||||
|
||||
static void vmclear_local_vcpus(void)
|
||||
{
|
||||
int cpu = raw_smp_processor_id();
|
||||
struct vcpu_vmx *vmx, *n;
|
||||
|
||||
list_for_each_entry_safe(vmx, n, &per_cpu(vcpus_on_cpu, cpu),
|
||||
local_vcpus_link)
|
||||
__vcpu_clear(vmx);
|
||||
}
|
||||
|
||||
static void hardware_disable(void *garbage)
|
||||
{
|
||||
asm volatile (ASM_VMX_VMXOFF : : : "cc");
|
||||
vmclear_local_vcpus();
|
||||
asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
|
||||
write_cr4(read_cr4() & ~X86_CR4_VMXE);
|
||||
}
|
||||
|
||||
|
@ -1072,7 +1106,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
|
|||
u32 _vmentry_control = 0;
|
||||
|
||||
min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
|
||||
opt = 0;
|
||||
opt = PIN_BASED_VIRTUAL_NMIS;
|
||||
if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
|
||||
&_pin_based_exec_control) < 0)
|
||||
return -EIO;
|
||||
|
@ -1389,6 +1423,8 @@ static void exit_lmode(struct kvm_vcpu *vcpu)
|
|||
static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
vpid_sync_vcpu_all(to_vmx(vcpu));
|
||||
if (vm_need_ept())
|
||||
ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa));
|
||||
}
|
||||
|
||||
static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
|
||||
|
@ -1420,7 +1456,7 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
|
|||
if (!(cr0 & X86_CR0_PG)) {
|
||||
/* From paging/starting to nonpaging */
|
||||
vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
|
||||
vmcs_config.cpu_based_exec_ctrl |
|
||||
vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) |
|
||||
(CPU_BASED_CR3_LOAD_EXITING |
|
||||
CPU_BASED_CR3_STORE_EXITING));
|
||||
vcpu->arch.cr0 = cr0;
|
||||
|
@ -1430,7 +1466,7 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
|
|||
} else if (!is_paging(vcpu)) {
|
||||
/* From nonpaging to paging */
|
||||
vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
|
||||
vmcs_config.cpu_based_exec_ctrl &
|
||||
vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
|
||||
~(CPU_BASED_CR3_LOAD_EXITING |
|
||||
CPU_BASED_CR3_STORE_EXITING));
|
||||
vcpu->arch.cr0 = cr0;
|
||||
|
@ -1821,7 +1857,7 @@ static void allocate_vpid(struct vcpu_vmx *vmx)
|
|||
spin_unlock(&vmx_vpid_lock);
|
||||
}
|
||||
|
||||
void vmx_disable_intercept_for_msr(struct page *msr_bitmap, u32 msr)
|
||||
static void vmx_disable_intercept_for_msr(struct page *msr_bitmap, u32 msr)
|
||||
{
|
||||
void *va;
|
||||
|
||||
|
@ -1907,8 +1943,8 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
|
|||
vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
|
||||
vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
|
||||
vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */
|
||||
vmcs_write16(HOST_FS_SELECTOR, read_fs()); /* 22.2.4 */
|
||||
vmcs_write16(HOST_GS_SELECTOR, read_gs()); /* 22.2.4 */
|
||||
vmcs_write16(HOST_FS_SELECTOR, kvm_read_fs()); /* 22.2.4 */
|
||||
vmcs_write16(HOST_GS_SELECTOR, kvm_read_gs()); /* 22.2.4 */
|
||||
vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
|
||||
#ifdef CONFIG_X86_64
|
||||
rdmsrl(MSR_FS_BASE, a);
|
||||
|
@ -1922,7 +1958,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
|
|||
|
||||
vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */
|
||||
|
||||
get_idt(&dt);
|
||||
kvm_get_idt(&dt);
|
||||
vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
|
||||
|
||||
asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
|
||||
|
@ -2114,6 +2150,13 @@ static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq)
|
|||
irq | INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
|
||||
}
|
||||
|
||||
static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
|
||||
INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR);
|
||||
vcpu->arch.nmi_pending = 0;
|
||||
}
|
||||
|
||||
static void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
int word_index = __ffs(vcpu->arch.irq_summary);
|
||||
|
@ -2554,8 +2597,6 @@ static int handle_apic_access(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
|
|||
exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
|
||||
offset = exit_qualification & 0xffful;
|
||||
|
||||
KVMTRACE_1D(APIC_ACCESS, vcpu, (u32)offset, handler);
|
||||
|
||||
er = emulate_instruction(vcpu, kvm_run, 0, 0, 0);
|
||||
|
||||
if (er != EMULATE_DONE) {
|
||||
|
@ -2639,6 +2680,19 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
|
|||
return 1;
|
||||
}
|
||||
|
||||
static int handle_nmi_window(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
|
||||
{
|
||||
u32 cpu_based_vm_exec_control;
|
||||
|
||||
/* clear pending NMI */
|
||||
cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
|
||||
cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
|
||||
vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
|
||||
++vcpu->stat.nmi_window_exits;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* The exit handlers return 1 if the exit was handled fully and guest execution
|
||||
* may resume. Otherwise they set the kvm_run parameter to indicate what needs
|
||||
|
@ -2649,6 +2703,7 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
|
|||
[EXIT_REASON_EXCEPTION_NMI] = handle_exception,
|
||||
[EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt,
|
||||
[EXIT_REASON_TRIPLE_FAULT] = handle_triple_fault,
|
||||
[EXIT_REASON_NMI_WINDOW] = handle_nmi_window,
|
||||
[EXIT_REASON_IO_INSTRUCTION] = handle_io,
|
||||
[EXIT_REASON_CR_ACCESS] = handle_cr,
|
||||
[EXIT_REASON_DR_ACCESS] = handle_dr,
|
||||
|
@ -2736,17 +2791,52 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
|
|||
vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
|
||||
}
|
||||
|
||||
static void enable_nmi_window(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
u32 cpu_based_vm_exec_control;
|
||||
|
||||
if (!cpu_has_virtual_nmis())
|
||||
return;
|
||||
|
||||
cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
|
||||
cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_NMI_PENDING;
|
||||
vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
|
||||
}
|
||||
|
||||
static int vmx_nmi_enabled(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
u32 guest_intr = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
|
||||
return !(guest_intr & (GUEST_INTR_STATE_NMI |
|
||||
GUEST_INTR_STATE_MOV_SS |
|
||||
GUEST_INTR_STATE_STI));
|
||||
}
|
||||
|
||||
static int vmx_irq_enabled(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
u32 guest_intr = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
|
||||
return (!(guest_intr & (GUEST_INTR_STATE_MOV_SS |
|
||||
GUEST_INTR_STATE_STI)) &&
|
||||
(vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF));
|
||||
}
|
||||
|
||||
static void enable_intr_window(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
if (vcpu->arch.nmi_pending)
|
||||
enable_nmi_window(vcpu);
|
||||
else if (kvm_cpu_has_interrupt(vcpu))
|
||||
enable_irq_window(vcpu);
|
||||
}
|
||||
|
||||
static void vmx_intr_assist(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
||||
u32 idtv_info_field, intr_info_field;
|
||||
int has_ext_irq, interrupt_window_open;
|
||||
u32 idtv_info_field, intr_info_field, exit_intr_info_field;
|
||||
int vector;
|
||||
|
||||
update_tpr_threshold(vcpu);
|
||||
|
||||
has_ext_irq = kvm_cpu_has_interrupt(vcpu);
|
||||
intr_info_field = vmcs_read32(VM_ENTRY_INTR_INFO_FIELD);
|
||||
exit_intr_info_field = vmcs_read32(VM_EXIT_INTR_INFO);
|
||||
idtv_info_field = vmx->idt_vectoring_info;
|
||||
if (intr_info_field & INTR_INFO_VALID_MASK) {
if (idtv_info_field & INTR_INFO_VALID_MASK) {

@ -2754,8 +2844,7 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu)
if (printk_ratelimit())
printk(KERN_ERR "Fault when IDT_Vectoring\n");
}
if (has_ext_irq)
enable_irq_window(vcpu);
enable_intr_window(vcpu);
return;
}
if (unlikely(idtv_info_field & INTR_INFO_VALID_MASK)) {

@ -2765,30 +2854,56 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu)
u8 vect = idtv_info_field & VECTORING_INFO_VECTOR_MASK;

vmx_inject_irq(vcpu, vect);
if (unlikely(has_ext_irq))
enable_irq_window(vcpu);
enable_intr_window(vcpu);
return;
}

KVMTRACE_1D(REDELIVER_EVT, vcpu, idtv_info_field, handler);

vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field);
/*
* SDM 3: 25.7.1.2
* Clear bit "block by NMI" before VM entry if a NMI delivery
* faulted.
*/
if ((idtv_info_field & VECTORING_INFO_TYPE_MASK)
== INTR_TYPE_NMI_INTR && cpu_has_virtual_nmis())
vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
~GUEST_INTR_STATE_NMI);

vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field
& ~INTR_INFO_RESVD_BITS_MASK);
vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
vmcs_read32(VM_EXIT_INSTRUCTION_LEN));

if (unlikely(idtv_info_field & INTR_INFO_DELIVER_CODE_MASK))
vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
vmcs_read32(IDT_VECTORING_ERROR_CODE));
if (unlikely(has_ext_irq))
enable_irq_window(vcpu);
enable_intr_window(vcpu);
return;
}
if (!has_ext_irq)
if (cpu_has_virtual_nmis()) {
/*
* SDM 3: 25.7.1.2
* Re-set bit "block by NMI" before VM entry if vmexit caused by
* a guest IRET fault.
*/
if ((exit_intr_info_field & INTR_INFO_UNBLOCK_NMI) &&
(exit_intr_info_field & INTR_INFO_VECTOR_MASK) != 8)
vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) |
GUEST_INTR_STATE_NMI);
else if (vcpu->arch.nmi_pending) {
if (vmx_nmi_enabled(vcpu))
vmx_inject_nmi(vcpu);
enable_intr_window(vcpu);
return;
}

}
if (!kvm_cpu_has_interrupt(vcpu))
return;
interrupt_window_open =
((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0);
if (interrupt_window_open) {
if (vmx_irq_enabled(vcpu)) {
vector = kvm_cpu_get_interrupt(vcpu);
vmx_inject_irq(vcpu, vector);
kvm_timer_intr_post(vcpu, vector);

@ -2838,7 +2953,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
"push %%edx; push %%ebp;"
"push %%ecx \n\t"
#endif
ASM_VMX_VMWRITE_RSP_RDX "\n\t"
__ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t"
/* Check if vmlaunch of vmresume is needed */
"cmpl $0, %c[launched](%0) \n\t"
/* Load guest registers. Don't clobber flags. */

@ -2873,9 +2988,9 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
#endif
/* Enter guest mode */
"jne .Llaunched \n\t"
ASM_VMX_VMLAUNCH "\n\t"
__ex(ASM_VMX_VMLAUNCH) "\n\t"
"jmp .Lkvm_vmx_return \n\t"
".Llaunched: " ASM_VMX_VMRESUME "\n\t"
".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
".Lkvm_vmx_return: "
/* Save guest registers, load host registers, keep flags */
#ifdef CONFIG_X86_64

@ -2949,7 +3064,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
fixup_rmode_irq(vmx);

vcpu->arch.interrupt_window_open =
(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS)) == 0;

asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
vmx->launched = 1;

@ -2957,7 +3073,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
intr_info = vmcs_read32(VM_EXIT_INTR_INFO);

/* We need to handle NMIs before interrupts are enabled */
if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) { /* nmi */
if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200 &&
(intr_info & INTR_INFO_VALID_MASK)) {
KVMTRACE_0D(NMI, vcpu, handler);
asm("int $2");
}

@ -2968,7 +3085,7 @@ static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
struct vcpu_vmx *vmx = to_vmx(vcpu);

if (vmx->vmcs) {
on_each_cpu(__vcpu_clear, vmx, 1);
vcpu_clear(vmx);
free_vmcs(vmx->vmcs);
vmx->vmcs = NULL;
}

@ -3095,7 +3212,6 @@ static struct kvm_x86_ops vmx_x86_ops = {
.prepare_guest_switch = vmx_save_host_state,
.vcpu_load = vmx_vcpu_load,
.vcpu_put = vmx_vcpu_put,
.vcpu_decache = vmx_vcpu_decache,

.set_guest_debug = set_guest_debug,
.guest_debug_pre = kvm_guest_debug_pre,
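The interrupt-window test in vmx_vcpu_run() above stops open-coding the low two interruptibility bits ("& 3") and names them through the new GUEST_INTR_STATE_STI and GUEST_INTR_STATE_MOV_SS flags. A minimal standalone sketch of the equivalence, using the flag values from the vmx.h hunk below (the window_open() helper is illustrative, not from the source):

#include <assert.h>

#define GUEST_INTR_STATE_STI    0x00000001
#define GUEST_INTR_STATE_MOV_SS 0x00000002

/* Hypothetical helper: injection is allowed only when neither an STI
 * shadow nor a MOV SS shadow blocks event delivery. */
static int window_open(unsigned int interruptibility)
{
    return (interruptibility &
            (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS)) == 0;
}

int main(void)
{
    /* The named-flag mask equals the old magic "& 3" test. */
    assert((GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS) == 3);
    assert(window_open(0) && !window_open(GUEST_INTR_STATE_MOV_SS));
    return 0;
}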
@ -40,6 +40,7 @@
#define CPU_BASED_CR8_LOAD_EXITING 0x00080000
#define CPU_BASED_CR8_STORE_EXITING 0x00100000
#define CPU_BASED_TPR_SHADOW 0x00200000
#define CPU_BASED_VIRTUAL_NMI_PENDING 0x00400000
#define CPU_BASED_MOV_DR_EXITING 0x00800000
#define CPU_BASED_UNCOND_IO_EXITING 0x01000000
#define CPU_BASED_USE_IO_BITMAPS 0x02000000

@ -216,7 +217,7 @@ enum vmcs_field {
#define EXIT_REASON_TRIPLE_FAULT 2

#define EXIT_REASON_PENDING_INTERRUPT 7

#define EXIT_REASON_NMI_WINDOW 8
#define EXIT_REASON_TASK_SWITCH 9
#define EXIT_REASON_CPUID 10
#define EXIT_REASON_HLT 12

@ -251,7 +252,9 @@ enum vmcs_field {
#define INTR_INFO_VECTOR_MASK 0xff /* 7:0 */
#define INTR_INFO_INTR_TYPE_MASK 0x700 /* 10:8 */
#define INTR_INFO_DELIVER_CODE_MASK 0x800 /* 11 */
#define INTR_INFO_UNBLOCK_NMI 0x1000 /* 12 */
#define INTR_INFO_VALID_MASK 0x80000000 /* 31 */
#define INTR_INFO_RESVD_BITS_MASK 0x7ffff000

#define VECTORING_INFO_VECTOR_MASK INTR_INFO_VECTOR_MASK
#define VECTORING_INFO_TYPE_MASK INTR_INFO_INTR_TYPE_MASK

@ -259,9 +262,16 @@ enum vmcs_field {
#define VECTORING_INFO_VALID_MASK INTR_INFO_VALID_MASK

#define INTR_TYPE_EXT_INTR (0 << 8) /* external interrupt */
#define INTR_TYPE_NMI_INTR (2 << 8) /* NMI */
#define INTR_TYPE_EXCEPTION (3 << 8) /* processor exception */
#define INTR_TYPE_SOFT_INTR (4 << 8) /* software interrupt */

/* GUEST_INTERRUPTIBILITY_INFO flags. */
#define GUEST_INTR_STATE_STI 0x00000001
#define GUEST_INTR_STATE_MOV_SS 0x00000002
#define GUEST_INTR_STATE_SMI 0x00000004
#define GUEST_INTR_STATE_NMI 0x00000008

/*
* Exit Qualifications for MOV for Control Register Access
*/
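The INTR_INFO_* masks above carve the 32-bit interruption-information field into vector (7:0), type (10:8), error-code flag (11) and validity (31) pieces, which is exactly how vmx_intr_assist() picks events apart. A standalone decoding sketch with the constants copied from this hunk (the example value is made up):

#include <stdio.h>

#define INTR_INFO_VECTOR_MASK       0xff       /* 7:0 */
#define INTR_INFO_INTR_TYPE_MASK    0x700      /* 10:8 */
#define INTR_INFO_DELIVER_CODE_MASK 0x800      /* 11 */
#define INTR_INFO_VALID_MASK        0x80000000 /* 31 */
#define INTR_TYPE_NMI_INTR          (2 << 8)   /* NMI */

int main(void)
{
    unsigned int intr_info = 0x80000202; /* example: valid NMI, vector 2 */

    if (intr_info & INTR_INFO_VALID_MASK) {
        printf("vector %u, type %#x, error code %s\n",
               intr_info & INTR_INFO_VECTOR_MASK,
               intr_info & INTR_INFO_INTR_TYPE_MASK,
               (intr_info & INTR_INFO_DELIVER_CODE_MASK) ?
               "present" : "absent");
        if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR)
            printf("this is an NMI\n");
    }
    return 0;
}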
@ -72,6 +72,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "mmio_exits", VCPU_STAT(mmio_exits) },
{ "signal_exits", VCPU_STAT(signal_exits) },
{ "irq_window", VCPU_STAT(irq_window_exits) },
{ "nmi_window", VCPU_STAT(nmi_window_exits) },
{ "halt_exits", VCPU_STAT(halt_exits) },
{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
{ "hypercalls", VCPU_STAT(hypercalls) },

@ -173,6 +174,12 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
}

void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
vcpu->arch.nmi_pending = 1;
}
EXPORT_SYMBOL_GPL(kvm_inject_nmi);

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
WARN_ON(vcpu->arch.exception.pending);

@ -604,6 +611,38 @@ static void kvm_write_guest_time(struct kvm_vcpu *v)
mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
}

static bool msr_mtrr_valid(unsigned msr)
{
switch (msr) {
case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
case MSR_MTRRfix64K_00000:
case MSR_MTRRfix16K_80000:
case MSR_MTRRfix16K_A0000:
case MSR_MTRRfix4K_C0000:
case MSR_MTRRfix4K_C8000:
case MSR_MTRRfix4K_D0000:
case MSR_MTRRfix4K_D8000:
case MSR_MTRRfix4K_E0000:
case MSR_MTRRfix4K_E8000:
case MSR_MTRRfix4K_F0000:
case MSR_MTRRfix4K_F8000:
case MSR_MTRRdefType:
case MSR_IA32_CR_PAT:
return true;
case 0x2f8:
return true;
}
return false;
}

static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
if (!msr_mtrr_valid(msr))
return 1;

vcpu->arch.mtrr[msr - 0x200] = data;
return 0;
}

int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{

@ -625,8 +664,9 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
break;
case MSR_IA32_UCODE_REV:
case MSR_IA32_UCODE_WRITE:
case 0x200 ... 0x2ff: /* MTRRs */
break;
case 0x200 ... 0x2ff:
return set_msr_mtrr(vcpu, msr, data);
case MSR_IA32_APICBASE:
kvm_set_apic_base(vcpu, data);
break;

@ -684,6 +724,15 @@ int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
}

static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
if (!msr_mtrr_valid(msr))
return 1;

*pdata = vcpu->arch.mtrr[msr - 0x200];
return 0;
}

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
u64 data;

@ -705,11 +754,13 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
case MSR_IA32_MC0_MISC+16:
case MSR_IA32_UCODE_REV:
case MSR_IA32_EBL_CR_POWERON:
/* MTRR registers */
case 0xfe:
case 0x200 ... 0x2ff:
data = 0;
break;
case MSR_MTRRcap:
data = 0x500 | KVM_NR_VAR_MTRR;
break;
case 0x200 ... 0x2ff:
return get_msr_mtrr(vcpu, msr, pdata);
case 0xcd: /* fsb frequency */
data = 3;
break;
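set_msr_mtrr() and get_msr_mtrr() above index the per-vcpu array with msr - 0x200, which works because every MSR admitted by msr_mtrr_valid() sits in the 0x200-0x2ff window and the new mtrr[] field (declared in the asm-x86/kvm_host.h hunk further down) holds 0x100 slots. A standalone check of the arithmetic, with the constants taken from this merge:

#include <assert.h>

#define KVM_NR_VAR_MTRR 8     /* from asm-x86/kvm_host.h below */
#define MSR_MTRRdefType 0x2ff /* architectural MSR number */

int main(void)
{
    /* 8 variable-range base/mask pairs occupy 0x200..0x20f... */
    assert(0x200 + 2 * KVM_NR_VAR_MTRR - 1 == 0x20f);
    /* ...and the highest accepted MSR lands in the last mtrr[] slot. */
    assert(MSR_MTRRdefType - 0x200 == 0x100 - 1);
    return 0;
}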
@ -817,41 +868,6 @@ out:
return r;
}

/*
* Make sure that a cpu that is being hot-unplugged does not have any vcpus
* cached on it.
*/
void decache_vcpus_on_cpu(int cpu)
{
struct kvm *vm;
struct kvm_vcpu *vcpu;
int i;

spin_lock(&kvm_lock);
list_for_each_entry(vm, &vm_list, vm_list)
for (i = 0; i < KVM_MAX_VCPUS; ++i) {
vcpu = vm->vcpus[i];
if (!vcpu)
continue;
/*
* If the vcpu is locked, then it is running on some
* other cpu and therefore it is not cached on the
* cpu in question.
*
* If it's not locked, check the last cpu it executed
* on.
*/
if (mutex_trylock(&vcpu->mutex)) {
if (vcpu->cpu == cpu) {
kvm_x86_ops->vcpu_decache(vcpu);
vcpu->cpu = -1;
}
mutex_unlock(&vcpu->mutex);
}
}
spin_unlock(&kvm_lock);
}

int kvm_dev_ioctl_check_extension(long ext)
{
int r;

@ -869,6 +885,9 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_MP_STATE:
r = 1;
break;
case KVM_CAP_COALESCED_MMIO:
r = KVM_COALESCED_MMIO_PAGE_OFFSET;
break;
case KVM_CAP_VAPIC:
r = !kvm_x86_ops->cpu_has_accelerated_tpr();
break;
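Returning KVM_COALESCED_MMIO_PAGE_OFFSET instead of a bare 1 lets the capability query double as a discovery mechanism: userspace learns in one call both that the feature exists and at which page of the vcpu mmap area the ring lives. A hedged userspace sketch (KVM_CHECK_EXTENSION is the long-standing /dev/kvm ioctl; error handling trimmed):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
    int kvm = open("/dev/kvm", O_RDWR);
    if (kvm < 0)
        return 1;

    /* 0 means absent; a positive value is the ring's page offset
     * within the vcpu mmap region (2 on x86 in this merge). */
    int off = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_COALESCED_MMIO);
    if (off > 0)
        printf("coalesced MMIO ring at vcpu mmap page %d\n", off);
    else
        printf("coalesced MMIO not supported\n");
    return 0;
}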
@ -1781,13 +1800,14 @@ static void kvm_init_msr_list(void)
* Only apic need an MMIO device hook, so shortcut now..
*/
static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu,
gpa_t addr)
gpa_t addr, int len,
int is_write)
{
struct kvm_io_device *dev;

if (vcpu->arch.apic) {
dev = &vcpu->arch.apic->dev;
if (dev->in_range(dev, addr))
if (dev->in_range(dev, addr, len, is_write))
return dev;
}
return NULL;

@ -1795,13 +1815,15 @@ static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu,


static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
gpa_t addr)
gpa_t addr, int len,
int is_write)
{
struct kvm_io_device *dev;

dev = vcpu_find_pervcpu_dev(vcpu, addr);
dev = vcpu_find_pervcpu_dev(vcpu, addr, len, is_write);
if (dev == NULL)
dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr);
dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr, len,
is_write);
return dev;
}

@ -1869,7 +1891,7 @@ mmio:
* Is this MMIO handled locally?
*/
mutex_lock(&vcpu->kvm->lock);
mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
mmio_dev = vcpu_find_mmio_dev(vcpu, gpa, bytes, 0);
if (mmio_dev) {
kvm_iodevice_read(mmio_dev, gpa, bytes, val);
mutex_unlock(&vcpu->kvm->lock);

@ -1924,7 +1946,7 @@ mmio:
* Is this MMIO handled locally?
*/
mutex_lock(&vcpu->kvm->lock);
mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
mmio_dev = vcpu_find_mmio_dev(vcpu, gpa, bytes, 1);
if (mmio_dev) {
kvm_iodevice_write(mmio_dev, gpa, bytes, val);
mutex_unlock(&vcpu->kvm->lock);

@ -2020,6 +2042,7 @@ int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)

int emulate_clts(struct kvm_vcpu *vcpu)
{
KVMTRACE_0D(CLTS, vcpu, handler);
kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 & ~X86_CR0_TS);
return X86EMUL_CONTINUE;
}

@ -2053,21 +2076,19 @@ int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)

void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
{
static int reported;
u8 opcodes[4];
unsigned long rip = vcpu->arch.rip;
unsigned long rip_linear;

rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);

if (reported)
if (!printk_ratelimit())
return;

rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);

emulator_read_std(rip_linear, (void *)opcodes, 4, vcpu);

printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
reported = 1;
}
EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);

@ -2105,27 +2126,6 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
? X86EMUL_MODE_PROT64 : cs_db
? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;

if (vcpu->arch.emulate_ctxt.mode == X86EMUL_MODE_PROT64) {
vcpu->arch.emulate_ctxt.cs_base = 0;
vcpu->arch.emulate_ctxt.ds_base = 0;
vcpu->arch.emulate_ctxt.es_base = 0;
vcpu->arch.emulate_ctxt.ss_base = 0;
} else {
vcpu->arch.emulate_ctxt.cs_base =
get_segment_base(vcpu, VCPU_SREG_CS);
vcpu->arch.emulate_ctxt.ds_base =
get_segment_base(vcpu, VCPU_SREG_DS);
vcpu->arch.emulate_ctxt.es_base =
get_segment_base(vcpu, VCPU_SREG_ES);
vcpu->arch.emulate_ctxt.ss_base =
get_segment_base(vcpu, VCPU_SREG_SS);
}

vcpu->arch.emulate_ctxt.gs_base =
get_segment_base(vcpu, VCPU_SREG_GS);
vcpu->arch.emulate_ctxt.fs_base =
get_segment_base(vcpu, VCPU_SREG_FS);

r = x86_decode_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);

/* Reject the instructions other than VMCALL/VMMCALL when

@ -2300,9 +2300,10 @@ static void pio_string_write(struct kvm_io_device *pio_dev,
}

static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
gpa_t addr)
gpa_t addr, int len,
int is_write)
{
return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr);
return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr, len, is_write);
}

int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,

@ -2331,11 +2332,10 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,

kvm_x86_ops->cache_regs(vcpu);
memcpy(vcpu->arch.pio_data, &vcpu->arch.regs[VCPU_REGS_RAX], 4);
kvm_x86_ops->decache_regs(vcpu);

kvm_x86_ops->skip_emulated_instruction(vcpu);

pio_dev = vcpu_find_pio_dev(vcpu, port);
pio_dev = vcpu_find_pio_dev(vcpu, port, size, !in);
if (pio_dev) {
kernel_pio(pio_dev, vcpu, vcpu->arch.pio_data);
complete_pio(vcpu);

@ -2417,7 +2417,9 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
}
}

pio_dev = vcpu_find_pio_dev(vcpu, port);
pio_dev = vcpu_find_pio_dev(vcpu, port,
vcpu->arch.pio.cur_count,
!vcpu->arch.pio.in);
if (!vcpu->arch.pio.in) {
/* string PIO write */
ret = pio_copy_data(vcpu);

@ -2600,27 +2602,41 @@ void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,

unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
{
unsigned long value;

kvm_x86_ops->decache_cr4_guest_bits(vcpu);
switch (cr) {
case 0:
return vcpu->arch.cr0;
value = vcpu->arch.cr0;
break;
case 2:
return vcpu->arch.cr2;
value = vcpu->arch.cr2;
break;
case 3:
return vcpu->arch.cr3;
value = vcpu->arch.cr3;
break;
case 4:
return vcpu->arch.cr4;
value = vcpu->arch.cr4;
break;
case 8:
return kvm_get_cr8(vcpu);
value = kvm_get_cr8(vcpu);
break;
default:
vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
return 0;
}
KVMTRACE_3D(CR_READ, vcpu, (u32)cr, (u32)value,
(u32)((u64)value >> 32), handler);

return value;
}

void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
unsigned long *rflags)
{
KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr, (u32)val,
(u32)((u64)val >> 32), handler);

switch (cr) {
case 0:
kvm_set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val));

@ -2771,8 +2787,10 @@ static void vapic_exit(struct kvm_vcpu *vcpu)
if (!apic || !apic->vapic_addr)
return;

down_read(&vcpu->kvm->slots_lock);
kvm_release_page_dirty(apic->vapic_page);
mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
up_read(&vcpu->kvm->slots_lock);
}

static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)

@ -2928,9 +2946,7 @@ out:

post_kvm_run_save(vcpu, kvm_run);

down_read(&vcpu->kvm->slots_lock);
vapic_exit(vcpu);
up_read(&vcpu->kvm->slots_lock);

return r;
}

@ -2942,15 +2958,15 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)

vcpu_load(vcpu);

if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
kvm_vcpu_block(vcpu);
vcpu_put(vcpu);
return -EAGAIN;
}

if (vcpu->sigset_active)
sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
kvm_vcpu_block(vcpu);
r = -EAGAIN;
goto out;
}

/* re-sync apic's tpr */
if (!irqchip_in_kernel(vcpu->kvm))
kvm_set_cr8(vcpu, kvm_run->cr8);

@ -3070,8 +3086,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
return 0;
}

static void get_segment(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg)
void kvm_get_segment(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg)
{
kvm_x86_ops->get_segment(vcpu, var, seg);
}

@ -3080,7 +3096,7 @@ void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
struct kvm_segment cs;

get_segment(vcpu, &cs, VCPU_SREG_CS);
kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
*db = cs.db;
*l = cs.l;
}

@ -3094,15 +3110,15 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,

vcpu_load(vcpu);

get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

kvm_x86_ops->get_idt(vcpu, &dt);
sregs->idt.limit = dt.limit;

@ -3154,7 +3170,7 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
return 0;
}

static void set_segment(struct kvm_vcpu *vcpu,
static void kvm_set_segment(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg)
{
kvm_x86_ops->set_segment(vcpu, var, seg);

@ -3191,7 +3207,7 @@ static void get_segment_descritptor_dtable(struct kvm_vcpu *vcpu,
if (selector & 1 << 2) {
struct kvm_segment kvm_seg;

get_segment(vcpu, &kvm_seg, VCPU_SREG_LDTR);
kvm_get_segment(vcpu, &kvm_seg, VCPU_SREG_LDTR);

if (kvm_seg.unusable)
dtable->limit = 0;

@ -3297,7 +3313,7 @@ static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
{
struct kvm_segment kvm_seg;

get_segment(vcpu, &kvm_seg, seg);
kvm_get_segment(vcpu, &kvm_seg, seg);
return kvm_seg.selector;
}


@ -3313,8 +3329,8 @@ static int load_segment_descriptor_to_kvm_desct(struct kvm_vcpu *vcpu,
return 0;
}

static int load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
int type_bits, int seg)
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
int type_bits, int seg)
{
struct kvm_segment kvm_seg;


@ -3327,7 +3343,7 @@ static int load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
if (!kvm_seg.s)
kvm_seg.unusable = 1;

set_segment(vcpu, &kvm_seg, seg);
kvm_set_segment(vcpu, &kvm_seg, seg);
return 0;
}


@ -3373,25 +3389,25 @@ static int load_state_from_tss32(struct kvm_vcpu *vcpu,
vcpu->arch.regs[VCPU_REGS_RSI] = tss->esi;
vcpu->arch.regs[VCPU_REGS_RDI] = tss->edi;

if (load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR))
if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR))
return 1;

if (load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
return 1;

if (load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
return 1;

if (load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
return 1;

if (load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
return 1;

if (load_segment_descriptor(vcpu, tss->fs, 1, VCPU_SREG_FS))
if (kvm_load_segment_descriptor(vcpu, tss->fs, 1, VCPU_SREG_FS))
return 1;

if (load_segment_descriptor(vcpu, tss->gs, 1, VCPU_SREG_GS))
if (kvm_load_segment_descriptor(vcpu, tss->gs, 1, VCPU_SREG_GS))
return 1;
return 0;
}

@ -3432,24 +3448,24 @@ static int load_state_from_tss16(struct kvm_vcpu *vcpu,
vcpu->arch.regs[VCPU_REGS_RSI] = tss->si;
vcpu->arch.regs[VCPU_REGS_RDI] = tss->di;

if (load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR))
if (kvm_load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR))
return 1;

if (load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
return 1;

if (load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
return 1;

if (load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
return 1;

if (load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
return 1;
return 0;
}

int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
struct desc_struct *cseg_desc,
struct desc_struct *nseg_desc)
{

@ -3472,7 +3488,7 @@ out:
return ret;
}

int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
struct desc_struct *cseg_desc,
struct desc_struct *nseg_desc)
{

@ -3502,7 +3518,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
struct desc_struct nseg_desc;
int ret = 0;

get_segment(vcpu, &tr_seg, VCPU_SREG_TR);
kvm_get_segment(vcpu, &tr_seg, VCPU_SREG_TR);

if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc))
goto out;

@ -3561,7 +3577,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 | X86_CR0_TS);
seg_desct_to_kvm_desct(&nseg_desc, tss_selector, &tr_seg);
tr_seg.type = 11;
set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
kvm_set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
out:
kvm_x86_ops->decache_regs(vcpu);
return ret;

@ -3628,15 +3644,15 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
}
}

set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

vcpu_put(vcpu);


@ -3751,14 +3767,14 @@ void fx_init(struct kvm_vcpu *vcpu)
* allocate ram with GFP_KERNEL.
*/
if (!used_math())
fx_save(&vcpu->arch.host_fx_image);
kvm_fx_save(&vcpu->arch.host_fx_image);

/* Initialize guest FPU by resetting ours and saving into guest's */
preempt_disable();
fx_save(&vcpu->arch.host_fx_image);
fx_finit();
fx_save(&vcpu->arch.guest_fx_image);
fx_restore(&vcpu->arch.host_fx_image);
kvm_fx_save(&vcpu->arch.host_fx_image);
kvm_fx_finit();
kvm_fx_save(&vcpu->arch.guest_fx_image);
kvm_fx_restore(&vcpu->arch.host_fx_image);
preempt_enable();

vcpu->arch.cr0 |= X86_CR0_ET;

@ -3775,8 +3791,8 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
return;

vcpu->guest_fpu_loaded = 1;
fx_save(&vcpu->arch.host_fx_image);
fx_restore(&vcpu->arch.guest_fx_image);
kvm_fx_save(&vcpu->arch.host_fx_image);
kvm_fx_restore(&vcpu->arch.guest_fx_image);
}
EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);


@ -3786,8 +3802,8 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
return;

vcpu->guest_fpu_loaded = 0;
fx_save(&vcpu->arch.guest_fx_image);
fx_restore(&vcpu->arch.host_fx_image);
kvm_fx_save(&vcpu->arch.guest_fx_image);
kvm_fx_restore(&vcpu->arch.host_fx_image);
++vcpu->stat.fpu_reload;
}
EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);

@ -4016,6 +4032,11 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
return 0;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
kvm_mmu_zap_all(kvm);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
@ -121,7 +121,7 @@ static u16 opcode_table[256] = {
0, 0, 0, DstReg | SrcMem32 | ModRM | Mov /* movsxd (x86/64) */ ,
0, 0, 0, 0,
/* 0x68 - 0x6F */
0, 0, ImplicitOps | Mov | Stack, 0,
SrcImm | Mov | Stack, 0, SrcImmByte | Mov | Stack, 0,
SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps, /* insb, insw/insd */
SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps, /* outsb, outsw/outsd */
/* 0x70 - 0x77 */

@ -138,9 +138,11 @@ static u16 opcode_table[256] = {
/* 0x88 - 0x8F */
ByteOp | DstMem | SrcReg | ModRM | Mov, DstMem | SrcReg | ModRM | Mov,
ByteOp | DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
0, ModRM | DstReg, 0, Group | Group1A,
/* 0x90 - 0x9F */
0, 0, 0, 0, 0, 0, 0, 0,
DstMem | SrcReg | ModRM | Mov, ModRM | DstReg,
DstReg | SrcMem | ModRM | Mov, Group | Group1A,
/* 0x90 - 0x97 */
DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg,
/* 0x98 - 0x9F */
0, 0, 0, 0, ImplicitOps | Stack, ImplicitOps | Stack, 0, 0,
/* 0xA0 - 0xA7 */
ByteOp | DstReg | SrcMem | Mov | MemAbs, DstReg | SrcMem | Mov | MemAbs,

@ -152,7 +154,8 @@ static u16 opcode_table[256] = {
ByteOp | ImplicitOps | Mov | String, ImplicitOps | Mov | String,
ByteOp | ImplicitOps | String, ImplicitOps | String,
/* 0xB0 - 0xBF */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
DstReg | SrcImm | Mov, 0, 0, 0, 0, 0, 0, 0,
/* 0xC0 - 0xC7 */
ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImmByte | ModRM,
0, ImplicitOps | Stack, 0, 0,

@ -168,7 +171,8 @@ static u16 opcode_table[256] = {
/* 0xE0 - 0xE7 */
0, 0, 0, 0, 0, 0, 0, 0,
/* 0xE8 - 0xEF */
ImplicitOps | Stack, SrcImm|ImplicitOps, 0, SrcImmByte|ImplicitOps,
ImplicitOps | Stack, SrcImm | ImplicitOps,
ImplicitOps, SrcImmByte | ImplicitOps,
0, 0, 0, 0,
/* 0xF0 - 0xF7 */
0, 0, 0, 0,

@ -215,7 +219,7 @@ static u16 twobyte_table[256] = {
/* 0xA0 - 0xA7 */
0, 0, 0, DstMem | SrcReg | ModRM | BitOp, 0, 0, 0, 0,
/* 0xA8 - 0xAF */
0, 0, 0, DstMem | SrcReg | ModRM | BitOp, 0, 0, 0, 0,
0, 0, 0, DstMem | SrcReg | ModRM | BitOp, 0, 0, ModRM, 0,
/* 0xB0 - 0xB7 */
ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, 0,
DstMem | SrcReg | ModRM | BitOp,

@ -518,6 +522,39 @@ static inline void jmp_rel(struct decode_cache *c, int rel)
register_address_increment(c, &c->eip, rel);
}

static void set_seg_override(struct decode_cache *c, int seg)
{
c->has_seg_override = true;
c->seg_override = seg;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
return 0;

return kvm_x86_ops->get_segment_base(ctxt->vcpu, seg);
}

static unsigned long seg_override_base(struct x86_emulate_ctxt *ctxt,
struct decode_cache *c)
{
if (!c->has_seg_override)
return 0;

return seg_base(ctxt, c->seg_override);
}

static unsigned long es_base(struct x86_emulate_ctxt *ctxt)
{
return seg_base(ctxt, VCPU_SREG_ES);
}

static unsigned long ss_base(struct x86_emulate_ctxt *ctxt)
{
return seg_base(ctxt, VCPU_SREG_SS);
}

static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops,
unsigned long linear, u8 *dest)

@ -660,7 +697,7 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
{
struct decode_cache *c = &ctxt->decode;
u8 sib;
int index_reg = 0, base_reg = 0, scale, rip_relative = 0;
int index_reg = 0, base_reg = 0, scale;
int rc = 0;

if (c->rex_prefix) {

@ -731,47 +768,28 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
}
if (c->modrm_rm == 2 || c->modrm_rm == 3 ||
(c->modrm_rm == 6 && c->modrm_mod != 0))
if (!c->override_base)
c->override_base = &ctxt->ss_base;
if (!c->has_seg_override)
set_seg_override(c, VCPU_SREG_SS);
c->modrm_ea = (u16)c->modrm_ea;
} else {
/* 32/64-bit ModR/M decode. */
switch (c->modrm_rm) {
case 4:
case 12:
if ((c->modrm_rm & 7) == 4) {
sib = insn_fetch(u8, 1, c->eip);
index_reg |= (sib >> 3) & 7;
base_reg |= sib & 7;
scale = sib >> 6;

switch (base_reg) {
case 5:
if (c->modrm_mod != 0)
c->modrm_ea += c->regs[base_reg];
else
c->modrm_ea +=
insn_fetch(s32, 4, c->eip);
break;
default:
if ((base_reg & 7) == 5 && c->modrm_mod == 0)
c->modrm_ea += insn_fetch(s32, 4, c->eip);
else
c->modrm_ea += c->regs[base_reg];
}
switch (index_reg) {
case 4:
break;
default:
if (index_reg != 4)
c->modrm_ea += c->regs[index_reg] << scale;
}
break;
case 5:
if (c->modrm_mod != 0)
c->modrm_ea += c->regs[c->modrm_rm];
else if (ctxt->mode == X86EMUL_MODE_PROT64)
rip_relative = 1;
break;
default:
} else if ((c->modrm_rm & 7) == 5 && c->modrm_mod == 0) {
if (ctxt->mode == X86EMUL_MODE_PROT64)
c->rip_relative = 1;
} else
c->modrm_ea += c->regs[c->modrm_rm];
break;
}
switch (c->modrm_mod) {
case 0:
if (c->modrm_rm == 5)

@ -785,22 +803,6 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
break;
}
}
if (rip_relative) {
c->modrm_ea += c->eip;
switch (c->d & SrcMask) {
case SrcImmByte:
c->modrm_ea += 1;
break;
case SrcImm:
if (c->d & ByteOp)
c->modrm_ea += 1;
else
if (c->op_bytes == 8)
c->modrm_ea += 4;
else
c->modrm_ea += c->op_bytes;
}
}
done:
return rc;
}

@ -838,6 +840,7 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)

memset(c, 0, sizeof(struct decode_cache));
c->eip = ctxt->vcpu->arch.rip;
ctxt->cs_base = seg_base(ctxt, VCPU_SREG_CS);
memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);

switch (mode) {

@ -876,23 +879,15 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
/* switch between 2/4 bytes */
c->ad_bytes = def_ad_bytes ^ 6;
break;
case 0x2e: /* CS override */
c->override_base = &ctxt->cs_base;
break;
case 0x3e: /* DS override */
c->override_base = &ctxt->ds_base;
break;
case 0x26: /* ES override */
c->override_base = &ctxt->es_base;
case 0x2e: /* CS override */
case 0x36: /* SS override */
case 0x3e: /* DS override */
set_seg_override(c, (c->b >> 3) & 3);
break;
case 0x64: /* FS override */
c->override_base = &ctxt->fs_base;
break;
case 0x65: /* GS override */
c->override_base = &ctxt->gs_base;
break;
case 0x36: /* SS override */
c->override_base = &ctxt->ss_base;
set_seg_override(c, c->b & 7);
break;
case 0x40 ... 0x4f: /* REX */
if (mode != X86EMUL_MODE_PROT64)

@ -964,15 +959,11 @@ done_prefixes:
if (rc)
goto done;

if (!c->override_base)
c->override_base = &ctxt->ds_base;
if (mode == X86EMUL_MODE_PROT64 &&
c->override_base != &ctxt->fs_base &&
c->override_base != &ctxt->gs_base)
c->override_base = NULL;
if (!c->has_seg_override)
set_seg_override(c, VCPU_SREG_DS);

if (c->override_base)
c->modrm_ea += *c->override_base;
if (!(!c->twobyte && c->b == 0x8d))
c->modrm_ea += seg_override_base(ctxt, c);

if (c->ad_bytes != 8)
c->modrm_ea = (u32)c->modrm_ea;

@ -1049,6 +1040,7 @@ done_prefixes:
break;
case DstMem:
if ((c->d & ModRM) && c->modrm_mod == 3) {
c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
c->dst.type = OP_REG;
c->dst.val = c->dst.orig_val = c->modrm_val;
c->dst.ptr = c->modrm_ptr;

@ -1058,6 +1050,9 @@ done_prefixes:
break;
}

if (c->rip_relative)
c->modrm_ea += c->eip;

done:
return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
}

@ -1070,7 +1065,7 @@ static inline void emulate_push(struct x86_emulate_ctxt *ctxt)
c->dst.bytes = c->op_bytes;
c->dst.val = c->src.val;
register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes);
c->dst.ptr = (void *) register_address(c, ctxt->ss_base,
c->dst.ptr = (void *) register_address(c, ss_base(ctxt),
c->regs[VCPU_REGS_RSP]);
}


@ -1080,7 +1075,7 @@ static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt,
struct decode_cache *c = &ctxt->decode;
int rc;

rc = ops->read_std(register_address(c, ctxt->ss_base,
rc = ops->read_std(register_address(c, ss_base(ctxt),
c->regs[VCPU_REGS_RSP]),
&c->dst.val, c->dst.bytes, ctxt->vcpu);
if (rc != 0)

@ -1402,11 +1397,11 @@ special_insn:
register_address_increment(c, &c->regs[VCPU_REGS_RSP],
-c->op_bytes);
c->dst.ptr = (void *) register_address(
c, ctxt->ss_base, c->regs[VCPU_REGS_RSP]);
c, ss_base(ctxt), c->regs[VCPU_REGS_RSP]);
break;
case 0x58 ... 0x5f: /* pop reg */
pop_instruction:
if ((rc = ops->read_std(register_address(c, ctxt->ss_base,
if ((rc = ops->read_std(register_address(c, ss_base(ctxt),
c->regs[VCPU_REGS_RSP]), c->dst.ptr,
c->op_bytes, ctxt->vcpu)) != 0)
goto done;

@ -1420,9 +1415,8 @@ special_insn:
goto cannot_emulate;
c->dst.val = (s32) c->src.val;
break;
case 0x68: /* push imm */
case 0x6a: /* push imm8 */
c->src.val = 0L;
c->src.val = insn_fetch(s8, 1, c->eip);
emulate_push(ctxt);
break;
case 0x6c: /* insb */

@ -1433,7 +1427,7 @@ special_insn:
c->rep_prefix ?
address_mask(c, c->regs[VCPU_REGS_RCX]) : 1,
(ctxt->eflags & EFLG_DF),
register_address(c, ctxt->es_base,
register_address(c, es_base(ctxt),
c->regs[VCPU_REGS_RDI]),
c->rep_prefix,
c->regs[VCPU_REGS_RDX]) == 0) {

@ -1449,9 +1443,8 @@ special_insn:
c->rep_prefix ?
address_mask(c, c->regs[VCPU_REGS_RCX]) : 1,
(ctxt->eflags & EFLG_DF),
register_address(c, c->override_base ?
*c->override_base :
ctxt->ds_base,
register_address(c,
seg_override_base(ctxt, c),
c->regs[VCPU_REGS_RSI]),
c->rep_prefix,
c->regs[VCPU_REGS_RDX]) == 0) {

@ -1490,6 +1483,7 @@ special_insn:
emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
break;
case 0x86 ... 0x87: /* xchg */
xchg:
/* Write back the register source. */
switch (c->dst.bytes) {
case 1:

@ -1514,14 +1508,60 @@ special_insn:
break;
case 0x88 ... 0x8b: /* mov */
goto mov;
case 0x8c: { /* mov r/m, sreg */
struct kvm_segment segreg;

if (c->modrm_reg <= 5)
kvm_get_segment(ctxt->vcpu, &segreg, c->modrm_reg);
else {
printk(KERN_INFO "0x8c: Invalid segreg in modrm byte 0x%02x\n",
c->modrm);
goto cannot_emulate;
}
c->dst.val = segreg.selector;
break;
}
case 0x8d: /* lea r16/r32, m */
c->dst.val = c->modrm_ea;
break;
case 0x8e: { /* mov seg, r/m16 */
uint16_t sel;
int type_bits;
int err;

sel = c->src.val;
if (c->modrm_reg <= 5) {
type_bits = (c->modrm_reg == 1) ? 9 : 1;
err = kvm_load_segment_descriptor(ctxt->vcpu, sel,
type_bits, c->modrm_reg);
} else {
printk(KERN_INFO "Invalid segreg in modrm byte 0x%02x\n",
c->modrm);
goto cannot_emulate;
}

if (err < 0)
goto cannot_emulate;

c->dst.type = OP_NONE; /* Disable writeback. */
break;
}
case 0x8f: /* pop (sole member of Grp1a) */
rc = emulate_grp1a(ctxt, ops);
if (rc != 0)
goto done;
break;
case 0x90: /* nop / xchg r8,rax */
if (!(c->rex_prefix & 1)) { /* nop */
c->dst.type = OP_NONE;
break;
}
case 0x91 ... 0x97: /* xchg reg,rax */
c->src.type = c->dst.type = OP_REG;
c->src.bytes = c->dst.bytes = c->op_bytes;
c->src.ptr = (unsigned long *) &c->regs[VCPU_REGS_RAX];
c->src.val = *(c->src.ptr);
goto xchg;
case 0x9c: /* pushf */
c->src.val = (unsigned long) ctxt->eflags;
emulate_push(ctxt);

@ -1540,11 +1580,10 @@ special_insn:
c->dst.type = OP_MEM;
c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
c->dst.ptr = (unsigned long *)register_address(c,
ctxt->es_base,
es_base(ctxt),
c->regs[VCPU_REGS_RDI]);
if ((rc = ops->read_emulated(register_address(c,
c->override_base ? *c->override_base :
ctxt->ds_base,
seg_override_base(ctxt, c),
c->regs[VCPU_REGS_RSI]),
&c->dst.val,
c->dst.bytes, ctxt->vcpu)) != 0)

@ -1560,8 +1599,7 @@ special_insn:
c->src.type = OP_NONE; /* Disable writeback. */
c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
c->src.ptr = (unsigned long *)register_address(c,
c->override_base ? *c->override_base :
ctxt->ds_base,
seg_override_base(ctxt, c),
c->regs[VCPU_REGS_RSI]);
if ((rc = ops->read_emulated((unsigned long)c->src.ptr,
&c->src.val,

@ -1572,7 +1610,7 @@ special_insn:
c->dst.type = OP_NONE; /* Disable writeback. */
c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
c->dst.ptr = (unsigned long *)register_address(c,
ctxt->es_base,
es_base(ctxt),
c->regs[VCPU_REGS_RDI]);
if ((rc = ops->read_emulated((unsigned long)c->dst.ptr,
&c->dst.val,

@ -1596,7 +1634,7 @@ special_insn:
c->dst.type = OP_MEM;
c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
c->dst.ptr = (unsigned long *)register_address(c,
ctxt->es_base,
es_base(ctxt),
c->regs[VCPU_REGS_RDI]);
c->dst.val = c->regs[VCPU_REGS_RAX];
register_address_increment(c, &c->regs[VCPU_REGS_RDI],

@ -1608,8 +1646,7 @@ special_insn:
c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX];
if ((rc = ops->read_emulated(register_address(c,
c->override_base ? *c->override_base :
ctxt->ds_base,
seg_override_base(ctxt, c),
c->regs[VCPU_REGS_RSI]),
&c->dst.val,
c->dst.bytes,

@ -1622,6 +1659,8 @@ special_insn:
case 0xae ... 0xaf: /* scas */
DPRINTF("Urk! I don't handle SCAS.\n");
goto cannot_emulate;
case 0xb8: /* mov r, imm */
goto mov;
case 0xc0 ... 0xc1:
emulate_grp2(ctxt);
break;

@ -1660,13 +1699,39 @@ special_insn:
break;
}
case 0xe9: /* jmp rel */
case 0xeb: /* jmp rel short */
goto jmp;
case 0xea: /* jmp far */ {
uint32_t eip;
uint16_t sel;

switch (c->op_bytes) {
case 2:
eip = insn_fetch(u16, 2, c->eip);
break;
case 4:
eip = insn_fetch(u32, 4, c->eip);
break;
default:
DPRINTF("jmp far: Invalid op_bytes\n");
goto cannot_emulate;
}
sel = insn_fetch(u16, 2, c->eip);
if (kvm_load_segment_descriptor(ctxt->vcpu, sel, 9, VCPU_SREG_CS) < 0) {
DPRINTF("jmp far: Failed to load CS descriptor\n");
goto cannot_emulate;
}

c->eip = eip;
break;
}
case 0xeb:
jmp: /* jmp rel short */
jmp_rel(c, c->src.val);
c->dst.type = OP_NONE; /* Disable writeback. */
break;
case 0xf4: /* hlt */
ctxt->vcpu->arch.halt_request = 1;
goto done;
break;
case 0xf5: /* cmc */
/* complement carry flag from eflags reg */
ctxt->eflags ^= EFLG_CF;

@ -1882,6 +1947,8 @@ twobyte_insn:
c->src.val &= (c->dst.bytes << 3) - 1;
emulate_2op_SrcV_nobyte("bts", c->src, c->dst, ctxt->eflags);
break;
case 0xae: /* clflush */
break;
case 0xb0 ... 0xb1: /* cmpxchg */
/*
* Save real source value, then compare EAX against
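The rewritten prefix decode above collapses six per-prefix cases into arithmetic on the opcode byte: the legacy overrides 0x26/0x2e/0x36/0x3e carry their segment in bits 4:3, while 0x64/0x65 encode FS/GS in the low bits. This only works because the VCPU_SREG_* enum is reordered in the asm-x86/kvm_host.h hunk of this same merge so that ES=0, CS=1, SS=2, DS=3, FS=4, GS=5. A standalone check of the mapping:

#include <assert.h>

/* New enum order from include/asm-x86/kvm_host.h in this merge. */
enum { SREG_ES, SREG_CS, SREG_SS, SREG_DS, SREG_FS, SREG_GS };

int main(void)
{
    /* set_seg_override(c, (c->b >> 3) & 3) for the legacy prefixes... */
    assert(((0x26 >> 3) & 3) == SREG_ES);
    assert(((0x2e >> 3) & 3) == SREG_CS);
    assert(((0x36 >> 3) & 3) == SREG_SS);
    assert(((0x3e >> 3) & 3) == SREG_DS);
    /* ...and set_seg_override(c, c->b & 7) for FS/GS. */
    assert((0x64 & 7) == SREG_FS);
    assert((0x65 & 7) == SREG_GS);
    return 0;
}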
@ -38,6 +38,7 @@
/* memory slots that does not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 4

#define KVM_COALESCED_MMIO_PAGE_OFFSET 1

/* define exit reasons from vmm to kvm*/
#define EXIT_REASON_VM_PANIC 0

@ -521,4 +522,6 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
void kvm_sal_emul(struct kvm_vcpu *vcpu);

static inline void kvm_inject_nmi(struct kvm_vcpu *vcpu) {}

#endif
@ -31,6 +31,8 @@
/* memory slots that does not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 4

#define KVM_COALESCED_MMIO_PAGE_OFFSET 1

/* We don't currently support large pages. */
#define KVM_PAGES_PER_HPAGE (1<<31)
@ -62,7 +62,7 @@ struct sca_block {
#define CPUSTAT_J 0x00000002
#define CPUSTAT_P 0x00000001

struct sie_block {
struct kvm_s390_sie_block {
atomic_t cpuflags; /* 0x0000 */
__u32 prefix; /* 0x0004 */
__u8 reserved8[32]; /* 0x0008 */

@ -140,14 +140,14 @@ struct kvm_vcpu_stat {
u32 diagnose_44;
};

struct io_info {
struct kvm_s390_io_info {
__u16 subchannel_id; /* 0x0b8 */
__u16 subchannel_nr; /* 0x0ba */
__u32 io_int_parm; /* 0x0bc */
__u32 io_int_word; /* 0x0c0 */
};

struct ext_info {
struct kvm_s390_ext_info {
__u32 ext_params;
__u64 ext_params2;
};

@ -160,22 +160,22 @@ struct ext_info {
#define PGM_SPECIFICATION 0x06
#define PGM_DATA 0x07

struct pgm_info {
struct kvm_s390_pgm_info {
__u16 code;
};

struct prefix_info {
struct kvm_s390_prefix_info {
__u32 address;
};

struct interrupt_info {
struct kvm_s390_interrupt_info {
struct list_head list;
u64 type;
union {
struct io_info io;
struct ext_info ext;
struct pgm_info pgm;
struct prefix_info prefix;
struct kvm_s390_io_info io;
struct kvm_s390_ext_info ext;
struct kvm_s390_pgm_info pgm;
struct kvm_s390_prefix_info prefix;
};
};


@ -183,35 +183,35 @@ struct interrupt_info {
#define ACTION_STORE_ON_STOP 1
#define ACTION_STOP_ON_STOP 2

struct local_interrupt {
struct kvm_s390_local_interrupt {
spinlock_t lock;
struct list_head list;
atomic_t active;
struct float_interrupt *float_int;
struct kvm_s390_float_interrupt *float_int;
int timer_due; /* event indicator for waitqueue below */
wait_queue_head_t wq;
atomic_t *cpuflags;
unsigned int action_bits;
};

struct float_interrupt {
struct kvm_s390_float_interrupt {
spinlock_t lock;
struct list_head list;
atomic_t active;
int next_rr_cpu;
unsigned long idle_mask [(64 + sizeof(long) - 1) / sizeof(long)];
struct local_interrupt *local_int[64];
struct kvm_s390_local_interrupt *local_int[64];
};


struct kvm_vcpu_arch {
struct sie_block *sie_block;
struct kvm_s390_sie_block *sie_block;
unsigned long guest_gprs[16];
s390_fp_regs host_fpregs;
unsigned int host_acrs[NUM_ACRS];
s390_fp_regs guest_fpregs;
unsigned int guest_acrs[NUM_ACRS];
struct local_interrupt local_int;
struct kvm_s390_local_interrupt local_int;
struct timer_list ckc_timer;
union {
cpuid_t cpu_id;

@ -228,8 +228,8 @@ struct kvm_arch{
unsigned long guest_memsize;
struct sca_block *sca;
debug_info_t *dbf;
struct float_interrupt float_int;
struct kvm_s390_float_interrupt float_int;
};

extern int sie64a(struct sie_block *, __u64 *);
extern int sie64a(struct kvm_s390_sie_block *, __u64 *);
#endif
@ -228,5 +228,6 @@ struct kvm_pit_state {
#define KVM_TRC_CLTS (KVM_TRC_HANDLER + 0x12)
#define KVM_TRC_LMSW (KVM_TRC_HANDLER + 0x13)
#define KVM_TRC_APIC_ACCESS (KVM_TRC_HANDLER + 0x14)
#define KVM_TRC_TDP_FAULT (KVM_TRC_HANDLER + 0x15)

#endif
@ -27,6 +27,7 @@
#define KVM_PRIVATE_MEM_SLOTS 4

#define KVM_PIO_PAGE_OFFSET 1
#define KVM_COALESCED_MMIO_PAGE_OFFSET 2

#define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
#define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))

@ -79,6 +80,7 @@
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 40
#define KVM_NR_VAR_MTRR 8

extern spinlock_t kvm_lock;
extern struct list_head vm_list;

@ -109,12 +111,12 @@ enum {
};

enum {
VCPU_SREG_CS,
VCPU_SREG_DS,
VCPU_SREG_ES,
VCPU_SREG_CS,
VCPU_SREG_SS,
VCPU_SREG_DS,
VCPU_SREG_FS,
VCPU_SREG_GS,
VCPU_SREG_SS,
VCPU_SREG_TR,
VCPU_SREG_LDTR,
};

@ -243,6 +245,7 @@ struct kvm_vcpu_arch {
gfn_t last_pt_write_gfn;
int last_pt_write_count;
u64 *last_pte_updated;
gfn_t last_pte_gfn;

struct {
gfn_t gfn; /* presumed gfn during guest pte update */

@ -287,6 +290,10 @@ struct kvm_vcpu_arch {
unsigned int hv_clock_tsc_khz;
unsigned int time_offset;
struct page *time_page;

bool nmi_pending;

u64 mtrr[0x100];
};

struct kvm_mem_alias {

@ -344,6 +351,7 @@ struct kvm_vcpu_stat {
u32 mmio_exits;
u32 signal_exits;
u32 irq_window_exits;
u32 nmi_window_exits;
u32 halt_exits;
u32 halt_wakeup;
u32 request_irq_exits;

@ -379,7 +387,6 @@ struct kvm_x86_ops {
void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
void (*vcpu_put)(struct kvm_vcpu *vcpu);
void (*vcpu_decache)(struct kvm_vcpu *vcpu);

int (*set_guest_debug)(struct kvm_vcpu *vcpu,
struct kvm_debug_guest *dbg);

@ -497,6 +504,10 @@ int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
unsigned long value);

void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
int type_bits, int seg);

int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason);

void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);

@ -515,6 +526,8 @@ void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2,
u32 error_code);

void kvm_inject_nmi(struct kvm_vcpu *vcpu);

void fx_init(struct kvm_vcpu *vcpu);

int emulator_read_std(unsigned long addr,

@ -554,55 +567,53 @@ static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
return (struct kvm_mmu_page *)page_private(page);
}

static inline u16 read_fs(void)
static inline u16 kvm_read_fs(void)
{
u16 seg;
asm("mov %%fs, %0" : "=g"(seg));
return seg;
}

static inline u16 read_gs(void)
static inline u16 kvm_read_gs(void)
{
u16 seg;
asm("mov %%gs, %0" : "=g"(seg));
return seg;
}

static inline u16 read_ldt(void)
static inline u16 kvm_read_ldt(void)
{
u16 ldt;
asm("sldt %0" : "=g"(ldt));
return ldt;
}

static inline void load_fs(u16 sel)
static inline void kvm_load_fs(u16 sel)
{
asm("mov %0, %%fs" : : "rm"(sel));
}

static inline void load_gs(u16 sel)
static inline void kvm_load_gs(u16 sel)
{
asm("mov %0, %%gs" : : "rm"(sel));
}

#ifndef load_ldt
static inline void load_ldt(u16 sel)
static inline void kvm_load_ldt(u16 sel)
{
asm("lldt %0" : : "rm"(sel));
}
#endif

static inline void get_idt(struct descriptor_table *table)
static inline void kvm_get_idt(struct descriptor_table *table)
{
asm("sidt %0" : "=m"(*table));
}

static inline void get_gdt(struct descriptor_table *table)
static inline void kvm_get_gdt(struct descriptor_table *table)
{
asm("sgdt %0" : "=m"(*table));
}

static inline unsigned long read_tr_base(void)
static inline unsigned long kvm_read_tr_base(void)
{
u16 tr;
asm("str %0" : "=g"(tr));

@ -619,17 +630,17 @@ static inline unsigned long read_msr(unsigned long msr)
}
#endif

static inline void fx_save(struct i387_fxsave_struct *image)
static inline void kvm_fx_save(struct i387_fxsave_struct *image)
{
asm("fxsave (%0)":: "r" (image));
}

static inline void fx_restore(struct i387_fxsave_struct *image)
static inline void kvm_fx_restore(struct i387_fxsave_struct *image)
{
asm("fxrstor (%0)":: "r" (image));
}

static inline void fx_finit(void)
static inline void kvm_fx_finit(void)
{
asm("finit");
}

@ -691,4 +702,28 @@ enum {
trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
vcpu, 0, 0, 0, 0, 0, 0)

#ifdef CONFIG_64BIT
#define KVM_EX_ENTRY ".quad"
#else
#define KVM_EX_ENTRY ".long"
#endif

/*
* Hardware virtualization extension instructions may fault if a
* reboot turns off virtualization while processes are running.
* Trap the fault and ignore the instruction if that happens.
*/
asmlinkage void kvm_handle_fault_on_reboot(void);

#define __kvm_handle_fault_on_reboot(insn) \
"666: " insn "\n\t" \
".pushsection .text.fixup, \"ax\" \n" \
"667: \n\t" \
"push $666b \n\t" \
"jmp kvm_handle_fault_on_reboot \n\t" \
".popsection \n\t" \
".pushsection __ex_table, \"a\" \n\t" \
KVM_EX_ENTRY " 666b, 667b \n\t" \
".popsection"

#endif
|
|||
u8 rex_prefix;
|
||||
struct operand src;
|
||||
struct operand dst;
|
||||
unsigned long *override_base;
|
||||
bool has_seg_override;
|
||||
u8 seg_override;
|
||||
unsigned int d;
|
||||
unsigned long regs[NR_VCPU_REGS];
|
||||
unsigned long eip;
|
||||
|
@ -134,6 +135,7 @@ struct decode_cache {
|
|||
u8 modrm_reg;
|
||||
u8 modrm_rm;
|
||||
u8 use_modrm_ea;
|
||||
bool rip_relative;
|
||||
unsigned long modrm_ea;
|
||||
void *modrm_ptr;
|
||||
unsigned long modrm_val;
|
||||
|
@ -150,12 +152,7 @@ struct x86_emulate_ctxt {
|
|||
/* Emulated execution mode, represented by an X86EMUL_MODE value. */
|
||||
int mode;
|
||||
|
||||
unsigned long cs_base;
|
||||
unsigned long ds_base;
|
||||
unsigned long es_base;
|
||||
unsigned long ss_base;
|
||||
unsigned long gs_base;
|
||||
unsigned long fs_base;
|
||||
u32 cs_base;
|
||||
|
||||
/* decode cache */
|
||||
|
||||
|
|
|
@ -173,6 +173,30 @@ struct kvm_run {
|
|||
};
|
||||
};
|
||||
|
||||
/* for KVM_REGISTER_COALESCED_MMIO / KVM_UNREGISTER_COALESCED_MMIO */
|
||||
|
||||
struct kvm_coalesced_mmio_zone {
|
||||
__u64 addr;
|
||||
__u32 size;
|
||||
__u32 pad;
|
||||
};
|
||||
|
||||
struct kvm_coalesced_mmio {
|
||||
__u64 phys_addr;
|
||||
__u32 len;
|
||||
__u32 pad;
|
||||
__u8 data[8];
|
||||
};
|
||||
|
||||
struct kvm_coalesced_mmio_ring {
|
||||
__u32 first, last;
|
||||
struct kvm_coalesced_mmio coalesced_mmio[0];
|
||||
};
|
||||
|
||||
#define KVM_COALESCED_MMIO_MAX \
|
||||
((PAGE_SIZE - sizeof(struct kvm_coalesced_mmio_ring)) / \
|
||||
sizeof(struct kvm_coalesced_mmio))
|
||||
|
||||
/* for KVM_TRANSLATE */
|
||||
struct kvm_translation {
|
||||
/* in */
|
||||
|
@ -294,14 +318,14 @@ struct kvm_trace_rec {
|
|||
__u32 vcpu_id;
|
||||
union {
|
||||
struct {
|
||||
__u32 cycle_lo, cycle_hi;
|
||||
__u64 cycle_u64;
|
||||
__u32 extra_u32[KVM_TRC_EXTRA_MAX];
|
||||
} cycle;
|
||||
struct {
|
||||
__u32 extra_u32[KVM_TRC_EXTRA_MAX];
|
||||
} nocycle;
|
||||
} u;
|
||||
};
|
||||
} __attribute__((packed));
|
||||
|
||||
#define KVMIO 0xAE
|
||||
|
||||
|
@ -346,6 +370,7 @@ struct kvm_trace_rec {
|
|||
#define KVM_CAP_NOP_IO_DELAY 12
|
||||
#define KVM_CAP_PV_MMU 13
|
||||
#define KVM_CAP_MP_STATE 14
|
||||
#define KVM_CAP_COALESCED_MMIO 15
|
||||
|
||||
/*
|
||||
* ioctls for VM fds
|
||||
|
@ -371,6 +396,10 @@ struct kvm_trace_rec {
|
|||
#define KVM_CREATE_PIT _IO(KVMIO, 0x64)
|
||||
#define KVM_GET_PIT _IOWR(KVMIO, 0x65, struct kvm_pit_state)
|
||||
#define KVM_SET_PIT _IOR(KVMIO, 0x66, struct kvm_pit_state)
|
||||
#define KVM_REGISTER_COALESCED_MMIO \
|
||||
_IOW(KVMIO, 0x67, struct kvm_coalesced_mmio_zone)
|
||||
#define KVM_UNREGISTER_COALESCED_MMIO \
|
||||
_IOW(KVMIO, 0x68, struct kvm_coalesced_mmio_zone)
|
||||
|
||||
/*
|
||||
* ioctls for vcpu fds
|
||||
|
|
|
@@ -52,7 +52,8 @@ struct kvm_io_bus {

void kvm_io_bus_init(struct kvm_io_bus *bus);
void kvm_io_bus_destroy(struct kvm_io_bus *bus);
struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr);
struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus,
					  gpa_t addr, int len, int is_write);
void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
			     struct kvm_io_device *dev);

@@ -116,6 +117,10 @@ struct kvm {
	struct kvm_vm_stat stat;
	struct kvm_arch arch;
	atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	struct kvm_coalesced_mmio_dev *coalesced_mmio_dev;
	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
#endif
};

/* The guest did something we don't support. */

@@ -135,9 +140,6 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

void decache_vcpus_on_cpu(int cpu);

int kvm_init(void *opaque, unsigned int vcpu_size,
	     struct module *module);
void kvm_exit(void);

@@ -166,6 +168,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				struct kvm_memory_slot old,
				int user_alloc);
void kvm_arch_flush_shadow(struct kvm *kvm);
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
@@ -0,0 +1,156 @@
/*
 * KVM coalesced MMIO
 *
 * Copyright (c) 2008 Bull S.A.S.
 *
 * Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>

#include "coalesced_mmio.h"

static int coalesced_mmio_in_range(struct kvm_io_device *this,
				   gpa_t addr, int len, int is_write)
{
	struct kvm_coalesced_mmio_dev *dev =
				(struct kvm_coalesced_mmio_dev *)this->private;
	struct kvm_coalesced_mmio_zone *zone;
	int next;
	int i;

	if (!is_write)
		return 0;

	/* kvm->lock is taken by the caller and must not be released
	 * before dev.read/write
	 */

	/* Are we able to batch it? */

	/* last is the first free entry;
	 * check whether we would hit the first used entry
	 * (there is always one unused entry in the buffer)
	 */

	next = (dev->kvm->coalesced_mmio_ring->last + 1) %
							KVM_COALESCED_MMIO_MAX;
	if (next == dev->kvm->coalesced_mmio_ring->first) {
		/* full */
		return 0;
	}

	/* is it in a batchable area? */

	for (i = 0; i < dev->nb_zones; i++) {
		zone = &dev->zone[i];

		/* (addr,len) is fully included in
		 * (zone->addr, zone->size)
		 */

		if (zone->addr <= addr &&
		    addr + len <= zone->addr + zone->size)
			return 1;
	}
	return 0;
}

static void coalesced_mmio_write(struct kvm_io_device *this,
				 gpa_t addr, int len, const void *val)
{
	struct kvm_coalesced_mmio_dev *dev =
				(struct kvm_coalesced_mmio_dev *)this->private;
	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;

	/* kvm->lock must be taken by caller before call to in_range() */

	/* copy data into the first free entry of the ring */

	ring->coalesced_mmio[ring->last].phys_addr = addr;
	ring->coalesced_mmio[ring->last].len = len;
	memcpy(ring->coalesced_mmio[ring->last].data, val, len);
	smp_wmb();
	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
}

static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
	kfree(this);
}

int kvm_coalesced_mmio_init(struct kvm *kvm)
{
	struct kvm_coalesced_mmio_dev *dev;

	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->dev.write = coalesced_mmio_write;
	dev->dev.in_range = coalesced_mmio_in_range;
	dev->dev.destructor = coalesced_mmio_destructor;
	dev->dev.private = dev;
	dev->kvm = kvm;
	kvm->coalesced_mmio_dev = dev;
	kvm_io_bus_register_dev(&kvm->mmio_bus, &dev->dev);

	return 0;
}

int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
					 struct kvm_coalesced_mmio_zone *zone)
{
	struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;

	if (dev == NULL)
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (dev->nb_zones >= KVM_COALESCED_MMIO_ZONE_MAX) {
		mutex_unlock(&kvm->lock);
		return -ENOBUFS;
	}

	dev->zone[dev->nb_zones] = *zone;
	dev->nb_zones++;

	mutex_unlock(&kvm->lock);
	return 0;
}

int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
					   struct kvm_coalesced_mmio_zone *zone)
{
	int i;
	struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
	struct kvm_coalesced_mmio_zone *z;

	if (dev == NULL)
		return -EINVAL;

	mutex_lock(&kvm->lock);

	i = dev->nb_zones;
	while (i) {
		z = &dev->zone[i - 1];

		/* unregister all zones included in
		 * (zone->addr, zone->size)
		 */

		if (zone->addr <= z->addr &&
		    z->addr + z->size <= zone->addr + zone->size) {
			dev->nb_zones--;
			*z = dev->zone[dev->nb_zones];
		}
		i--;
	}

	mutex_unlock(&kvm->lock);

	return 0;
}
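The smp_wmb() in coalesced_mmio_write() publishes the entry fields before the 'last' index moves, so whoever drains the ring must read in the opposite order before advancing 'first'. A sketch of the consuming side, which is not part of this patch; process() is a placeholder:

	while (ring->first != ring->last) {
		smp_rmb();	/* entry fields are valid once 'last' moved */
		process(&ring->coalesced_mmio[ring->first]);	/* placeholder */
		ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
	}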
@@ -0,0 +1,23 @@
/*
 * KVM coalesced MMIO
 *
 * Copyright (c) 2008 Bull S.A.S.
 *
 * Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 */

#define KVM_COALESCED_MMIO_ZONE_MAX 100

struct kvm_coalesced_mmio_dev {
	struct kvm_io_device dev;
	struct kvm *kvm;
	int nb_zones;
	struct kvm_coalesced_mmio_zone zone[KVM_COALESCED_MMIO_ZONE_MAX];
};

int kvm_coalesced_mmio_init(struct kvm *kvm);
int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
					 struct kvm_coalesced_mmio_zone *zone);
int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
					   struct kvm_coalesced_mmio_zone *zone);
@@ -146,6 +146,11 @@ static int ioapic_inj_irq(struct kvm_ioapic *ioapic,
	return kvm_apic_set_irq(vcpu, vector, trig_mode);
}

static void ioapic_inj_nmi(struct kvm_vcpu *vcpu)
{
	kvm_inject_nmi(vcpu);
}

static u32 ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
				       u8 dest_mode)
{

@@ -239,8 +244,19 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
			}
		}
		break;

	/* TODO: NMI */
	case IOAPIC_NMI:
		for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
			if (!(deliver_bitmask & (1 << vcpu_id)))
				continue;
			deliver_bitmask &= ~(1 << vcpu_id);
			vcpu = ioapic->kvm->vcpus[vcpu_id];
			if (vcpu)
				ioapic_inj_nmi(vcpu);
			else
				ioapic_debug("NMI to vcpu %d failed\n",
					     vcpu_id);
		}
		break;
	default:
		printk(KERN_WARNING "Unsupported delivery mode %d\n",
		       delivery_mode);

@@ -291,7 +307,8 @@ void kvm_ioapic_update_eoi(struct kvm *kvm, int vector)
			__kvm_ioapic_update_eoi(ioapic, i);
}

static int ioapic_in_range(struct kvm_io_device *this, gpa_t addr)
static int ioapic_in_range(struct kvm_io_device *this, gpa_t addr,
			   int len, int is_write)
{
	struct kvm_ioapic *ioapic = (struct kvm_ioapic *)this->private;
@@ -27,7 +27,8 @@ struct kvm_io_device {
		      gpa_t addr,
		      int len,
		      const void *val);
	int (*in_range)(struct kvm_io_device *this, gpa_t addr);
	int (*in_range)(struct kvm_io_device *this, gpa_t addr, int len,
			int is_write);
	void (*destructor)(struct kvm_io_device *this);

	void *private;

@@ -49,9 +50,10 @@ static inline void kvm_iodevice_write(struct kvm_io_device *dev,
	dev->write(dev, addr, len, val);
}

static inline int kvm_iodevice_inrange(struct kvm_io_device *dev, gpa_t addr)
static inline int kvm_iodevice_inrange(struct kvm_io_device *dev,
				       gpa_t addr, int len, int is_write)
{
	return dev->in_range(dev, addr);
	return dev->in_range(dev, addr, len, is_write);
}

static inline void kvm_iodevice_destructor(struct kvm_io_device *dev)
@@ -47,6 +47,10 @@
#include <asm/uaccess.h>
#include <asm/pgtable.h>

#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
#include "coalesced_mmio.h"
#endif

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

@@ -65,6 +69,8 @@ struct dentry *kvm_debugfs_dir;
static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);

bool kvm_rebooting;

static inline int valid_vcpu(int n)
{
	return likely(n >= 0 && n < KVM_MAX_VCPUS);

@@ -99,10 +105,11 @@ static void ack_flush(void *_completed)

void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	int i, cpu;
	int i, cpu, me;
	cpumask_t cpus;
	struct kvm_vcpu *vcpu;

	me = get_cpu();
	cpus_clear(cpus);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		vcpu = kvm->vcpus[i];

@@ -111,21 +118,24 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
		if (test_and_set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		if (cpu != -1 && cpu != raw_smp_processor_id())
		if (cpu != -1 && cpu != me)
			cpu_set(cpu, cpus);
	}
	if (cpus_empty(cpus))
		return;
		goto out;
	++kvm->stat.remote_tlb_flush;
	smp_call_function_mask(cpus, ack_flush, NULL, 1);
out:
	put_cpu();
}

void kvm_reload_remote_mmus(struct kvm *kvm)
{
	int i, cpu;
	int i, cpu, me;
	cpumask_t cpus;
	struct kvm_vcpu *vcpu;

	me = get_cpu();
	cpus_clear(cpus);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		vcpu = kvm->vcpus[i];

@@ -134,12 +144,14 @@ void kvm_reload_remote_mmus(struct kvm *kvm)
		if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		if (cpu != -1 && cpu != raw_smp_processor_id())
		if (cpu != -1 && cpu != me)
			cpu_set(cpu, cpus);
	}
	if (cpus_empty(cpus))
		return;
		goto out;
	smp_call_function_mask(cpus, ack_flush, NULL, 1);
out:
	put_cpu();
}
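Switching from raw_smp_processor_id() to a get_cpu()/put_cpu() pair is what the new smp_call_function_mask() requirements ask for: preemption must stay off so 'me' cannot go stale between building the mask and sending the IPIs. The bare pattern, with an illustrative handler name:

	cpumask_t mask;
	int me = get_cpu();	/* disables preemption, returns this CPU */

	cpus_clear(mask);
	/* ... set bits for remote CPUs, never 'me' ... */
	if (!cpus_empty(mask))
		smp_call_function_mask(mask, some_handler, NULL, 1 /* wait */);
	put_cpu();		/* re-enables preemption */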
@@ -183,10 +195,23 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
static struct kvm *kvm_create_vm(void)
{
	struct kvm *kvm = kvm_arch_create_vm();
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	struct page *page;
#endif

	if (IS_ERR(kvm))
		goto out;

#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		kfree(kvm);
		return ERR_PTR(-ENOMEM);
	}
	kvm->coalesced_mmio_ring =
			(struct kvm_coalesced_mmio_ring *)page_address(page);
#endif

	kvm->mm = current->mm;
	atomic_inc(&kvm->mm->mm_count);
	spin_lock_init(&kvm->mmu_lock);

@@ -198,6 +223,9 @@ static struct kvm *kvm_create_vm(void)
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	kvm_coalesced_mmio_init(kvm);
#endif
out:
	return kvm;
}

@@ -240,6 +268,10 @@ static void kvm_destroy_vm(struct kvm *kvm)
	spin_unlock(&kvm_lock);
	kvm_io_bus_destroy(&kvm->pio_bus);
	kvm_io_bus_destroy(&kvm->mmio_bus);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	if (kvm->coalesced_mmio_ring != NULL)
		free_page((unsigned long)kvm->coalesced_mmio_ring);
#endif
	kvm_arch_destroy_vm(kvm);
	mmdrop(mm);
}

@@ -333,6 +365,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
	r = -ENOMEM;

	/* Allocate if a slot is being created */
#ifndef CONFIG_S390
	if (npages && !new.rmap) {
		new.rmap = vmalloc(npages * sizeof(struct page *));

@@ -373,10 +406,14 @@ int __kvm_set_memory_region(struct kvm *kvm,
			goto out_free;
		memset(new.dirty_bitmap, 0, dirty_bytes);
	}
#endif /* not defined CONFIG_S390 */

	if (mem->slot >= kvm->nmemslots)
		kvm->nmemslots = mem->slot + 1;

	if (!npages)
		kvm_arch_flush_shadow(kvm);

	*memslot = new;

	r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);

@@ -532,6 +569,7 @@ pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
	struct page *page[1];
	unsigned long addr;
	int npages;
	pfn_t pfn;

	might_sleep();

@@ -544,19 +582,38 @@ pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
	npages = get_user_pages(current, current->mm, addr, 1, 1, 1, page,
				NULL);

	if (npages != 1) {
		get_page(bad_page);
		return page_to_pfn(bad_page);
	}
	if (unlikely(npages != 1)) {
		struct vm_area_struct *vma;

	return page_to_pfn(page[0]);
		vma = find_vma(current->mm, addr);
		if (vma == NULL || addr < vma->vm_start ||
		    !(vma->vm_flags & VM_PFNMAP)) {
			get_page(bad_page);
			return page_to_pfn(bad_page);
		}

		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
		BUG_ON(pfn_valid(pfn));
	} else
		pfn = page_to_pfn(page[0]);

	return pfn;
}

EXPORT_SYMBOL_GPL(gfn_to_pfn);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	return pfn_to_page(gfn_to_pfn(kvm, gfn));
	pfn_t pfn;

	pfn = gfn_to_pfn(kvm, gfn);
	if (pfn_valid(pfn))
		return pfn_to_page(pfn);

	WARN_ON(!pfn_valid(pfn));

	get_page(bad_page);
	return bad_page;
}

EXPORT_SYMBOL_GPL(gfn_to_page);
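For VM_PFNMAP vmas there is no struct page behind the mapping, so the pfn is computed from the vma geometry alone. A worked example with illustrative numbers, not taken from the patch:

	/* Suppose a device BAR was mapped with remap_pfn_range():
	 *	vma->vm_start = 0x7f5500000000
	 *	vma->vm_pgoff = 0x80000		(first pfn of the BAR)
	 *	addr          = 0x7f5500003000	(faulting hva, 3 pages in)
	 * then
	 *	pfn = ((addr - vm_start) >> PAGE_SHIFT) + vm_pgoff
	 *	    = (0x3000 >> 12) + 0x80000 = 0x80003
	 * and pfn_valid(0x80003) is false for such MMIO pfns, which is
	 * exactly what the BUG_ON() above asserts.
	 */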
@@ -569,7 +626,8 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_pfn_clean(pfn_t pfn)
{
	put_page(pfn_to_page(pfn));
	if (pfn_valid(pfn))
		put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);

@@ -594,21 +652,25 @@ EXPORT_SYMBOL_GPL(kvm_set_page_dirty);

void kvm_set_pfn_dirty(pfn_t pfn)
{
	struct page *page = pfn_to_page(pfn);
	if (!PageReserved(page))
		SetPageDirty(page);
	if (pfn_valid(pfn)) {
		struct page *page = pfn_to_page(pfn);
		if (!PageReserved(page))
			SetPageDirty(page);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

void kvm_set_pfn_accessed(pfn_t pfn)
{
	mark_page_accessed(pfn_to_page(pfn));
	if (pfn_valid(pfn))
		mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

void kvm_get_pfn(pfn_t pfn)
{
	get_page(pfn_to_page(pfn));
	if (pfn_valid(pfn))
		get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);

@@ -798,6 +860,10 @@ static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
#ifdef CONFIG_X86
	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->arch.pio_data);
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
#endif
	else
		return VM_FAULT_SIGBUS;

@@ -1121,6 +1187,32 @@ static long kvm_vm_ioctl(struct file *filp,
			goto out;
		break;
	}
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	case KVM_REGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;
		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = -ENXIO;
		r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_UNREGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;
		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = -ENXIO;
		r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
#endif
	default:
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
	}

@@ -1179,7 +1271,6 @@ static int kvm_dev_ioctl_create_vm(void)
static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r = -EINVAL;

	switch (ioctl) {

@@ -1196,7 +1287,7 @@ static long kvm_dev_ioctl(struct file *filp,
		r = kvm_dev_ioctl_create_vm();
		break;
	case KVM_CHECK_EXTENSION:
		r = kvm_dev_ioctl_check_extension((long)argp);
		r = kvm_dev_ioctl_check_extension(arg);
		break;
	case KVM_GET_VCPU_MMAP_SIZE:
		r = -EINVAL;

@@ -1205,6 +1296,9 @@ static long kvm_dev_ioctl(struct file *filp,
		r = PAGE_SIZE;     /* struct kvm_run */
#ifdef CONFIG_X86
		r += PAGE_SIZE;    /* pio data page */
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
		r += PAGE_SIZE;    /* coalesced mmio ring page */
#endif
		break;
	case KVM_TRACE_ENABLE:

@@ -1247,7 +1341,6 @@ static void hardware_disable(void *junk)
	if (!cpu_isset(cpu, cpus_hardware_enabled))
		return;
	cpu_clear(cpu, cpus_hardware_enabled);
	decache_vcpus_on_cpu(cpu);
	kvm_arch_hardware_disable(NULL);
}

@@ -1277,6 +1370,18 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
	return NOTIFY_OK;
}

asmlinkage void kvm_handle_fault_on_reboot(void)
{
	if (kvm_rebooting)
		/* spin while reset goes on */
		while (true)
			;
	/* Fault while not rebooting.  We want the trace. */
	BUG();
}
EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{

@@ -1286,6 +1391,7 @@ static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		 * in vmx root mode.
		 */
		printk(KERN_INFO "kvm: exiting hardware virtualization\n");
		kvm_rebooting = true;
		on_each_cpu(hardware_disable, NULL, 1);
	}
	return NOTIFY_OK;

@@ -1312,14 +1418,15 @@ void kvm_io_bus_destroy(struct kvm_io_bus *bus)
	}
}

struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr)
struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus,
					  gpa_t addr, int len, int is_write)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		if (pos->in_range(pos, addr))
		if (pos->in_range(pos, addr, len, is_write))
			return pos;
	}

@@ -72,11 +72,7 @@ static void kvm_add_trace(void *probe_private, void *call_data,
	rec.cycle_in = p->cycle_in;

	if (rec.cycle_in) {
		u64 cycle = 0;

		cycle = get_cycles();
		rec.u.cycle.cycle_lo = (u32)cycle;
		rec.u.cycle.cycle_hi = (u32)(cycle >> 32);
		rec.u.cycle.cycle_u64 = get_cycles();

		for (i = 0; i < rec.extra_u32; i++)
			rec.u.cycle.extra_u32[i] = va_arg(*args, u32);

@@ -114,8 +110,18 @@ static int kvm_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
{
	struct kvm_trace *kt;

	if (!relay_buf_full(buf))
	if (!relay_buf_full(buf)) {
		if (!prev_subbuf) {
			/*
			 * executed only once when the channel is opened:
			 * save the metadata as the first record
			 */
			subbuf_start_reserve(buf, sizeof(u32));
			*(u32 *)subbuf = 0x12345678;
		}

		return 1;
	}

	kt = buf->chan->private_data;
	atomic_inc(&kt->lost_records);
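Since the record is now packed and carries a single __u64 cycle counter, and every sub-buffer opens with the reserved 0x12345678 metadata word, a reader can walk the relay output as sketched below. This is an assumption about the consuming tool, not code from this commit; rec_size() is an invented helper for the variable extra_u32 tail:

	void parse_subbuf(const void *buf, size_t len)
	{
		const unsigned char *p = buf, *end = p + len;

		if (len >= sizeof(u32) && *(const u32 *)p == 0x12345678)
			p += sizeof(u32);	/* skip the metadata word */
		while (p + sizeof(struct kvm_trace_rec) <= end) {
			const struct kvm_trace_rec *rec = (const void *)p;
			/* rec->u.cycle.cycle_u64 holds the timestamp when
			 * cycles were enabled for this record */
			p += rec_size(rec);	/* invented helper */
		}
	}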