KVM: x86 emulator: add framework for instruction intercepts

When running in guest mode, certain instructions can be intercepted by
hardware.  This also holds for nested guests running on emulated
virtualization hardware, in particular for instructions emulated by kvm
itself.

This patch adds a framework for intercepting instructions.  If an
instruction is marked for interception, and if we're running in guest
mode, a callback is called to check whether an intercept is needed or
not.  The callback is called at three points in time: immediately after
beginning execution, after checking for privilege exceptions, and after
checking for memory exceptions.  This suits the different interception
points defined for different instructions and for the various
virtualization instruction sets.
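
To illustrate the intended use (a sketch only, not code from this
patch): a vendor backend implementing the new ->intercept() callback
could compare the stage against the point at which the architecture
checks the given intercept, and abort emulation when the nested
hypervisor has that intercept armed.  The helpers svm_intercept_stage()
and nested_svm_intercepted() below are hypothetical names invented for
the example.

	static int svm_emul_intercept(struct x86_emulate_ctxt *ctxt,
				      enum x86_intercept intercept,
				      enum x86_intercept_stage stage)
	{
		/* Only act at the stage where the architecture defines
		 * the check for this instruction (hypothetical lookup). */
		if (stage != svm_intercept_stage(intercept))
			return X86EMUL_CONTINUE;

		/* If the nested hypervisor armed this intercept, stop
		 * emulating; the caller emulates a #VMEXIT instead. */
		if (nested_svm_intercepted(ctxt, intercept))
			return X86EMUL_INTERCEPTED;

		return X86EMUL_CONTINUE;
	}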

In addition, a new exit code, X86EMUL_INTERCEPTED, is defined, which any
callback or memory access may return, allowing the more complicated
intercepts to be implemented in existing callbacks.
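
For example (again a sketch, not part of this patch), an MSR access
callback could notice mid-emulation that the access itself is
intercepted and return the new code directly; nested_msr_intercepted()
is a hypothetical helper:

	static int emulator_get_msr_nested(struct kvm_vcpu *vcpu,
					   u32 msr_index, u64 *pdata)
	{
		/* An MSR read that the nested hypervisor intercepts
		 * aborts emulation from within the callback itself. */
		if (is_guest_mode(vcpu) &&
		    nested_msr_intercepted(vcpu, msr_index))
			return X86EMUL_INTERCEPTED;

		return kvm_get_msr(vcpu, msr_index, pdata);
	}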

Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
commit c4f035c60d (parent aa97bb4891)
Author: Avi Kivity <avi@redhat.com>
Date:   2011-04-04 12:39:22 +02:00

3 changed files with 55 additions and 0 deletions

diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h

@@ -14,6 +14,8 @@
 #include <asm/desc_defs.h>
 
 struct x86_emulate_ctxt;
+enum x86_intercept;
+enum x86_intercept_stage;
 
 struct x86_exception {
 	u8 vector;
@@ -62,6 +64,7 @@ struct x86_exception {
 #define X86EMUL_RETRY_INSTR     3 /* retry the instruction for some reason */
 #define X86EMUL_CMPXCHG_FAILED  4 /* cmpxchg did not see expected value */
 #define X86EMUL_IO_NEEDED       5 /* IO is needed to complete emulation */
+#define X86EMUL_INTERCEPTED     6 /* Intercepted by nested VMCB/VMCS */
 
 struct x86_emulate_ops {
 	/*
@@ -160,6 +163,9 @@ struct x86_emulate_ops {
 	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
 	void (*get_fpu)(struct x86_emulate_ctxt *ctxt); /* disables preempt */
 	void (*put_fpu)(struct x86_emulate_ctxt *ctxt); /* reenables preempt */
+	int (*intercept)(struct x86_emulate_ctxt *ctxt,
+			 enum x86_intercept intercept,
+			 enum x86_intercept_stage stage);
 };
 
 typedef u32 __attribute__((vector_size(16))) sse128_t;
@@ -203,6 +209,7 @@ struct read_cache {
 struct decode_cache {
 	u8 twobyte;
 	u8 b;
+	u8 intercept;
 	u8 lock_prefix;
 	u8 rep_prefix;
 	u8 op_bytes;
@@ -244,6 +251,7 @@ struct x86_emulate_ctxt {
 	/* interruptibility state, as a result of execution of STI or MOV SS */
 	int interruptibility;
 
+	bool guest_mode; /* guest running a nested guest */
 	bool perm_ok; /* do not check permissions if true */
 	bool only_vendor_specific_insn;
@@ -265,6 +273,18 @@ struct x86_emulate_ctxt {
 #define X86EMUL_MODE_PROT32   4	/* 32-bit protected mode. */
 #define X86EMUL_MODE_PROT64   8	/* 64-bit (long) mode. */
 
+enum x86_intercept_stage {
+	X86_ICPT_PRE_EXCEPT,
+	X86_ICPT_POST_EXCEPT,
+	X86_ICPT_POST_MEMACCESS,
+};
+
+enum x86_intercept {
+	x86_intercept_none,
+
+	nr_x86_intercepts
+};
+
 /* Host execution mode. */
 #if defined(CONFIG_X86_32)
 #define X86EMUL_MODE_HOST X86EMUL_MODE_PROT32

diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c

@@ -104,6 +104,7 @@
 
 struct opcode {
 	u32 flags;
+	u8 intercept;
 	union {
 		int (*execute)(struct x86_emulate_ctxt *ctxt);
 		struct opcode *group;
@@ -2423,10 +2424,13 @@ static int em_movdqu(struct x86_emulate_ctxt *ctxt)
 }
 
 #define D(_y) { .flags = (_y) }
+#define DI(_y, _i) { .flags = (_y), .intercept = x86_intercept_##_i }
 #define N    D(0)
 #define G(_f, _g) { .flags = ((_f) | Group), .u.group = (_g) }
 #define GD(_f, _g) { .flags = ((_f) | Group | GroupDual), .u.gdual = (_g) }
 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
+#define II(_f, _e, _i) \
+	{ .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i }
 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
 
 #define D2bv(_f) D((_f) | ByteOp), D(_f)
@@ -2867,6 +2871,7 @@ done_prefixes:
 	}
 
 	c->execute = opcode.u.execute;
+	c->intercept = opcode.intercept;
 
 	/* Unrecognised? */
 	if (c->d == 0 || (c->d & Undefined))
@@ -3116,12 +3121,26 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 		goto done;
 	}
 
+	if (unlikely(ctxt->guest_mode) && c->intercept) {
+		rc = ops->intercept(ctxt, c->intercept,
+				    X86_ICPT_PRE_EXCEPT);
+		if (rc != X86EMUL_CONTINUE)
+			goto done;
+	}
+
 	/* Privileged instruction can be executed only in CPL=0 */
 	if ((c->d & Priv) && ops->cpl(ctxt->vcpu)) {
 		rc = emulate_gp(ctxt, 0);
 		goto done;
 	}
 
+	if (unlikely(ctxt->guest_mode) && c->intercept) {
+		rc = ops->intercept(ctxt, c->intercept,
+				    X86_ICPT_POST_EXCEPT);
+		if (rc != X86EMUL_CONTINUE)
+			goto done;
+	}
+
 	if (c->rep_prefix && (c->d & String)) {
 		/* All REP prefixes have the same first termination condition */
 		if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0) {
@@ -3160,6 +3179,13 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 
 special_insn:
 
+	if (unlikely(ctxt->guest_mode) && c->intercept) {
+		rc = ops->intercept(ctxt, c->intercept,
+				    X86_ICPT_POST_MEMACCESS);
+		if (rc != X86EMUL_CONTINUE)
+			goto done;
+	}
+
 	if (c->execute) {
 		rc = c->execute(ctxt);
 		if (rc != X86EMUL_CONTINUE)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c

@@ -4297,6 +4297,13 @@ static void emulator_put_fpu(struct x86_emulate_ctxt *ctxt)
 	preempt_enable();
 }
 
+static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
+			      enum x86_intercept intercept,
+			      enum x86_intercept_stage stage)
+{
+	return X86EMUL_CONTINUE;
+}
+
 static struct x86_emulate_ops emulate_ops = {
 	.read_std            = kvm_read_guest_virt_system,
 	.write_std           = kvm_write_guest_virt_system,
@@ -4322,6 +4329,7 @@ static struct x86_emulate_ops emulate_ops = {
 	.get_msr             = kvm_get_msr,
 	.get_fpu             = emulator_get_fpu,
 	.put_fpu             = emulator_put_fpu,
+	.intercept           = emulator_intercept,
 };
 
 static void cache_all_regs(struct kvm_vcpu *vcpu)
@@ -4376,6 +4384,7 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
 		? X86EMUL_MODE_VM86 : cs_l
 		? X86EMUL_MODE_PROT64 : cs_db
 		? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
+	vcpu->arch.emulate_ctxt.guest_mode = is_guest_mode(vcpu);
 	memset(c, 0, sizeof(struct decode_cache));
 	memcpy(c->regs, vcpu->arch.regs, sizeof c->regs);
 }
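
As a usage note: the new DI()/II() macros let opcode-table entries carry
an intercept id alongside their decode flags.  The entries below are
hypothetical; this patch only defines x86_intercept_none, and the real
intercept enum values, handlers, and table annotations arrive in
follow-on patches of the series.

	/* Hypothetical table entries: the intercept names and em_clts()
	 * are placeholders; only the DI()/II() macro shapes come from
	 * this patch. */
	static struct opcode example_group[] = {
		DI(SrcNone | Priv, some_insn),         /* flags + intercept id */
		II(ImplicitOps | Priv, em_clts, clts), /* handler + intercept id */
	};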