kvm: x86: use correct privilege level for sgdt/sidt/fxsave/fxrstor access
The functions that were used in the emulation of fxrstor, fxsave, sgdt and
sidt were originally meant for task switching, and as such they did not
check privilege levels. This is very bad when the same functions are used
in the emulation of unprivileged instructions. This is CVE-2018-10853.
The obvious fix is to add a new argument to ops->read_std and ops->write_std,
which decides whether the access is a "system" access or should use the
processor's CPL.
Fixes: 129a72a0d3 ("KVM: x86: Introduce segmented_write_std", 2017-01-12)
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 3c9fa24ca7 (parent ce14e868a5)
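To illustrate the idea outside the patch: the stand-alone sketch below mirrors the access-flag selection that emulator_read_std()/emulator_write_std() perform after this change. It is not kernel code; access_flags() and the main() driver are illustrative only, and the PFERR_* bit positions follow the x86 page-fault error code (bit 1 = write access, bit 2 = user-mode access).

/*
 * Stand-alone sketch, not kernel code: a "system" access is always treated
 * as a supervisor access, while a normal instruction access picks up
 * PFERR_USER_MASK when the guest is running at CPL 3.
 */
#include <stdbool.h>
#include <stdio.h>

#define PFERR_WRITE_MASK (1u << 1)   /* x86 #PF error code: write access */
#define PFERR_USER_MASK  (1u << 2)   /* x86 #PF error code: user-mode access */

static unsigned int access_flags(bool write, bool system, unsigned int cpl)
{
	unsigned int access = write ? PFERR_WRITE_MASK : 0;

	if (!system && cpl == 3)     /* honour the guest's privilege level */
		access |= PFERR_USER_MASK;
	return access;
}

int main(void)
{
	/* sgdt/fxsave store executed by a CPL-3 guest: user bit must be set. */
	printf("segmented write at CPL 3: %#x\n", access_flags(true, false, 3));
	/* Descriptor-table reads during a task switch: forced supervisor access. */
	printf("system read at CPL 3:     %#x\n", access_flags(false, true, 3));
	return 0;
}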
@@ -107,11 +107,12 @@ struct x86_emulate_ops {
 	 *  @addr:  [IN ] Linear address from which to read.
 	 *  @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
 	 *  @bytes: [IN ] Number of bytes to read from memory.
+	 *  @system:[IN ] Whether the access is forced to be at CPL0.
 	 */
 	int (*read_std)(struct x86_emulate_ctxt *ctxt,
 			unsigned long addr, void *val,
 			unsigned int bytes,
-			struct x86_exception *fault);
+			struct x86_exception *fault, bool system);
 
 	/*
 	 * read_phys: Read bytes of standard (non-emulated/special) memory.
@@ -129,10 +130,11 @@ struct x86_emulate_ops {
 	 *  @addr:  [IN ] Linear address to which to write.
 	 *  @val:   [OUT] Value write to memory, zero-extended to 'u_long'.
 	 *  @bytes: [IN ] Number of bytes to write to memory.
+	 *  @system:[IN ] Whether the access is forced to be at CPL0.
 	 */
 	int (*write_std)(struct x86_emulate_ctxt *ctxt,
 			unsigned long addr, void *val, unsigned int bytes,
-			struct x86_exception *fault);
+			struct x86_exception *fault, bool system);
 	/*
 	 * fetch: Read bytes of standard (non-emulated/special) memory.
 	 *        Used for instruction fetch.
@@ -815,14 +815,14 @@ static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
 static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
 			      void *data, unsigned size)
 {
-	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
+	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
 }
 
 static int linear_write_system(struct x86_emulate_ctxt *ctxt,
 			       ulong linear, void *data,
 			       unsigned int size)
 {
-	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception);
+	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
 }
 
 static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
@@ -836,7 +836,7 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
 	rc = linearize(ctxt, addr, size, false, &linear);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
-	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
+	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);
 }
 
 static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
@@ -850,7 +850,7 @@ static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
 	rc = linearize(ctxt, addr, size, true, &linear);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
-	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception);
+	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);
 }
 
 /*
@@ -2928,12 +2928,12 @@ static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
 #ifdef CONFIG_X86_64
 	base |= ((u64)base3) << 32;
 #endif
-	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
+	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true);
 	if (r != X86EMUL_CONTINUE)
 		return false;
 	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
 		return false;
-	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
+	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true);
 	if (r != X86EMUL_CONTINUE)
 		return false;
 	if ((perm >> bit_idx) & mask)
@@ -4811,10 +4811,15 @@ EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
 
 static int emulator_read_std(struct x86_emulate_ctxt *ctxt,
 			     gva_t addr, void *val, unsigned int bytes,
-			     struct x86_exception *exception)
+			     struct x86_exception *exception, bool system)
 {
 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
-	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);
+	u32 access = 0;
+
+	if (!system && kvm_x86_ops->get_cpl(vcpu) == 3)
+		access |= PFERR_USER_MASK;
+
+	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception);
 }
 
 static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt,
@@ -4858,12 +4863,17 @@ out:
 }
 
 static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val,
-			      unsigned int bytes, struct x86_exception *exception)
+			      unsigned int bytes, struct x86_exception *exception,
+			      bool system)
 {
 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+	u32 access = PFERR_WRITE_MASK;
+
+	if (!system && kvm_x86_ops->get_cpl(vcpu) == 3)
+		access |= PFERR_USER_MASK;
 
 	return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
-					   PFERR_WRITE_MASK, exception);
+					   access, exception);
 }
 
 int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
@@ -4882,8 +4892,8 @@ int handle_ud(struct kvm_vcpu *vcpu)
 	struct x86_exception e;
 
 	if (force_emulation_prefix &&
-	    kvm_read_guest_virt(&vcpu->arch.emulate_ctxt,
-				kvm_get_linear_rip(vcpu), sig, sizeof(sig), &e) == 0 &&
+	    kvm_read_guest_virt(vcpu, kvm_get_linear_rip(vcpu),
+				sig, sizeof(sig), &e) == 0 &&
 	    memcmp(sig, "\xf\xbkvm", sizeof(sig)) == 0) {
 		kvm_rip_write(vcpu, kvm_rip_read(vcpu) + sizeof(sig));
 		emul_type = 0;