KVM: SVM: Implement emulation of vm_cr msr
This patch implements emulation of the VM_CR MSR for nested SVM.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
This commit is contained in:
parent
2e554e8d67
commit
4a810181c8
|
@ -115,6 +115,10 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
|
||||||
#define SVM_IOIO_SIZE_MASK (7 << SVM_IOIO_SIZE_SHIFT)
#define SVM_IOIO_ASIZE_MASK (7 << SVM_IOIO_ASIZE_SHIFT)

/*
 * VM_CR MSR bit masks. Only the low five bits are architecturally
 * defined; everything above is reserved and must be written as zero.
 * SVM_LOCK locks further changes to SVMDIS; SVM_DIS disables SVM.
 */
#define SVM_VM_CR_VALID_MASK	0x001fULL
#define SVM_VM_CR_SVM_LOCK_MASK 0x0008ULL
#define SVM_VM_CR_SVM_DIS_MASK  0x0010ULL
|
||||||
|
|
||||||
struct __attribute__ ((__packed__)) vmcb_seg {
|
struct __attribute__ ((__packed__)) vmcb_seg {
|
||||||
u16 selector;
|
u16 selector;
|
||||||
u16 attrib;
|
u16 attrib;
|
||||||
|
|
|
@ -71,6 +71,7 @@ struct kvm_vcpu;
|
||||||
struct nested_state {
|
struct nested_state {
|
||||||
struct vmcb *hsave;
|
struct vmcb *hsave;
|
||||||
u64 hsave_msr;
|
u64 hsave_msr;
|
||||||
|
u64 vm_cr_msr;
|
||||||
u64 vmcb;
|
u64 vmcb;
|
||||||
|
|
||||||
/* These are the merged vectors */
|
/* These are the merged vectors */
|
||||||
|
@ -2280,7 +2281,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
|
||||||
*data = svm->nested.hsave_msr;
|
*data = svm->nested.hsave_msr;
|
||||||
break;
|
break;
|
||||||
case MSR_VM_CR:
|
case MSR_VM_CR:
|
||||||
*data = 0;
|
*data = svm->nested.vm_cr_msr;
|
||||||
break;
|
break;
|
||||||
case MSR_IA32_UCODE_REV:
|
case MSR_IA32_UCODE_REV:
|
||||||
*data = 0x01000065;
|
*data = 0x01000065;
|
||||||
|
@ -2310,6 +2311,31 @@ static int rdmsr_interception(struct vcpu_svm *svm)
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
 * Emulate a guest write to the VM_CR MSR (nested SVM).
 *
 * Returns 0 on success, 1 to signal an invalid write (caller injects #GP):
 * either reserved bits were set, or the write would disable SVM while the
 * guest still has EFER.SVME enabled.
 */
static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int chg_mask = SVM_VM_CR_VALID_MASK;

	/* Reserved bits must be written as zero. */
	if (data & ~SVM_VM_CR_VALID_MASK)
		return 1;

	/*
	 * Once SVMDIS is set, SVM_LOCK and SVMDIS themselves become
	 * read-only: mask them out of the writable bits.
	 */
	if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
		chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);

	/* Merge the writable bits of the new value into the shadow MSR. */
	svm->nested.vm_cr_msr = (svm->nested.vm_cr_msr & ~chg_mask) |
				(data & chg_mask);

	/* Disabling SVM while EFER.SVME is set is not allowed. */
	if ((svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK) &&
	    (vcpu->arch.efer & EFER_SVME))
		return 1;

	return 0;
}
|
||||||
|
|
||||||
static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
|
static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
|
||||||
{
|
{
|
||||||
struct vcpu_svm *svm = to_svm(vcpu);
|
struct vcpu_svm *svm = to_svm(vcpu);
|
||||||
|
@ -2376,6 +2402,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
|
||||||
svm->nested.hsave_msr = data;
|
svm->nested.hsave_msr = data;
|
||||||
break;
|
break;
|
||||||
case MSR_VM_CR:
|
case MSR_VM_CR:
|
||||||
|
return svm_set_vm_cr(vcpu, data);
|
||||||
case MSR_VM_IGNNE:
|
case MSR_VM_IGNNE:
|
||||||
pr_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
|
pr_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
|
||||||
break;
|
break;
|
||||||
|
|
Loading…
Reference in New Issue