KVM: nVMX: Add support for capturing highest observable L2 TSC
The L1 hypervisor may include the IA32_TIME_STAMP_COUNTER MSR in the
vmcs12 VM-exit MSR-store area as a way of determining the highest TSC
value that might have been observed by L2 prior to VM-exit. The current
implementation does not capture a very tight bound on this value. To
tighten the bound, add the IA32_TIME_STAMP_COUNTER MSR to the vmcs02
VM-exit MSR-store area whenever it appears in the vmcs12 VM-exit
MSR-store area. When L0 processes the vmcs12 VM-exit MSR-store area
during the emulation of an L2->L1 VM-exit, special-case the
IA32_TIME_STAMP_COUNTER MSR, using the value stored in the vmcs02
VM-exit MSR-store area to derive the value to be stored in the vmcs12
VM-exit MSR-store area.

Reviewed-by: Liran Alon <liran.alon@oracle.com>
Reviewed-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Aaron Lewis <aaronlewis@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 662f1d1d19
parent ef0fbcac3f
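A note on the derivation: the VM-exit MSR-store mechanism records an MSR's post-exit value, so the TSC captured in the vmcs02 store area is a raw host-perspective reading taken by hardware at the L2 VM-exit. kvm_read_l1_tsc() turns that reading into the value L1 would observe. A minimal sketch of the conversion, mirroring kvm_read_l1_tsc() in arch/x86/kvm/x86.c as of this patch (field and helper names are taken from that tree; treat the body as illustrative):

/* Convert a raw host TSC reading into L1's view of the TSC: scale the
 * host value by L1's TSC ratio, then apply L1's TSC offset. */
u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
{
	return vcpu->arch.tsc_offset + kvm_scale_tsc(vcpu, host_tsc);
}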
arch/x86/kvm/vmx/nested.c

@@ -940,6 +940,37 @@ fail:
 	return i + 1;
 }
 
+static bool nested_vmx_get_vmexit_msr_value(struct kvm_vcpu *vcpu,
+					    u32 msr_index,
+					    u64 *data)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	/*
+	 * If the L0 hypervisor stored a more accurate value for the TSC that
+	 * does not include the time taken for emulation of the L2->L1
+	 * VM-exit in L0, use the more accurate value.
+	 */
+	if (msr_index == MSR_IA32_TSC) {
+		int index = vmx_find_msr_index(&vmx->msr_autostore.guest,
+					       MSR_IA32_TSC);
+
+		if (index >= 0) {
+			u64 val = vmx->msr_autostore.guest.val[index].value;
+
+			*data = kvm_read_l1_tsc(vcpu, val);
+			return true;
+		}
+	}
+
+	if (kvm_get_msr(vcpu, msr_index, data)) {
+		pr_debug_ratelimited("%s cannot read MSR (0x%x)\n", __func__,
+				     msr_index);
+		return false;
+	}
+	return true;
+}
+
 static bool read_and_check_msr_entry(struct kvm_vcpu *vcpu, u64 gpa, int i,
 				     struct vmx_msr_entry *e)
 {
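The vmcs02-stored value gives the tighter bound because the CPU writes the MSR-store area as part of the hardware VM-exit from L2, before L0 begins emulating the L2->L1 transition; the kvm_get_msr() fallback only reads the TSC later, during that emulation, so its value includes the emulation time.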
@@ -974,12 +1005,9 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 		if (!read_and_check_msr_entry(vcpu, gpa, i, &e))
 			return -EINVAL;
 
-		if (kvm_get_msr(vcpu, e.index, &data)) {
-			pr_debug_ratelimited(
-				"%s cannot read MSR (%u, 0x%x)\n",
-				__func__, i, e.index);
+		if (!nested_vmx_get_vmexit_msr_value(vcpu, e.index, &data))
 			return -EINVAL;
-		}
+
 		if (kvm_vcpu_write_guest(vcpu,
 					 gpa + i * sizeof(e) +
 					     offsetof(struct vmx_msr_entry, value),
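The guest-physical-address arithmetic in the write above depends only on the SDM-defined 16-byte format of an MSR-store entry. A self-contained check of that layout (the struct below is a local illustration of the format, not the kernel's declaration):

#include <stddef.h>
#include <stdint.h>

/* SDM-defined format of one VM-exit MSR-store area entry. */
struct vmx_msr_entry {
	uint32_t index;		/* MSR number, e.g. IA32_TIME_STAMP_COUNTER */
	uint32_t reserved;
	uint64_t value;		/* MSR value saved on VM-exit */
};

/* Entry i's value field sits at gpa + i * 16 + 8, which is exactly
 * gpa + i * sizeof(e) + offsetof(struct vmx_msr_entry, value). */
_Static_assert(sizeof(struct vmx_msr_entry) == 16, "16 bytes per entry");
_Static_assert(offsetof(struct vmx_msr_entry, value) == 8, "value at +8");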
@@ -993,6 +1021,60 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 	return 0;
 }
 
+static bool nested_msr_store_list_has_msr(struct kvm_vcpu *vcpu, u32 msr_index)
+{
+	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+	u32 count = vmcs12->vm_exit_msr_store_count;
+	u64 gpa = vmcs12->vm_exit_msr_store_addr;
+	struct vmx_msr_entry e;
+	u32 i;
+
+	for (i = 0; i < count; i++) {
+		if (!read_and_check_msr_entry(vcpu, gpa, i, &e))
+			return false;
+
+		if (e.index == msr_index)
+			return true;
+	}
+	return false;
+}
+
+static void prepare_vmx_msr_autostore_list(struct kvm_vcpu *vcpu,
+					   u32 msr_index)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	struct vmx_msrs *autostore = &vmx->msr_autostore.guest;
+	bool in_vmcs12_store_list;
+	int msr_autostore_index;
+	bool in_autostore_list;
+	int last;
+
+	msr_autostore_index = vmx_find_msr_index(autostore, msr_index);
+	in_autostore_list = msr_autostore_index >= 0;
+	in_vmcs12_store_list = nested_msr_store_list_has_msr(vcpu, msr_index);
+
+	if (in_vmcs12_store_list && !in_autostore_list) {
+		if (autostore->nr == NR_LOADSTORE_MSRS) {
+			/*
+			 * Emulated VMEntry does not fail here.  Instead a less
+			 * accurate value will be returned by
+			 * nested_vmx_get_vmexit_msr_value() using kvm_get_msr()
+			 * instead of reading the value from the vmcs02 VMExit
+			 * MSR-store area.
+			 */
+			pr_warn_ratelimited(
+				"Not enough msr entries in msr_autostore.  Can't add msr %x\n",
+				msr_index);
+			return;
+		}
+		last = autostore->nr++;
+		autostore->val[last].index = msr_index;
+	} else if (!in_vmcs12_store_list && in_autostore_list) {
+		last = --autostore->nr;
+		autostore->val[msr_autostore_index] = autostore->val[last];
+	}
+}
+
 static bool nested_cr3_valid(struct kvm_vcpu *vcpu, unsigned long val)
 {
 	unsigned long invalid_mask;
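prepare_vmx_msr_autostore_list() keeps the autostore array dense by overwriting a removed entry with the last one and shrinking the count; order does not matter because lookups via vmx_find_msr_index() are linear scans. A toy model of that removal idiom, with hypothetical names, purely for illustration:

#include <stdint.h>

struct toy_entry { uint32_t index; uint64_t value; };
struct toy_list  { struct toy_entry val[8]; unsigned int nr; };

/* O(1) removal: move the last entry into slot i and shrink the list.
 * Caller guarantees i < l->nr. The array stays dense but unordered. */
static void toy_remove(struct toy_list *l, unsigned int i)
{
	l->val[i] = l->val[--l->nr];
}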
@@ -2038,7 +2120,7 @@ static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx)
 	 * addresses are constant (for vmcs02), the counts can change based
 	 * on L2's behavior, e.g. switching to/from long mode.
 	 */
-	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
+	vmcs_write64(VM_EXIT_MSR_STORE_ADDR, __pa(vmx->msr_autostore.guest.val));
 	vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
 	vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
 
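This write follows the comment retained above it: the physical address of the vmcs02 MSR-store area never changes once allocated, so it joins the other constant-state writes here, while the store count now varies with whether vmcs12 lists the TSC and is therefore programmed in prepare_vmcs02_rare() in the next hunk.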
@@ -2306,6 +2388,13 @@ static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
 		vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3);
 	}
 
+	/*
+	 * Make sure the msr_autostore list is up to date before we set the
+	 * count in the vmcs02.
+	 */
+	prepare_vmx_msr_autostore_list(&vmx->vcpu, MSR_IA32_TSC);
+
+	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, vmx->msr_autostore.guest.nr);
 	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
 	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
 
arch/x86/kvm/vmx/vmx.c

@@ -833,7 +833,7 @@ static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
 	vm_exit_controls_clearbit(vmx, exit);
 }
 
-static int vmx_find_msr_index(struct vmx_msrs *m, u32 msr)
+int vmx_find_msr_index(struct vmx_msrs *m, u32 msr)
 {
 	unsigned int i;
 
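Only the linkage of vmx_find_msr_index() changes here, dropping static so nested.c can call it; its body is untouched by this patch. For context, a sketch of what it does, reconstructed from the same tree (treat the details as illustrative):

int vmx_find_msr_index(struct vmx_msrs *m, u32 msr)
{
	unsigned int i;

	/* Linear scan of the small, dense, unordered autoload/autostore
	 * array; returns the slot index, or -ENOENT if msr is absent. */
	for (i = 0; i < m->nr; ++i)
		if (m->val[i].index == msr)
			return i;
	return -ENOENT;
}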
arch/x86/kvm/vmx/vmx.h

@@ -233,6 +233,10 @@ struct vcpu_vmx {
 		struct vmx_msrs host;
 	} msr_autoload;
 
+	struct msr_autostore {
+		struct vmx_msrs guest;
+	} msr_autostore;
+
 	struct {
 		int vm86_active;
 		ulong save_rflags;
@@ -337,6 +341,7 @@ void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
 struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr);
 void pt_update_intercept_for_msr(struct vcpu_vmx *vmx);
 void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
+int vmx_find_msr_index(struct vmx_msrs *m, u32 msr);
 
 #define POSTED_INTR_ON 0
 #define POSTED_INTR_SN 1