KVM: x86: Untag addresses for LAM emulation where applicable

Upstream commit: b39bd520a60c667a339e315ce7a3de2f7178f6e3
Conflict: none

Stub in vmx_get_untagged_addr() and wire up calls from the emulator (via
get_untagged_addr()) and "direct" calls from various VM-Exit handlers in
VMX where LAM untagging is supposed to be applied.  Defer implementing
the guts of vmx_get_untagged_addr() to future patches purely to make the
changes easier to consume.
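
For context, the emulator-side wiring mentioned above (added by the
preceding patch in this series) has roughly the following shape.  This
is a from-memory sketch, not part of this diff; the callback simply
forwards to the vendor hook via kvm_x86_ops and leaves the address
untouched when no hook is registered:

        static gva_t emulator_get_untagged_addr(struct x86_emulate_ctxt *ctxt,
                                                gva_t addr, unsigned int flags)
        {
                /* No vendor hook registered: leave the address untouched. */
                if (!kvm_x86_ops.get_untagged_addr)
                        return addr;

                return static_call(kvm_x86_get_untagged_addr)(emul_to_vcpu(ctxt),
                                                              addr, flags);
        }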

LAM is active only for 64-bit linear addresses, and several types of
accesses are exempted (see the untagging sketch after the notes below).

- Cases that need the address untagged
  Operand(s) of VMX instructions and INVPCID (handled in
  get_vmx_mem_address()).
  Operand(s) of SGX ENCLS (handled in sgx_get_encls_gva()).

- Cases LAM doesn't apply to (no change needed)
  Operand of INVLPG.
  Linear address in INVPCID descriptor.
  Linear address in INVVPID descriptor.
  BASEADDR specified in SECS of ECREATE.

Note:
  - LAM doesn't apply to writes to control registers or MSRs.
  - LAM masking is applied before walking page tables, i.e. the faulting
    linear address in CR2 doesn't contain the metadata.
  - The guest linear address saved in VMCS doesn't contain metadata.
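
Although this patch leaves vmx_get_untagged_addr() as a pass-through,
the untagging that later patches add boils down to sign-extending the
address from the highest non-metadata bit while preserving bit 63.  A
minimal sketch, assuming the kernel's sign_extend64()/BIT_ULL() helpers
and a hypothetical lam_bit parameter (e.g. 56 for LAM_U57, 47 for
LAM_U48); the helper name is illustrative only:

        /*
         * Illustration only; the real logic lands in later patches.
         * Bits 62:lam_bit+1 are metadata under LAM, so sign-extend from
         * lam_bit to discard them, while keeping bit 63 so the untagged
         * address remains canonical for its user/supervisor half.
         */
        static gva_t lam_untag_sketch(gva_t gva, int lam_bit)
        {
                return (sign_extend64(gva, lam_bit) & ~BIT_ULL(63)) |
                       (gva & BIT_ULL(63));
        }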

Intel-SIG: commit b39bd520a60c KVM: x86: Untag addresses for LAM emulation where applicable
Backport KVM Linear Address Masking (LAM) support.

Signed-off-by: Binbin Wu <binbin.wu@linux.intel.com>
Reviewed-by: Chao Gao <chao.gao@intel.com>
Tested-by: Xuelian Guo <xuelian.guo@intel.com>
Link: https://lore.kernel.org/r/20230913124227.12574-10-binbin.wu@linux.intel.com
[sean: massage changelog]
Signed-off-by: Sean Christopherson <seanjc@google.com>
[ Zhiquan Li: amend commit log ]
Signed-off-by: Zhiquan Li <zhiquan1.li@intel.com>

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c

@@ -5048,6 +5048,7 @@ int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
 		else
 			*ret = off;
 
+		*ret = vmx_get_untagged_addr(vcpu, *ret, 0);
 		/* Long mode: #GP(0)/#SS(0) if the memory address is in a
 		 * non-canonical form. This is the only check on the memory
 		 * destination for long mode!
@@ -5865,6 +5866,10 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
 	vpid02 = nested_get_vpid02(vcpu);
 	switch (type) {
 	case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
+		/*
+		 * LAM doesn't apply to addresses that are inputs to TLB
+		 * invalidation.
+		 */
 		if (!operand.vpid ||
 		    is_noncanonical_address(operand.gla, vcpu))
 			return nested_vmx_fail(vcpu,

diff --git a/arch/x86/kvm/vmx/sgx.c b/arch/x86/kvm/vmx/sgx.c

@@ -37,6 +37,7 @@ static int sgx_get_encls_gva(struct kvm_vcpu *vcpu, unsigned long offset,
 	if (!IS_ALIGNED(*gva, alignment)) {
 		fault = true;
 	} else if (likely(is_64_bit_mode(vcpu))) {
+		*gva = vmx_get_untagged_addr(vcpu, *gva, 0);
 		fault = is_noncanonical_address(*gva, vcpu);
 	} else {
 		*gva &= 0xffffffff;

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c

@@ -8354,6 +8354,11 @@ static void vmx_vm_destroy(struct kvm *kvm)
 	free_pages((unsigned long)kvm_vmx->pid_table, vmx_get_pid_table_order(kvm));
 }
 
+gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags)
+{
+	return gva;
+}
+
 static struct kvm_x86_ops vmx_x86_ops __initdata = {
 	.name = KBUILD_MODNAME,
 
@@ -8492,6 +8497,8 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
 	.complete_emulated_msr = kvm_complete_insn_gp,
 
 	.vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
+
+	.get_untagged_addr = vmx_get_untagged_addr,
 };
 
 static unsigned int vmx_handle_intel_pt_intr(void)

diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h

@@ -421,6 +421,8 @@ void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
 u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
 u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);
 
+gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags);
+
 static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
 					     int type, bool value)
 {

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c

@@ -13523,6 +13523,10 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
 
 	switch (type) {
 	case INVPCID_TYPE_INDIV_ADDR:
+		/*
+		 * LAM doesn't apply to addresses that are inputs to TLB
+		 * invalidation.
+		 */
 		if ((!pcid_enabled && (operand.pcid != 0)) ||
 		    is_noncanonical_address(operand.gla, vcpu)) {
 			kvm_inject_gp(vcpu, 0);