KVM: arm64: Store vcpu on the stack during __guest_enter()

KVM uses tpidr_el2 as its private vcpu register, which makes sense for
non-vhe world switch as only KVM can access this register. This means
vhe Linux has to use tpidr_el1, which KVM has to save/restore as part
of the host context.

If the SDEI handler code runs behind KVM's back, it mustn't access any
per-cpu variables. To allow this on systems with vhe we need to make
the host use tpidr_el2, saving KVM from having to save/restore it.

__guest_enter() already stores the host_ctxt on the stack; do the same
with the vcpu (see the stack-layout sketch below).

Signed-off-by: James Morse <james.morse@arm.com>
Reviewed-by: Christoffer Dall <cdall@linaro.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
James Morse 2018-01-08 15:38:03 +00:00 committed by Catalin Marinas
parent 3423cab3e0
commit 32b03d1059
2 changed files with 10 additions and 6 deletions
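
Before the diffs, a quick model of the frame __guest_enter now builds. This is a
plain C sketch, not part of the patch; host_ctxt and vcpu are placeholder values
standing in for the pointers held in x1 and x0.

/*
 * "stp x1, x0, [sp, #-16]!" on a full-descending stack: one 16-byte
 * push with x1 (host_ctxt) at [sp] and x0 (vcpu) at [sp, #8].
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t stack[4];
	uint64_t *sp = &stack[4];      /* empty descending stack */
	uint64_t host_ctxt = 0x1000;   /* stands in for x1 */
	uint64_t vcpu = 0x2000;        /* stands in for x0 */

	sp -= 2;                       /* pre-decrement by 16 bytes */
	sp[0] = host_ctxt;             /* [sp]     */
	sp[1] = vcpu;                  /* [sp, #8] */

	assert(sp[1] == vcpu);         /* the slot the exit handlers read */
	return 0;
}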

arch/arm64/kvm/hyp/entry.S

@@ -62,8 +62,8 @@ ENTRY(__guest_enter)
 	// Store the host regs
 	save_callee_saved_regs x1
 
-	// Store the host_ctxt for use at exit time
-	str	x1, [sp, #-16]!
+	// Store host_ctxt and vcpu for use at exit time
+	stp	x1, x0, [sp, #-16]!
 
 	add	x18, x0, #VCPU_CONTEXT
@@ -159,6 +159,10 @@ abort_guest_exit_end:
 ENDPROC(__guest_exit)
 
 ENTRY(__fpsimd_guest_restore)
+	// x0: esr
+	// x1: vcpu
+	// x2-x29,lr: vcpu regs
+	// vcpu x0-x1 on the stack
 	stp	x2, x3, [sp, #-16]!
 	stp	x4, lr, [sp, #-16]!
@@ -173,7 +177,7 @@ alternative_else
 alternative_endif
 	isb
 
-	mrs	x3, tpidr_el2
+	mov	x3, x1
 	ldr	x0, [x3, #VCPU_HOST_CONTEXT]
 	kern_hyp_va x0

arch/arm64/kvm/hyp/hyp-entry.S

@@ -104,6 +104,7 @@ el1_trap:
 	/*
 	 * x0: ESR_EC
 	 */
+	ldr	x1, [sp, #16 + 8]	// vcpu stored by __guest_enter
 
 	/*
 	 * We trap the first access to the FP/SIMD to save the host context
@@ -116,19 +117,18 @@ alternative_if_not ARM64_HAS_NO_FPSIMD
 	b.eq	__fpsimd_guest_restore
 alternative_else_nop_endif
 
-	mrs	x1, tpidr_el2
 	mov	x0, #ARM_EXCEPTION_TRAP
 	b	__guest_exit
 
 el1_irq:
 	stp	x0, x1, [sp, #-16]!
-	mrs	x1, tpidr_el2
+	ldr	x1, [sp, #16 + 8]
 	mov	x0, #ARM_EXCEPTION_IRQ
 	b	__guest_exit
 
 el1_error:
 	stp	x0, x1, [sp, #-16]!
-	mrs	x1, tpidr_el2
+	ldr	x1, [sp, #16 + 8]
 	mov	x0, #ARM_EXCEPTION_EL1_SERROR
 	b	__guest_exit
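
To make the #16 + 8 offset concrete, another plain C sketch (not part of the
patch): el1_irq and el1_error push their own x0/x1 scratch pair as shown above,
and el1_trap is reached with an equivalent pair already pushed by the vector
entry, so __guest_enter's frame sits 16 bytes further up and the vcpu is the
second 8-byte slot of that frame.

/*
 * Why the load is "ldr x1, [sp, #16 + 8]": skip the 16-byte x0/x1
 * scratch pair pushed on exception entry, then pick the second slot
 * (the vcpu) of the pair __guest_enter pushed underneath it.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t stack[8];
	uint64_t *sp = &stack[8];      /* empty descending stack */
	uint64_t host_ctxt = 0x1000, vcpu = 0x2000;

	/* __guest_enter: stp x1, x0, [sp, #-16]! */
	sp -= 2;
	sp[0] = host_ctxt;
	sp[1] = vcpu;

	/* exception entry: stp x0, x1, [sp, #-16]! (guest x0/x1 scratch) */
	sp -= 2;
	sp[0] = 0;
	sp[1] = 0;

	/* ldr x1, [sp, #16 + 8] */
	uint64_t x1 = *(uint64_t *)((char *)sp + 16 + 8);
	assert(x1 == vcpu);
	return 0;
}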