/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>

	.text
	/* All code below runs at EL2 and must live in the hyp section. */
	.pushsection	.hyp.text, "ax"
.macro do_el2_call
	/*
	 * Shuffle the parameters before calling the function
	 * pointed to in x0. Assumes parameters in x[1,2,3].
	 */
	str	lr, [sp, #-16]!		// save lr; keep sp 16-byte aligned
	mov	lr, x0			// lr = function to call
	mov	x0, x1			// shift args down: x0..x2 = x1..x3
	mov	x1, x2
	mov	x2, x3
	blr	lr
	ldr	lr, [sp], #16		// restore lr
.endm
/*
 * VHE variant of the hyp-call path: on a VHE system the kernel already
 * runs at EL2, so the target function is invoked by a plain call
 * instead of an HVC/exception round trip.
 */
ENTRY(__vhe_hyp_call)
	do_el2_call
	/*
	 * We used to rely on having an exception return to get
	 * an implicit isb. In the E2H case, we don't have it anymore.
	 * rather than changing all the leaf functions, just do it here
	 * before returning to the rest of the kernel.
	 */
	isb
	ret
ENDPROC(__vhe_hyp_call)
/*
 * Synchronous exception taken from lower EL (or host HVC).
 * Classifies the exception and either handles a host hyp-call here
 * or hands the trap off to el1_trap / __guest_exit.
 */
el1_sync:				// Guest trapped into EL2
	stp	x0, x1, [sp, #-16]!	// free x0/x1 for use as scratch

alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	x1, esr_el2		// non-VHE: syndrome is in ESR_EL2
alternative_else
	mrs	x1, esr_el1		// NOTE(review): VHE path — presumably the
					// EL1 name is what must be used here under
					// E2H register handling; confirm vs the ARM ARM
alternative_endif
	lsr	x0, x1, #ESR_ELx_EC_SHIFT	// x0 = exception class (EC)

	cmp	x0, #ESR_ELx_EC_HVC64
	b.ne	el1_trap		// not an HVC: ordinary guest trap

	mrs	x1, vttbr_el2		// If vttbr is valid, the 64bit guest
	cbnz	x1, el1_trap		// called HVC

	/* Here, we're pretty sure the host called HVC. */
	ldp	x0, x1, [sp], #16	// restore scratch regs before dispatch

	/* Check for a stub HVC call */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.hs	1f			// x0 >= NR: regular function-pointer call

	/*
	 * Compute the idmap address of __kvm_handle_stub_hvc and
	 * jump there. Since we use kimage_voffset, do not use the
	 * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead
	 * (by loading it from the constant pool).
	 *
	 * Preserve x0-x4, which may contain stub parameters.
	 */
	ldr	x5, =__kvm_handle_stub_hvc	// kernel VA from literal pool
	ldr_l	x6, kimage_voffset

	/* x5 = __pa(x5) */
	sub	x5, x5, x6
	br	x5			// tail-jump, x0-x4 still hold stub args

1:
	/*
	 * Perform the EL2 call: x0 holds the function pointer,
	 * x1-x3 its arguments (see do_el2_call).
	 */
	kern_hyp_va	x0		// convert kernel VA -> HYP VA
	do_el2_call

	eret				// return to the caller of the HVC
/*
 * Guest trap that is not a host hyp-call.
 *
 * x0: ESR_EC (exception class, extracted by el1_sync)
 * x0/x1 from the guest are still saved on the stack at this point.
 */
el1_trap:
	/*
	 * We trap the first access to the FP/SIMD to save the host context
	 * and restore the guest context lazily.
	 * If FP/SIMD is not implemented, handle the trap and inject an
	 * undefined instruction exception to the guest.
	 */
alternative_if_not ARM64_HAS_NO_FPSIMD
	cmp	x0, #ESR_ELx_EC_FP_ASIMD
	b.eq	__fpsimd_guest_restore	// lazy FP/SIMD switch, no C exit
alternative_else_nop_endif

	mrs	x1, tpidr_el2		// NOTE(review): presumably the vcpu/host
					// context pointer stashed in TPIDR_EL2 —
					// confirm against __guest_exit's contract
	mov	x0, #ARM_EXCEPTION_TRAP
	b	__guest_exit
/* IRQ taken from the guest: exit to the host with ARM_EXCEPTION_IRQ. */
el1_irq:
	stp	x0, x1, [sp, #-16]!	// save guest x0/x1, as el1_sync does
	mrs	x1, tpidr_el2
	mov	x0, #ARM_EXCEPTION_IRQ
	b	__guest_exit
/* SError taken from the guest: exit with ARM_EXCEPTION_EL1_SERROR. */
el1_error:
	stp	x0, x1, [sp, #-16]!	// save guest x0/x1, as el1_sync does
	mrs	x1, tpidr_el2
	mov	x0, #ARM_EXCEPTION_EL1_SERROR
	b	__guest_exit
/* SError taken at EL2 itself (EL2h Error vector). */
el2_error:
	/*
	 * Only two possibilities:
	 * 1) Either we come from the exit path, having just unmasked
	 *    PSTATE.A: change the return code to an EL2 fault, and
	 *    carry on, as we're already in a sane state to handle it.
	 * 2) Or we come from anywhere else, and that's a bug: we panic.
	 *
	 * For (1), x0 contains the original return code and x1 doesn't
	 * contain anything meaningful at that stage. We can reuse them
	 * as temp registers.
	 * For (2), who cares?
	 */
	mrs	x0, elr_el2
	adr	x1, abort_guest_exit_start
	cmp	x0, x1
	adr	x1, abort_guest_exit_end
	ccmp	x0, x1, #4, ne		// if elr != start, compare with end;
					// else force NZCV = #4 (Z set, i.e. "eq")
	b.ne	__hyp_panic		// panic unless elr matched start or end
	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)	// tag return code
	eret
/*
 * Fake an exception return into the kernel's panic(): set up
 * SPSR_EL2/ELR_EL2 by hand and eret. lr is used as scratch; panic()
 * never returns, so nothing needs to be preserved.
 */
ENTRY(__hyp_do_panic)
	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)	// all exceptions masked, EL1h target
	msr	spsr_el2, lr
	ldr	lr, =panic		// kernel VA of panic(), via literal pool
	msr	elr_el2, lr
	eret
ENDPROC(__hyp_do_panic)
/*
 * Emit a stub vector entry that simply branches to \target
 * (default: __hyp_panic).
 */
.macro invalid_vector	label, target = __hyp_panic
	.align	2			// ventry targets must be 4-byte aligned
\label:
	b \target
ENDPROC(\label)
.endm
/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid
	invalid_vector	el2t_irq_invalid
	invalid_vector	el2t_fiq_invalid
	invalid_vector	el2t_error_invalid
	invalid_vector	el2h_sync_invalid
	invalid_vector	el2h_irq_invalid
	invalid_vector	el2h_fiq_invalid
	invalid_vector	el1_sync_invalid
	invalid_vector	el1_irq_invalid
	invalid_vector	el1_fiq_invalid
	.ltorg				// flush literal pool (=panic, =__kvm_handle_stub_hvc)

	.align 11			// 2048-byte alignment required for a vector base

/*
 * EL2 exception vector table: 16 entries of 128 bytes each, in the
 * architectural order (EL2t, EL2h, lower-EL AArch64, lower-EL AArch32).
 */
ENTRY(__kvm_hyp_vector)
	ventry	el2t_sync_invalid	// Synchronous EL2t
	ventry	el2t_irq_invalid	// IRQ EL2t
	ventry	el2t_fiq_invalid	// FIQ EL2t
	ventry	el2t_error_invalid	// Error EL2t

	ventry	el2h_sync_invalid	// Synchronous EL2h
	ventry	el2h_irq_invalid	// IRQ EL2h
	ventry	el2h_fiq_invalid	// FIQ EL2h
	ventry	el2_error		// Error EL2h

	ventry	el1_sync		// Synchronous 64-bit EL1
	ventry	el1_irq			// IRQ 64-bit EL1
	ventry	el1_fiq_invalid		// FIQ 64-bit EL1
	ventry	el1_error		// Error 64-bit EL1

	ventry	el1_sync		// Synchronous 32-bit EL1
	ventry	el1_irq			// IRQ 32-bit EL1
	ventry	el1_fiq_invalid		// FIQ 32-bit EL1
	ventry	el1_error		// Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)