/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * EFI entry point.
 *
 * Copyright (C) 2013, 2014 Red Hat, Inc.
 * Author: Mark Salter <msalter@redhat.com>
 */
#include <linux/linkage.h>
#include <linux/init.h>

#include <asm/assembler.h>

	__INIT

SYM_CODE_START(efi_enter_kernel)
	/*
	 * efi_pe_entry() will have copied the kernel image if necessary and we
	 * end up here with device tree address in x1 and the kernel entry
	 * point stored in x0. Save those values in registers which are
	 * callee preserved.
	 */
	ldr	w2, =primary_entry_offset
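	// w2 now holds the byte offset of primary_entry within the Image,
	// which is added to x0 below to form the relocated entry point.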
	add	x19, x0, x2		// relocated Image entrypoint
	mov	x20, x1			// DTB address

	/*
	 * Clean the copied Image to the PoC, and ensure it is not shadowed by
	 * stale icache entries from before relocation.
	 */
	ldr	w1, =kernel_size
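	// __clean_dcache_area_poc() expects the start address in x0 and the
	// size in x1, and cleans that range of the D-cache by VA to the Point
	// of Coherency; here the range covers the freshly copied Image.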
	bl	__clean_dcache_area_poc
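	// Invalidate all I-caches to the PoU, Inner Shareable, so that no
	// stale instructions fetched from before the relocation can be hit.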
	ic	ialluis

	/*
	 * Clean the remainder of this routine to the PoC
	 * so that we can safely disable the MMU and caches.
	 */
	adr	x0, 0f
	ldr	w1, 3f
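	// x0 = start of the code at label 0 below, w1 = its size in bytes,
	// taken from the '3: .long . - 0b' literal at the end of this file.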
	bl	__clean_dcache_area_poc
0:
	/* Turn off Dcache and MMU */
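	// UEFI may have entered us at EL2 or EL1; clear the M (MMU) and C
	// (D-cache) bits in the SCTLR of whichever level we are running at.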
	mrs	x0, CurrentEL
	cmp	x0, #CurrentEL_EL2
	b.ne	1f
	mrs	x0, sctlr_el2
	bic	x0, x0, #1 << 0	// clear SCTLR.M
	bic	x0, x0, #1 << 2	// clear SCTLR.C
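	// Some CPUs require an erratum workaround (an ISB) immediately before
	// the write that clears SCTLR_ELx.M; pre_disable_mmu_workaround from
	// <asm/assembler.h> provides it.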
	pre_disable_mmu_workaround
	msr	sctlr_el2, x0
	isb
	b	2f
1:
	mrs	x0, sctlr_el1
	bic	x0, x0, #1 << 0	// clear SCTLR.M
	bic	x0, x0, #1 << 2	// clear SCTLR.C
	pre_disable_mmu_workaround
	msr	sctlr_el1, x0
	isb
2:
	/* Jump to kernel entry point */
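	// Per the arm64 boot protocol, x0 must hold the physical address of
	// the device tree blob and x1, x2 and x3 must be zero.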
	mov	x0, x20
	mov	x1, xzr
	mov	x2, xzr
	mov	x3, xzr
	br	x19
SYM_CODE_END(efi_enter_kernel)
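
/*
 * The .long below records the number of bytes between the 0 label above and
 * this literal; it is loaded via 'ldr w1, 3f' as the length argument for the
 * final cache clean.
 */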
3: .long . - 0b