efi: arm64: enter with MMU and caches enabled
Instead of cleaning the entire loaded kernel image to the PoC and disabling the MMU and caches before branching to the kernel's bare metal entry point, we can leave the MMU and caches enabled, and rely on EFI's cacheable 1:1 mapping of all of system RAM (which is mandated by the spec) to populate the initial page tables. This removes the need for managing coherency in software, which is tedious and error prone. Note that we still need to clean the executable region of the image to the PoU if this is required for I/D coherency, but only if we actually decided to move the image in memory, as otherwise, this will have been taken care of by the loader. This change affects both the builtin EFI stub as well as the zboot decompressor, which now carries the entire EFI stub along with the decompression code and the compressed image. Signed-off-by: Ard Biesheuvel <ardb@kernel.org> Link: https://lore.kernel.org/r/20230111102236.1430401-7-ardb@kernel.org Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
This commit is contained in:
parent
3dcf60bbfd
commit
6178617038
|
@ -105,6 +105,8 @@ static inline unsigned long efi_get_kimg_min_align(void)
|
|||
#define EFI_ALLOC_ALIGN SZ_64K
|
||||
#define EFI_ALLOC_LIMIT ((1UL << 48) - 1)
|
||||
|
||||
extern unsigned long primary_entry_offset(void);
|
||||
|
||||
/*
|
||||
* On ARM systems, virtually remapped UEFI runtime services are set up in two
|
||||
* distinct stages:
|
||||
|
|
|
@ -10,7 +10,7 @@
|
|||
#error This file should only be included in vmlinux.lds.S
|
||||
#endif
|
||||
|
||||
PROVIDE(__efistub_primary_entry_offset = primary_entry - _text);
|
||||
PROVIDE(__efistub_primary_entry = primary_entry);
|
||||
|
||||
/*
|
||||
* The EFI stub has its own symbol namespace prefixed by __efistub_, to
|
||||
|
@ -21,10 +21,11 @@ PROVIDE(__efistub_primary_entry_offset = primary_entry - _text);
|
|||
* linked at. The routines below are all implemented in assembler in a
|
||||
* position independent manner
|
||||
*/
|
||||
PROVIDE(__efistub_dcache_clean_poc = __pi_dcache_clean_poc);
|
||||
PROVIDE(__efistub_caches_clean_inval_pou = __pi_caches_clean_inval_pou);
|
||||
|
||||
PROVIDE(__efistub__text = _text);
|
||||
PROVIDE(__efistub__end = _end);
|
||||
PROVIDE(__efistub___inittext_end = __inittext_end);
|
||||
PROVIDE(__efistub__edata = _edata);
|
||||
PROVIDE(__efistub_screen_info = screen_info);
|
||||
PROVIDE(__efistub__ctype = _ctype);
|
||||
|
|
|
@ -56,6 +56,7 @@ SYM_FUNC_START(caches_clean_inval_pou)
|
|||
caches_clean_inval_pou_macro
|
||||
ret
|
||||
SYM_FUNC_END(caches_clean_inval_pou)
|
||||
SYM_FUNC_ALIAS(__pi_caches_clean_inval_pou, caches_clean_inval_pou)
|
||||
|
||||
/*
|
||||
* caches_clean_inval_user_pou(start,end)
|
||||
|
|
|
@ -87,7 +87,7 @@ lib-$(CONFIG_EFI_GENERIC_STUB) += efi-stub.o string.o intrinsics.o systable.o \
|
|||
screen_info.o efi-stub-entry.o
|
||||
|
||||
lib-$(CONFIG_ARM) += arm32-stub.o
|
||||
lib-$(CONFIG_ARM64) += arm64.o arm64-stub.o arm64-entry.o smbios.o
|
||||
lib-$(CONFIG_ARM64) += arm64.o arm64-stub.o smbios.o
|
||||
lib-$(CONFIG_X86) += x86-stub.o
|
||||
lib-$(CONFIG_RISCV) += riscv.o riscv-stub.o
|
||||
lib-$(CONFIG_LOONGARCH) += loongarch.o loongarch-stub.o
|
||||
|
@ -141,7 +141,7 @@ STUBCOPY_RELOC-$(CONFIG_ARM) := R_ARM_ABS
|
|||
#
|
||||
STUBCOPY_FLAGS-$(CONFIG_ARM64) += --prefix-alloc-sections=.init \
|
||||
--prefix-symbols=__efistub_
|
||||
STUBCOPY_RELOC-$(CONFIG_ARM64) := R_AARCH64_ABS64
|
||||
STUBCOPY_RELOC-$(CONFIG_ARM64) := R_AARCH64_ABS
|
||||
|
||||
# For RISC-V, we don't need anything special beyond what arm64 needs. Keep all the
|
||||
# symbols in .init section and make sure that no absolute symbol references
|
||||
|
|
|
@ -1,67 +0,0 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* EFI entry point.
|
||||
*
|
||||
* Copyright (C) 2013, 2014 Red Hat, Inc.
|
||||
* Author: Mark Salter <msalter@redhat.com>
|
||||
*/
|
||||
#include <linux/linkage.h>
|
||||
#include <asm/assembler.h>
|
||||
|
||||
/*
|
||||
 * The entrypoint of an arm64 bare metal image is at offset #0 of the
|
||||
* image, so this is a reasonable default for primary_entry_offset.
|
||||
* Only when the EFI stub is integrated into the core kernel, it is not
|
||||
* guaranteed that the PE/COFF header has been copied to memory too, so
|
||||
* in this case, primary_entry_offset should be overridden by the
|
||||
* linker and point to primary_entry() directly.
|
||||
*/
|
||||
.weak primary_entry_offset
|
||||
|
||||
SYM_CODE_START(efi_enter_kernel)
|
||||
/*
|
||||
* efi_pe_entry() will have copied the kernel image if necessary and we
|
||||
* end up here with device tree address in x1 and the kernel entry
|
||||
* point stored in x0. Save those values in registers which are
|
||||
* callee preserved.
|
||||
*/
|
||||
ldr w2, =primary_entry_offset
|
||||
add x19, x0, x2 // relocated Image entrypoint
|
||||
|
||||
mov x0, x1 // DTB address
|
||||
mov x1, xzr
|
||||
mov x2, xzr
|
||||
mov x3, xzr
|
||||
|
||||
/*
|
||||
* Clean the remainder of this routine to the PoC
|
||||
* so that we can safely disable the MMU and caches.
|
||||
*/
|
||||
adr x4, 1f
|
||||
dc civac, x4
|
||||
dsb sy
|
||||
|
||||
/* Turn off Dcache and MMU */
|
||||
mrs x4, CurrentEL
|
||||
cmp x4, #CurrentEL_EL2
|
||||
mrs x4, sctlr_el1
|
||||
b.ne 0f
|
||||
mrs x4, sctlr_el2
|
||||
0: bic x4, x4, #SCTLR_ELx_M
|
||||
bic x4, x4, #SCTLR_ELx_C
|
||||
b.eq 1f
|
||||
b 2f
|
||||
|
||||
.balign 32
|
||||
1: pre_disable_mmu_workaround
|
||||
msr sctlr_el2, x4
|
||||
isb
|
||||
br x19 // jump to kernel entrypoint
|
||||
|
||||
2: pre_disable_mmu_workaround
|
||||
msr sctlr_el1, x4
|
||||
isb
|
||||
br x19 // jump to kernel entrypoint
|
||||
|
||||
.org 1b + 32
|
||||
SYM_CODE_END(efi_enter_kernel)
|
|
@ -58,7 +58,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
|
|||
efi_handle_t image_handle)
|
||||
{
|
||||
efi_status_t status;
|
||||
unsigned long kernel_size, kernel_memsize = 0;
|
||||
unsigned long kernel_size, kernel_codesize, kernel_memsize;
|
||||
u32 phys_seed = 0;
|
||||
u64 min_kimg_align = efi_get_kimg_min_align();
|
||||
|
||||
|
@ -93,6 +93,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
|
|||
SEGMENT_ALIGN >> 10);
|
||||
|
||||
kernel_size = _edata - _text;
|
||||
kernel_codesize = __inittext_end - _text;
|
||||
kernel_memsize = kernel_size + (_end - _edata);
|
||||
*reserve_size = kernel_memsize;
|
||||
|
||||
|
@ -121,7 +122,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
|
|||
*/
|
||||
*image_addr = (u64)_text;
|
||||
*reserve_size = 0;
|
||||
goto clean_image_to_poc;
|
||||
return EFI_SUCCESS;
|
||||
}
|
||||
|
||||
status = efi_allocate_pages_aligned(*reserve_size, reserve_addr,
|
||||
|
@ -137,14 +138,21 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
|
|||
|
||||
*image_addr = *reserve_addr;
|
||||
memcpy((void *)*image_addr, _text, kernel_size);
|
||||
|
||||
clean_image_to_poc:
|
||||
/*
|
||||
* Clean the copied Image to the PoC, and ensure it is not shadowed by
|
||||
* stale icache entries from before relocation.
|
||||
*/
|
||||
dcache_clean_poc(*image_addr, *image_addr + kernel_size);
|
||||
asm("ic ialluis");
|
||||
caches_clean_inval_pou(*image_addr, *image_addr + kernel_codesize);
|
||||
|
||||
return EFI_SUCCESS;
|
||||
}
|
||||
|
||||
asmlinkage void primary_entry(void);
|
||||
|
||||
unsigned long primary_entry_offset(void)
|
||||
{
|
||||
/*
|
||||
* When built as part of the kernel, the EFI stub cannot branch to the
|
||||
* kernel proper via the image header, as the PE/COFF header is
|
||||
* strictly not part of the in-memory presentation of the image, only
|
||||
* of the file representation. So instead, we need to jump to the
|
||||
* actual entrypoint in the .text region of the image.
|
||||
*/
|
||||
return (char *)primary_entry - _text;
|
||||
}
|
||||
|
|
|
@ -56,6 +56,12 @@ efi_status_t check_platform_features(void)
|
|||
return EFI_SUCCESS;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
|
||||
#define DCTYPE "civac"
|
||||
#else
|
||||
#define DCTYPE "cvau"
|
||||
#endif
|
||||
|
||||
void efi_cache_sync_image(unsigned long image_base,
|
||||
unsigned long alloc_size,
|
||||
unsigned long code_size)
|
||||
|
@ -64,13 +70,38 @@ void efi_cache_sync_image(unsigned long image_base,
|
|||
u64 lsize = 4 << cpuid_feature_extract_unsigned_field(ctr,
|
||||
CTR_EL0_DminLine_SHIFT);
|
||||
|
||||
do {
|
||||
asm("dc civac, %0" :: "r"(image_base));
|
||||
image_base += lsize;
|
||||
alloc_size -= lsize;
|
||||
} while (alloc_size >= lsize);
|
||||
/* only perform the cache maintenance if needed for I/D coherency */
|
||||
if (!(ctr & BIT(CTR_EL0_IDC_SHIFT))) {
|
||||
do {
|
||||
asm("dc " DCTYPE ", %0" :: "r"(image_base));
|
||||
image_base += lsize;
|
||||
code_size -= lsize;
|
||||
} while (code_size >= lsize);
|
||||
}
|
||||
|
||||
asm("ic ialluis");
|
||||
dsb(ish);
|
||||
isb();
|
||||
}
|
||||
|
||||
unsigned long __weak primary_entry_offset(void)
|
||||
{
|
||||
/*
|
||||
* By default, we can invoke the kernel via the branch instruction in
|
||||
* the image header, so offset #0. This will be overridden by the EFI
|
||||
* stub build that is linked into the core kernel, as in that case, the
|
||||
* image header may not have been loaded into memory, or may be mapped
|
||||
* with non-executable permissions.
|
||||
*/
|
||||
return 0;
|
||||
}
|
||||
|
||||
void __noreturn efi_enter_kernel(unsigned long entrypoint,
|
||||
unsigned long fdt_addr,
|
||||
unsigned long fdt_size)
|
||||
{
|
||||
void (* __noreturn enter_kernel)(u64, u64, u64, u64);
|
||||
|
||||
enter_kernel = (void *)entrypoint + primary_entry_offset();
|
||||
enter_kernel(fdt_addr, 0, 0, 0);
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue