OpenCloudOS-Kernel/arch/arm64/mm/proc.S

/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Based on arch/arm/mm/proc.S
*
* Copyright (C) 2001 Deep Blue Solutions Ltd.
* Copyright (C) 2012 ARM Ltd.
* Author: Catalin Marinas <catalin.marinas@arm.com>
*/
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/asm_pointer_auth.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
#include <asm/smp.h>
#ifdef CONFIG_ARM64_64K_PAGES
#define TCR_TG_FLAGS TCR_TG0_64K | TCR_TG1_64K
#elif defined(CONFIG_ARM64_16K_PAGES)
#define TCR_TG_FLAGS TCR_TG0_16K | TCR_TG1_16K
#else /* CONFIG_ARM64_4K_PAGES */
#define TCR_TG_FLAGS TCR_TG0_4K | TCR_TG1_4K
#endif
#ifdef CONFIG_RANDOMIZE_BASE
#define TCR_KASLR_FLAGS TCR_NFD1
#else
#define TCR_KASLR_FLAGS 0
#endif
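/* PTWs shareable (Inner Shareable) between CPUs */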
#define TCR_SMP_FLAGS TCR_SHARED
/* PTWs cacheable, inner/outer WBWA */
#define TCR_CACHE_FLAGS TCR_IRGN_WBWA | TCR_ORGN_WBWA
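/* Top Byte Ignore on TTBR1 addresses: KASAN_SW_TAGS keeps its tag there */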
#ifdef CONFIG_KASAN_SW_TAGS
#define TCR_KASAN_FLAGS TCR_TBI1
#else
#define TCR_KASAN_FLAGS 0
#endif
/*
* Default MAIR_EL1. MT_NORMAL_TAGGED is initially mapped as Normal memory and
* changed during __cpu_setup to Normal Tagged if the system supports MTE.
*/
#define MAIR_EL1_SET \
(MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) | \
MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) | \
MAIR_ATTRIDX(MAIR_ATTR_DEVICE_GRE, MT_DEVICE_GRE) | \
MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) | \
MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) | \
MAIR_ATTRIDX(MAIR_ATTR_NORMAL_WT, MT_NORMAL_WT) | \
MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL_TAGGED))
#ifdef CONFIG_CPU_PM
/**
* cpu_do_suspend - save CPU registers context
*
* x0: virtual address of context pointer
*
* This must be kept in sync with struct cpu_suspend_ctx in <asm/suspend.h>.
*/
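/*
 * Layout written below, one 64-bit slot per register:
 *   [x0, #0]  tpidr_el0        [x0, #8]  tpidrro_el0
 *   [x0, #16] contextidr_el1   [x0, #24] osdlr_el1
 *   [x0, #32] cpacr_el1        [x0, #40] tcr_el1
 *   [x0, #48] vbar_el1         [x0, #56] mdscr_el1
 *   [x0, #64] oslsr_el1        [x0, #72] sctlr_el1
 *   [x0, #80] tpidr_el1/el2    [x0, #88] sp_el0
 *   [x0, #96] x18
 */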
SYM_FUNC_START(cpu_do_suspend)
mrs x2, tpidr_el0
mrs x3, tpidrro_el0
mrs x4, contextidr_el1
mrs x5, osdlr_el1
mrs x6, cpacr_el1
mrs x7, tcr_el1
mrs x8, vbar_el1
mrs x9, mdscr_el1
mrs x10, oslsr_el1
mrs x11, sctlr_el1
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
mrs x12, tpidr_el1
alternative_else
mrs x12, tpidr_el2
alternative_endif
mrs x13, sp_el0
stp x2, x3, [x0]
stp x4, x5, [x0, #16]
stp x6, x7, [x0, #32]
stp x8, x9, [x0, #48]
stp x10, x11, [x0, #64]
stp x12, x13, [x0, #80]
/*
* Save x18 as it may be used as a platform register, e.g. by shadow
* call stack.
*/
str x18, [x0, #96]
ret
SYM_FUNC_END(cpu_do_suspend)
/**
* cpu_do_resume - restore CPU register context
*
* x0: Address of context pointer
*/
.pushsection ".idmap.text", "awx"
SYM_FUNC_START(cpu_do_resume)
ldp x2, x3, [x0]
ldp x4, x5, [x0, #16]
ldp x6, x8, [x0, #32]
ldp x9, x10, [x0, #48]
ldp x11, x12, [x0, #64]
ldp x13, x14, [x0, #80]
/*
* Restore x18, as it may be used as a platform register, and clear
* the buffer to minimize the risk of exposure when used for shadow
* call stack.
*/
ldr x18, [x0, #96]
str xzr, [x0, #96]
msr tpidr_el0, x2
msr tpidrro_el0, x3
msr contextidr_el1, x4
msr cpacr_el1, x6
/* Don't change t0sz here, mask those bits when restoring */
mrs x7, tcr_el1
bfi x8, x7, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
msr tcr_el1, x8
msr vbar_el1, x9
/*
* __cpu_setup() cleared MDSCR_EL1.MDE and friends, before unmasking
* debug exceptions. By restoring MDSCR_EL1 here, we may take a debug
* exception. Mask them until local_daif_restore() in cpu_suspend()
* resets them.
*/
disable_daif
msr mdscr_el1, x10
msr sctlr_el1, x12
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
msr tpidr_el1, x13
alternative_else
msr tpidr_el2, x13
alternative_endif
msr sp_el0, x14
	/*
	 * Restore the OS lock state: OSLSR_EL1 is read-only, so extract the
	 * saved OSLK bit (bit 1) and restore it by writing OSLAR_EL1.
	 */
msr osdlr_el1, x5
ubfx x11, x11, #1, #1
msr oslar_el1, x11
reset_pmuserenr_el0 x0 // Disable PMU access from EL0
reset_amuserenr_el0 x0 // Disable AMU access from EL0
alternative_if ARM64_HAS_RAS_EXTN
msr_s SYS_DISR_EL1, xzr
alternative_else_nop_endif
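	/* x14 (the restored sp_el0) is the current task pointer */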
ptrauth_keys_install_kernel_nosync x14, x1, x2, x3
isb
ret
SYM_FUNC_END(cpu_do_resume)
.popsection
#endif
.pushsection ".idmap.text", "awx"
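/*
 * Point TTBR1_EL1 at the empty_zero_page (an all-invalid table) and
 * invalidate the TLBs, so that the swapper page tables can be replaced
 * safely underneath us.
 */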
.macro __idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
adrp \tmp1, empty_zero_page
phys_to_ttbr \tmp2, \tmp1
offset_ttbr1 \tmp2, \tmp1
msr ttbr1_el1, \tmp2
isb
tlbi vmalle1
dsb nsh
isb
.endm
/*
 * void idmap_cpu_replace_ttbr1(phys_addr_t ttbr1)
 *
* This is the low-level counterpart to cpu_replace_ttbr1, and should not be
* called by anything else. It can only be executed from a TTBR0 mapping.
*/
SYM_FUNC_START(idmap_cpu_replace_ttbr1)
save_and_disable_daif flags=x2
__idmap_cpu_set_reserved_ttbr1 x1, x3
offset_ttbr1 x0, x3
msr ttbr1_el1, x0
isb
restore_daif x2
ret
SYM_FUNC_END(idmap_cpu_replace_ttbr1)
.popsection
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
.pushsection ".idmap.text", "awx"
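/*
 * KPTI rewrite of swapper: walk the kernel page tables and set PTE_NG on
 * every valid global entry, so that kernel mappings become non-global
 * (ASID-tagged) before the kernel is unmapped at EL0.
 */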
.macro __idmap_kpti_get_pgtable_ent, type
dc cvac, cur_\()\type\()p // Ensure any existing dirty
dmb sy // lines are written back before
ldr \type, [cur_\()\type\()p] // loading the entry
tbz \type, #0, skip_\()\type // Skip invalid and
tbnz \type, #11, skip_\()\type // non-global entries
.endm
.macro __idmap_kpti_put_pgtable_ent_ng, type
orr \type, \type, #PTE_NG // Same bit for blocks and pages
str \type, [cur_\()\type\()p] // Update the entry and ensure
dmb sy // that it is visible to all
dc civac, cur_\()\type\()p // CPUs.
.endm
/*
* void __kpti_install_ng_mappings(int cpu, int num_cpus, phys_addr_t swapper)
*
* Called exactly once from stop_machine context by each CPU found during boot.
*/
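/*
 * Rendezvous counter: starts at 1 (counting the boot CPU); each secondary
 * increments it on arrival, and the boot CPU writes 0 to release the
 * secondaries once the rewrite is complete.
 */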
__idmap_kpti_flag:
.long 1
SYM_FUNC_START(idmap_kpti_install_ng_mappings)
cpu .req w0
num_cpus .req w1
swapper_pa .req x2
swapper_ttb .req x3
flag_ptr .req x4
cur_pgdp .req x5
end_pgdp .req x6
pgd .req x7
cur_pudp .req x8
end_pudp .req x9
pud .req x10
cur_pmdp .req x11
end_pmdp .req x12
pmd .req x13
cur_ptep .req x14
end_ptep .req x15
pte .req x16
mrs swapper_ttb, ttbr1_el1
restore_ttbr1 swapper_ttb
adr flag_ptr, __idmap_kpti_flag
cbnz cpu, __idmap_kpti_secondary
/* We're the boot CPU. Wait for the others to catch up */
sevl
1: wfe
ldaxr w17, [flag_ptr]
eor w17, w17, num_cpus
cbnz w17, 1b
/* We need to walk swapper, so turn off the MMU. */
pre_disable_mmu_workaround
mrs x17, sctlr_el1
bic x17, x17, #SCTLR_ELx_M
msr sctlr_el1, x17
isb
/* Everybody is enjoying the idmap, so we can rewrite swapper. */
/* PGD */
mov cur_pgdp, swapper_pa
add end_pgdp, cur_pgdp, #(PTRS_PER_PGD * 8)
do_pgd: __idmap_kpti_get_pgtable_ent pgd
tbnz pgd, #1, walk_puds
next_pgd:
__idmap_kpti_put_pgtable_ent_ng pgd
skip_pgd:
add cur_pgdp, cur_pgdp, #8
cmp cur_pgdp, end_pgdp
b.ne do_pgd
/* Publish the updated tables and nuke all the TLBs */
dsb sy
tlbi vmalle1is
dsb ish
isb
/* We're done: fire up the MMU again */
mrs x17, sctlr_el1
orr x17, x17, #SCTLR_ELx_M
msr sctlr_el1, x17
isb
/*
* Invalidate the local I-cache so that any instructions fetched
* speculatively from the PoC are discarded, since they may have
* been dynamically patched at the PoU.
*/
ic iallu
dsb nsh
isb
/* Set the flag to zero to indicate that we're all done */
str wzr, [flag_ptr]
ret
/* PUD */
walk_puds:
.if CONFIG_PGTABLE_LEVELS > 3
pte_to_phys cur_pudp, pgd
add end_pudp, cur_pudp, #(PTRS_PER_PUD * 8)
do_pud: __idmap_kpti_get_pgtable_ent pud
tbnz pud, #1, walk_pmds
next_pud:
__idmap_kpti_put_pgtable_ent_ng pud
skip_pud:
	add	cur_pudp, cur_pudp, #8
cmp cur_pudp, end_pudp
b.ne do_pud
b next_pgd
.else /* CONFIG_PGTABLE_LEVELS <= 3 */
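	/* With three or fewer levels, the PGD entry is itself the PUD entry */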
mov pud, pgd
b walk_pmds
next_pud:
b next_pgd
.endif
/* PMD */
walk_pmds:
.if CONFIG_PGTABLE_LEVELS > 2
pte_to_phys cur_pmdp, pud
add end_pmdp, cur_pmdp, #(PTRS_PER_PMD * 8)
do_pmd: __idmap_kpti_get_pgtable_ent pmd
tbnz pmd, #1, walk_ptes
next_pmd:
__idmap_kpti_put_pgtable_ent_ng pmd
skip_pmd:
add cur_pmdp, cur_pmdp, #8
cmp cur_pmdp, end_pmdp
b.ne do_pmd
b next_pud
.else /* CONFIG_PGTABLE_LEVELS <= 2 */
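	/* With two levels, the PUD entry is itself the PMD entry */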
mov pmd, pud
b walk_ptes
next_pmd:
b next_pud
.endif
/* PTE */
walk_ptes:
pte_to_phys cur_ptep, pmd
add end_ptep, cur_ptep, #(PTRS_PER_PTE * 8)
do_pte: __idmap_kpti_get_pgtable_ent pte
__idmap_kpti_put_pgtable_ent_ng pte
skip_pte:
add cur_ptep, cur_ptep, #8
cmp cur_ptep, end_ptep
b.ne do_pte
b next_pmd
.unreq cpu
.unreq num_cpus
.unreq swapper_pa
.unreq cur_pgdp
.unreq end_pgdp
.unreq pgd
.unreq cur_pudp
.unreq end_pudp
.unreq pud
.unreq cur_pmdp
.unreq end_pmdp
.unreq pmd
.unreq cur_ptep
.unreq end_ptep
.unreq pte
/* Secondary CPUs end up here */
__idmap_kpti_secondary:
/* Uninstall swapper before surgery begins */
__idmap_cpu_set_reserved_ttbr1 x16, x17
	/* Increment the flag to let the boot CPU know we're ready */
1: ldxr w16, [flag_ptr]
add w16, w16, #1
stxr w17, w16, [flag_ptr]
cbnz w17, 1b
/* Wait for the boot CPU to finish messing around with swapper */
sevl
1: wfe
ldxr w16, [flag_ptr]
cbnz w16, 1b
/* All done, act like nothing happened */
offset_ttbr1 swapper_ttb, x16
msr ttbr1_el1, swapper_ttb
isb
ret
.unreq swapper_ttb
.unreq flag_ptr
SYM_FUNC_END(idmap_kpti_install_ng_mappings)
.popsection
#endif
/*
* __cpu_setup
*
* Initialise the processor for turning the MMU on.
*
* Output:
* Return in x0 the value of the SCTLR_EL1 register.
*/
.pushsection ".idmap.text", "awx"
SYM_FUNC_START(__cpu_setup)
tlbi vmalle1 // Invalidate local TLB
dsb nsh
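	/* CPACR_EL1.FPEN = 0b11: do not trap FP/ASIMD accesses */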
mov x1, #3 << 20
msr cpacr_el1, x1 // Enable FP/ASIMD
mov x1, #1 << 12 // Reset mdscr_el1 and disable
msr mdscr_el1, x1 // access to the DCC from EL0
isb // Unmask debug exceptions now,
enable_dbg // since this is per-cpu
reset_pmuserenr_el0 x1 // Disable PMU access from EL0
reset_amuserenr_el0 x1 // Disable AMU access from EL0
/*
* Memory region attributes
*/
mov_q x5, MAIR_EL1_SET
msr mair_el1, x5
	/*
	 * Set/prepare TCR and TTBR. The user/kernel address range is derived
	 * from the configured VA_BITS (via TCR_TxSZ); t0sz/t1sz are adjusted
	 * further below where needed.
	 */
mov_q x10, TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
TCR_TBI0 | TCR_A1 | TCR_KASAN_FLAGS
tcr_clear_errata_bits x10, x9, x5
#ifdef CONFIG_ARM64_VA_BITS_52
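	/* T1SZ = 64 - vabits_actual */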
ldr_l x9, vabits_actual
sub x9, xzr, x9
add x9, x9, #64
tcr_set_t1sz x10, x9
#else
ldr_l x9, idmap_t0sz
#endif
tcr_set_t0sz x10, x9
/*
* Set the IPS bits in TCR_EL1.
*/
tcr_compute_pa_size x10, #TCR_IPS_SHIFT, x5, x6
#ifdef CONFIG_ARM64_HW_AFDBM
/*
* Enable hardware update of the Access Flags bit.
* Hardware dirty bit management is enabled later,
* via capabilities.
*/
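	/* ID_AA64MMFR1_EL1.HAFDBS (bits [3:0]) != 0: HW Access flag supported */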
mrs x9, ID_AA64MMFR1_EL1
and x9, x9, #0xf
cbz x9, 1f
orr x10, x10, #TCR_HA // hardware Access flag update
1:
#endif /* CONFIG_ARM64_HW_AFDBM */
msr tcr_el1, x10
/*
* Prepare SCTLR
*/
mov_q x0, SCTLR_EL1_SET
ret // return to head.S
SYM_FUNC_END(__cpu_setup)