More arm64 updates for 5.10

- Improve performance of Spectre-v2 mitigation on Falkor CPUs (if you're
  lucky enough to have one)

- Select HAVE_MOVE_PMD. This has been shown to improve mremap()
  performance, which is used heavily by the Android runtime GC, and it
  seems we forgot to enable this upstream back in 2018.

- Ensure linker flags are consistent between LLVM and BFD

- Fix stale comment in Spectre mitigation rework

- Fix broken copyright header

- Fix KASLR randomisation of the linear map

- Prevent arm64-specific prctl()s from compat tasks (return -EINVAL)
 -----BEGIN PGP SIGNATURE-----
 
 iQFEBAABCgAuFiEEPxTL6PPUbjXGY88ct6xw3ITBYzQFAl+QEPAQHHdpbGxAa2Vy
 bmVsLm9yZwAKCRC3rHDchMFjNE8jB/0YNYKO9mis/Xn5KcOCwlg4dbc2uVBknZXD
 f7otEJ6SOax2HcWz8qJlrJ+qbGFawPIqFBUAM0vU1VmoyctIoKRFTA8ACfWfWtnK
 QBfHrcxtJCh/GGq+E1IyuqWzCjppeY/7gYVdgi1xDEZRSaLz53MC1GVBwKBtu5cf
 X2Bfm8d9+PSSnmKfpO65wSCTvN3PQX1SNEHwwTWFZQx0p7GcQK1DdwoobM6dRnVy
 +e984ske+2a+nTrkhLSyQIgsfHuLB4pD6XdM/UOThnfdNxdQ0dUGn375sXP+b4dW
 7MTH9HP/dXIymTcuErMXOHJXLk/zUiUBaOxkmOxdvrhQd0uFNFIc
 =e9p9
 -----END PGP SIGNATURE-----

Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull more arm64 updates from Will Deacon:
 "A small selection of further arm64 fixes and updates. Most of these
  are fixes that came in during the merge window, with the exception of
  the HAVE_MOVE_PMD mremap() speed-up which we discussed back in 2018
  and somehow forgot to enable upstream.

   - Improve performance of Spectre-v2 mitigation on Falkor CPUs (if
     you're lucky enough to have one)

   - Select HAVE_MOVE_PMD. This has been shown to improve mremap()
     performance, which is used heavily by the Android runtime GC, and
     it seems we forgot to enable this upstream back in 2018.

   - Ensure linker flags are consistent between LLVM and BFD

   - Fix stale comment in Spectre mitigation rework

   - Fix broken copyright header

   - Fix KASLR randomisation of the linear map

   - Prevent arm64-specific prctl()s from compat tasks (return -EINVAL)"

Link: https://lore.kernel.org/kvmarm/20181108181201.88826-3-joelaf@google.com/

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: proton-pack: Update comment to reflect new function name
  arm64: spectre-v2: Favour CPU-specific mitigation at EL2
  arm64: link with -z norelro regardless of CONFIG_RELOCATABLE
  arm64: Fix a broken copyright header in gen_vdso_offsets.sh
  arm64: mremap speedup - Enable HAVE_MOVE_PMD
  arm64: mm: use single quantity to represent the PA to VA translation
  arm64: reject prctl(PR_PAC_RESET_KEYS) on compat tasks
Merged by Linus Torvalds on 2020-10-23 09:46:16 -07:00 in commit 032c7ed958.
9 changed files with 43 additions and 51 deletions

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig

@@ -123,6 +123,7 @@ config ARM64
 	select GENERIC_VDSO_TIME_NS
 	select HANDLE_DOMAIN_IRQ
 	select HARDIRQS_SW_RESEND
+	select HAVE_MOVE_PMD
 	select HAVE_PCI
 	select HAVE_ACPI_APEI if (ACPI && EFI)
 	select HAVE_ALIGNED_STRUCT_PAGE if SLUB
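
For context, HAVE_MOVE_PMD lets mremap() relocate page tables one PMD entry
(2MiB of mappings) at a time instead of copying 512 individual PTEs. A
minimal user-space sketch of the pattern that benefits; the 1GiB size, the
reserved destination and the alignment caveat are illustrative, not taken
from the patch:

#define _GNU_SOURCE		/* MREMAP_MAYMOVE, MREMAP_FIXED */
#include <stdio.h>
#include <sys/mman.h>

#define SZ	(1024UL * 1024 * 1024)	/* 1 GiB, like a GC from-space */

int main(void)
{
	/* Source: a large anonymous mapping. */
	void *src = mmap(NULL, SZ, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	/* Destination: reserve an address range to move it into. */
	void *dst = mmap(NULL, SZ, PROT_NONE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (src == MAP_FAILED || dst == MAP_FAILED)
		return 1;

	/* With HAVE_MOVE_PMD (and PMD-aligned src/dst), the kernel can
	 * move whole PMD entries here rather than every single PTE. */
	if (mremap(src, SZ, SZ, MREMAP_MAYMOVE | MREMAP_FIXED, dst)
	    == MAP_FAILED) {
		perror("mremap");
		return 1;
	}
	printf("moved %lu bytes to %p\n", SZ, dst);
	return 0;
}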

diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile

@@ -10,13 +10,13 @@
 #
 # Copyright (C) 1995-2001 by Russell King
 
-LDFLAGS_vmlinux	:=--no-undefined -X
+LDFLAGS_vmlinux	:=--no-undefined -X -z norelro
 
 ifeq ($(CONFIG_RELOCATABLE), y)
 # Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour
 # for relative relocs, since this leads to better Image compression
 # with the relocation offsets always being zero.
-LDFLAGS_vmlinux	+= -shared -Bsymbolic -z notext -z norelro \
+LDFLAGS_vmlinux	+= -shared -Bsymbolic -z notext \
 			$(call ld-option, --no-apply-dynamic-relocs)
 endif
 

diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h

@@ -169,7 +169,6 @@
 extern u64			vabits_actual;
 #define PAGE_END		(_PAGE_END(vabits_actual))
 
-extern s64			physvirt_offset;
 extern s64			memstart_addr;
 /* PHYS_OFFSET - the physical address of the start of memory. */
 #define PHYS_OFFSET		({ VM_BUG_ON(memstart_addr & 1); memstart_addr; })
@@ -245,7 +244,7 @@ static inline const void *__tag_set(const void *addr, u8 tag)
  */
 #define __is_lm_address(addr)	(!(((u64)addr) & BIT(vabits_actual - 1)))
 
-#define __lm_to_phys(addr)	(((addr) + physvirt_offset))
+#define __lm_to_phys(addr)	(((addr) & ~PAGE_OFFSET) + PHYS_OFFSET)
 #define __kimg_to_phys(addr)	((addr) - kimage_voffset)
 
 #define __virt_to_phys_nodebug(x) ({					\
@@ -263,7 +262,7 @@ extern phys_addr_t __phys_addr_symbol(unsigned long x);
 #define __phys_addr_symbol(x)	__pa_symbol_nodebug(x)
 #endif /* CONFIG_DEBUG_VIRTUAL */
 
-#define __phys_to_virt(x)	((unsigned long)((x) - physvirt_offset))
+#define __phys_to_virt(x)	((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET)
 #define __phys_to_kimg(x)	((unsigned long)((x) + kimage_voffset))
 
 /*
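
The trick in this change is that PAGE_OFFSET has all of its top bits set, so
it doubles as a mask: a linear-map VA becomes a PA by clearing those bits and
adding PHYS_OFFSET, with no separate physvirt_offset variable needed. A
user-space model of the two macros with made-up values (48-bit VAs, DRAM
starting at 0x80000000):

#include <assert.h>
#include <inttypes.h>
#include <stdio.h>

#define PAGE_OFFSET	(-(UINT64_C(1) << 48))	/* 0xffff000000000000 */
#define PHYS_OFFSET	UINT64_C(0x80000000)	/* memstart_addr, made up */

static uint64_t phys_to_virt(uint64_t pa)
{
	return (pa - PHYS_OFFSET) | PAGE_OFFSET;
}

static uint64_t lm_to_phys(uint64_t va)
{
	return (va & ~PAGE_OFFSET) + PHYS_OFFSET;
}

int main(void)
{
	uint64_t pa = UINT64_C(0x80001000);
	uint64_t va = phys_to_virt(pa);

	/* PA 0x80001000 <-> linear VA 0xffff000000001000 */
	printf("PA %#" PRIx64 " <-> VA %#" PRIx64 "\n", pa, va);
	assert(lm_to_phys(va) == pa);	/* the round trip is exact */
	return 0;
}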

diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h

@@ -24,6 +24,8 @@
 #define VMALLOC_START		(MODULES_END)
 #define VMALLOC_END		(- PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
 
+#define vmemmap			((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
+
 #define FIRST_USER_ADDRESS	0UL
 
 #ifndef __ASSEMBLY__
@@ -34,8 +36,6 @@
 #include <linux/mm_types.h>
 #include <linux/sched.h>
 
-extern struct page *vmemmap;
-
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE

diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c

@@ -678,7 +678,7 @@ int sve_set_current_vl(unsigned long arg)
 	vl = arg & PR_SVE_VL_LEN_MASK;
 	flags = arg & ~vl;
 
-	if (!system_supports_sve())
+	if (!system_supports_sve() || is_compat_task())
 		return -EINVAL;
 
 	ret = sve_set_vector_length(current, vl, flags);
@@ -691,7 +691,7 @@ int sve_set_current_vl(unsigned long arg)
 /* PR_SVE_GET_VL */
 int sve_get_current_vl(void)
 {
-	if (!system_supports_sve())
+	if (!system_supports_sve() || is_compat_task())
 		return -EINVAL;
 
 	return sve_prctl_status(0);

diff --git a/arch/arm64/kernel/pointer_auth.c b/arch/arm64/kernel/pointer_auth.c

@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
+#include <linux/compat.h>
 #include <linux/errno.h>
 #include <linux/prctl.h>
 #include <linux/random.h>
 #include <linux/sched.h>
@@ -17,6 +18,9 @@ int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg)
 	if (!system_supports_address_auth() && !system_supports_generic_auth())
 		return -EINVAL;
 
+	if (is_compat_thread(task_thread_info(tsk)))
+		return -EINVAL;
+
 	if (!arg) {
 		ptrauth_keys_init_user(keys);
 		return 0;
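
Both this hunk and the fpsimd.c one enforce the same rule from the summary
above: arm64-specific prctl()s are now refused outright for compat (32-bit)
tasks. From user space the change is visible only as an errno; a
hypothetical probe (the constant comes from <linux/prctl.h>):

#include <errno.h>
#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SVE_GET_VL
#define PR_SVE_GET_VL	51	/* from <linux/prctl.h> */
#endif

int main(void)
{
	int ret = prctl(PR_SVE_GET_VL, 0, 0, 0, 0);

	/* A compat task (or a CPU without SVE) now fails cleanly. */
	if (ret < 0 && errno == EINVAL)
		printf("SVE prctl rejected: EINVAL\n");
	else
		printf("SVE VL control word: %#x\n", ret);
	return 0;
}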

diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c

@@ -67,7 +67,8 @@ ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
  *   - Mitigated in hardware and advertised by ID_AA64PFR0_EL1.CSV2.
  *   - Mitigated in hardware and listed in our "safe list".
  *   - Mitigated in software by firmware.
- *   - Mitigated in software by a CPU-specific dance in the kernel.
+ *   - Mitigated in software by a CPU-specific dance in the kernel and a
+ *     firmware call at EL2.
  *   - Vulnerable.
  *
  * It's not unlikely for different CPUs in a big.LITTLE system to fall into
@@ -204,8 +205,8 @@ static void install_bp_hardening_cb(bp_hardening_cb_t fn)
 			__SMCCC_WORKAROUND_1_SMC_SZ;
 
 	/*
-	 * detect_harden_bp_fw() passes NULL for the hyp_vecs start/end if
-	 * we're a guest. Skip the hyp-vectors work.
+	 * Vinz Clortho takes the hyp_vecs start/end "keys" at
+	 * the door when we're a guest. Skip the hyp-vectors work.
 	 */
 	if (!is_hyp_mode_available()) {
 		__this_cpu_write(bp_hardening_data.fn, fn);
@@ -259,6 +260,16 @@ static void qcom_link_stack_sanitisation(void)
 		     : "=&r" (tmp));
 }
 
+static bp_hardening_cb_t spectre_v2_get_sw_mitigation_cb(void)
+{
+	u32 midr = read_cpuid_id();
+
+	if (((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR) &&
+	    ((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR_V1))
+		return NULL;
+
+	return qcom_link_stack_sanitisation;
+}
+
 static enum mitigation_state spectre_v2_enable_fw_mitigation(void)
 {
 	bp_hardening_cb_t cb;
@@ -284,26 +295,15 @@ static enum mitigation_state spectre_v2_enable_fw_mitigation(void)
 		return SPECTRE_VULNERABLE;
 	}
 
+	/*
+	 * Prefer a CPU-specific workaround if it exists. Note that we
+	 * still rely on firmware for the mitigation at EL2.
+	 */
+	cb = spectre_v2_get_sw_mitigation_cb() ?: cb;
 	install_bp_hardening_cb(cb);
 	return SPECTRE_MITIGATED;
 }
 
-static enum mitigation_state spectre_v2_enable_sw_mitigation(void)
-{
-	u32 midr;
-
-	if (spectre_v2_mitigations_off())
-		return SPECTRE_VULNERABLE;
-
-	midr = read_cpuid_id();
-	if (((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR) &&
-	    ((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR_V1))
-		return SPECTRE_VULNERABLE;
-
-	install_bp_hardening_cb(qcom_link_stack_sanitisation);
-	return SPECTRE_MITIGATED;
-}
-
 void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
 {
 	enum mitigation_state state;
@@ -313,8 +313,6 @@ void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
 	state = spectre_v2_get_cpu_hw_mitigation_state();
 	if (state == SPECTRE_VULNERABLE)
 		state = spectre_v2_enable_fw_mitigation();
-	if (state == SPECTRE_VULNERABLE)
-		state = spectre_v2_enable_sw_mitigation();
 
 	update_mitigation_state(&spectre_v2_state, state);
 }
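
The refactoring hinges on the GNU C "a ?: b" shorthand: cb keeps the
firmware callback unless a CPU-specific (Falkor) one exists, in which case
the latter wins while firmware still handles EL2. A standalone sketch of
just that selection; the names mirror the kernel code but the harness is
invented:

#include <stdio.h>

typedef void (*bp_hardening_cb_t)(void);

static void fw_mitigation(void) { puts("firmware mitigation"); }
static void qcom_link_stack_sanitisation(void) { puts("Falkor dance"); }

/* Stand-in for spectre_v2_get_sw_mitigation_cb(): non-NULL on Falkor. */
static bp_hardening_cb_t get_sw_mitigation_cb(int is_falkor)
{
	return is_falkor ? qcom_link_stack_sanitisation : NULL;
}

int main(void)
{
	bp_hardening_cb_t cb;

	/* GNU extension: "x ?: y" is x when x is non-zero, else y. */
	cb = fw_mitigation;
	cb = get_sw_mitigation_cb(1) ?: cb;
	cb();	/* Falkor: CPU-specific workaround, firmware covers EL2 */

	cb = fw_mitigation;
	cb = get_sw_mitigation_cb(0) ?: cb;
	cb();	/* everyone else: plain firmware mitigation */
	return 0;
}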

diff --git a/arch/arm64/kernel/vdso/gen_vdso_offsets.sh b/arch/arm64/kernel/vdso/gen_vdso_offsets.sh

@@ -8,7 +8,7 @@
 # Doing this inside the Makefile will break the $(filter-out) function,
 # causing Kbuild to rebuild the vdso-offsets header file every time.
 #
-# Author: Will Deacon <will.deacon@arm.com
+# Author: Will Deacon <will.deacon@arm.com>
 #
 
 LC_ALL=C

diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c

@@ -53,12 +53,6 @@
 s64 memstart_addr __ro_after_init = -1;
 EXPORT_SYMBOL(memstart_addr);
 
-s64 physvirt_offset __ro_after_init;
-EXPORT_SYMBOL(physvirt_offset);
-
-struct page *vmemmap __ro_after_init;
-EXPORT_SYMBOL(vmemmap);
-
 /*
  * We create both ZONE_DMA and ZONE_DMA32. ZONE_DMA covers the first 1G of
  * memory as some devices, namely the Raspberry Pi 4, have peripherals with
@@ -289,20 +283,6 @@ void __init arm64_memblock_init(void)
 	memstart_addr = round_down(memblock_start_of_DRAM(),
 				   ARM64_MEMSTART_ALIGN);
 
-	physvirt_offset = PHYS_OFFSET - PAGE_OFFSET;
-
-	vmemmap = ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT));
-
-	/*
-	 * If we are running with a 52-bit kernel VA config on a system that
-	 * does not support it, we have to offset our vmemmap and physvirt_offset
-	 * s.t. we avoid the 52-bit portion of the direct linear map
-	 */
-	if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52)) {
-		vmemmap += (_PAGE_OFFSET(48) - _PAGE_OFFSET(52)) >> PAGE_SHIFT;
-		physvirt_offset = PHYS_OFFSET - _PAGE_OFFSET(48);
-	}
-
 	/*
 	 * Remove the memory that we will not be able to cover with the
 	 * linear mapping. Take care not to clip the kernel which may be
@@ -317,6 +297,16 @@ void __init arm64_memblock_init(void)
 		memblock_remove(0, memstart_addr);
 	}
 
+	/*
+	 * If we are running with a 52-bit kernel VA config on a system that
+	 * does not support it, we have to place the available physical
+	 * memory in the 48-bit addressable part of the linear region, i.e.,
+	 * we have to move it upward. Since memstart_addr represents the
+	 * physical address of PAGE_OFFSET, we have to *subtract* from it.
+	 */
+	if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52))
+		memstart_addr -= _PAGE_OFFSET(48) - _PAGE_OFFSET(52);
+
 	/*
 	 * Apply the memory limit if it was set. Since the kernel may be loaded
 	 * high up in memory, add back the kernel region that must be accessible
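
To see why subtracting works, plug in numbers: _PAGE_OFFSET(va) is
-(1 << va), so memstart_addr drops by 2^52 - 2^48 and every linear-map VA
rises by the same amount, landing the DRAM base at _PAGE_OFFSET(48), the
start of the 48-bit addressable part of the linear region. A quick
user-space check with a made-up DRAM base:

#include <inttypes.h>
#include <stdio.h>

#define _PAGE_OFFSET(va)	(-(INT64_C(1) << (va)))

int main(void)
{
	int64_t memstart_addr = 0x80000000;	/* made-up DRAM base */
	uint64_t pa = 0x80000000;

	/* The fix, for vabits_actual == 48 under a 52-bit VA config: */
	memstart_addr -= _PAGE_OFFSET(48) - _PAGE_OFFSET(52);

	/* __phys_to_virt() with PAGE_OFFSET == _PAGE_OFFSET(52): */
	uint64_t va = (pa - (uint64_t)memstart_addr)
		      | (uint64_t)_PAGE_OFFSET(52);
	printf("linear VA of DRAM base: %#" PRIx64 "\n", va);
	/* Prints 0xffff000000000000, i.e. _PAGE_OFFSET(48). */
	return 0;
}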