Merge branch 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm
Pull ARM updates from Russell King:

 - further Spectre variant 1 fixes for user accessors.
 - kbuild cleanups (Masahiro Yamada)
 - hook up sync core functionality (Will Deacon)
 - nommu updates for hypervisor mode booting (Vladimir Murzin)
 - use compiler built-ins for fls and ffs (Nicolas Pitre)

* 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm:
  ARM: spectre-v1: mitigate user accesses
  ARM: spectre-v1: use get_user() for __get_user()
  ARM: use __inttype() in get_user()
  ARM: oabi-compat: copy semops using __copy_from_user()
  ARM: vfp: use __copy_from_user() when restoring VFP state
  ARM: 8785/1: use compiler built-ins for ffs and fls
  ARM: 8784/1: NOMMU: Allow enter in Hyp mode
  ARM: 8783/1: NOMMU: Extend check for VBAR support
  ARM: 8782/1: vfp: clean up arch/arm/vfp/Makefile
  ARM: signal: copy registers using __copy_from_user()
  ARM: tcm: ensure inline stub functions are marked static
  ARM: 8779/1: add endianness option to LDFLAGS instead of LD
  ARM: 8777/1: Hook up SYNC_CORE functionality for sys_membarrier()
commit 9b27efe081
@@ -5,10 +5,10 @@
 #
 # Architecture requirements
 #
-# * arm64
+# * arm/arm64
 #
-# Rely on eret context synchronization when returning from IPI handler, and
-# when returning to user-space.
+# Rely on implicit context synchronization as a result of exception return
+# when returning from IPI handler, and when returning to user-space.
 #
 # * x86
 #
@@ -31,7 +31,7 @@
     -----------------------
     |       alpha: | TODO |
     |         arc: | TODO |
-    |         arm: | TODO |
+    |         arm: |  ok  |
     |       arm64: |  ok  |
     |         c6x: | TODO |
     |       h8300: | TODO |
@@ -9,6 +9,7 @@ config ARM
 	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_FORTIFY_SOURCE
 	select ARCH_HAS_KCOV
+	select ARCH_HAS_MEMBARRIER_SYNC_CORE
 	select ARCH_HAS_PTE_SPECIAL if ARM_LPAE
 	select ARCH_HAS_PHYS_TO_DMA
 	select ARCH_HAS_SET_MEMORY
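For context (not part of the diff): ARCH_HAS_MEMBARRIER_SYNC_CORE is what lets user space ask for a core-serializing barrier through membarrier(2), which a JIT needs before running freshly written code on other threads. A minimal user-space sketch, assuming a kernel built with this option and the standard UAPI constants from linux/membarrier.h:

#include <linux/membarrier.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	/* Register once for the private expedited sync-core command... */
	if (syscall(__NR_membarrier,
		    MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE, 0))
		perror("membarrier register");

	/* ...then request a core-serializing barrier across all threads
	 * of this process before jumping into newly generated code. */
	if (syscall(__NR_membarrier,
		    MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE, 0))
		perror("membarrier sync-core");

	return 0;
}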
@@ -46,12 +46,12 @@ ifeq ($(CONFIG_CPU_BIG_ENDIAN),y)
 KBUILD_CPPFLAGS	+= -mbig-endian
 CHECKFLAGS	+= -D__ARMEB__
 AS		+= -EB
-LD		+= -EB
+LDFLAGS		+= -EB
 else
 KBUILD_CPPFLAGS	+= -mlittle-endian
 CHECKFLAGS	+= -D__ARMEL__
 AS		+= -EL
-LD		+= -EL
+LDFLAGS		+= -EL
 endif
 
 #
@@ -460,6 +460,10 @@ THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
 	adds	\tmp, \addr, #\size - 1
 	sbcccs	\tmp, \tmp, \limit
 	bcs	\bad
+#ifdef CONFIG_CPU_SPECTRE
+	movcs	\addr, #0
+	csdb
+#endif
 #endif
 	.endm
@@ -215,7 +215,6 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
 
 #if __LINUX_ARM_ARCH__ < 5
 
-#include <asm-generic/bitops/ffz.h>
 #include <asm-generic/bitops/__fls.h>
 #include <asm-generic/bitops/__ffs.h>
 #include <asm-generic/bitops/fls.h>
@@ -223,93 +222,20 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
 
 #else
 
-static inline int constant_fls(int x)
-{
-	int r = 32;
-
-	if (!x)
-		return 0;
-	if (!(x & 0xffff0000u)) {
-		x <<= 16;
-		r -= 16;
-	}
-	if (!(x & 0xff000000u)) {
-		x <<= 8;
-		r -= 8;
-	}
-	if (!(x & 0xf0000000u)) {
-		x <<= 4;
-		r -= 4;
-	}
-	if (!(x & 0xc0000000u)) {
-		x <<= 2;
-		r -= 2;
-	}
-	if (!(x & 0x80000000u)) {
-		x <<= 1;
-		r -= 1;
-	}
-	return r;
-}
-
 /*
- * On ARMv5 and above those functions can be implemented around the
- * clz instruction for much better code efficiency.  __clz returns
- * the number of leading zeros, zero input will return 32, and
- * 0x80000000 will return 0.
+ * On ARMv5 and above, the gcc built-ins may rely on the clz instruction
+ * and produce optimal inlined code in all cases. On ARMv7 it is even
+ * better by also using the rbit instruction.
  */
-static inline unsigned int __clz(unsigned int x)
-{
-	unsigned int ret;
-
-	asm("clz\t%0, %1" : "=r" (ret) : "r" (x));
-
-	return ret;
-}
-
-/*
- * fls() returns zero if the input is zero, otherwise returns the bit
- * position of the last set bit, where the LSB is 1 and MSB is 32.
- */
-static inline int fls(int x)
-{
-	if (__builtin_constant_p(x))
-		return constant_fls(x);
-
-	return 32 - __clz(x);
-}
-
-/*
- * __fls() returns the bit position of the last bit set, where the
- * LSB is 0 and MSB is 31.  Zero input is undefined.
- */
-static inline unsigned long __fls(unsigned long x)
-{
-	return fls(x) - 1;
-}
-
-/*
- * ffs() returns zero if the input was zero, otherwise returns the bit
- * position of the first set bit, where the LSB is 1 and MSB is 32.
- */
-static inline int ffs(int x)
-{
-	return fls(x & -x);
-}
-
-/*
- * __ffs() returns the bit position of the first bit set, where the
- * LSB is 0 and MSB is 31.  Zero input is undefined.
- */
-static inline unsigned long __ffs(unsigned long x)
-{
-	return ffs(x) - 1;
-}
-
-#define ffz(x) __ffs( ~(x) )
+#include <asm-generic/bitops/builtin-__fls.h>
+#include <asm-generic/bitops/builtin-__ffs.h>
+#include <asm-generic/bitops/builtin-fls.h>
+#include <asm-generic/bitops/builtin-ffs.h>
 
 #endif
 
+#include <asm-generic/bitops/ffz.h>
+
 #include <asm-generic/bitops/fls64.h>
 
 #include <asm-generic/bitops/sched.h>
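A quick way to see that the semantics survive the switch to the asm-generic builtin-* headers: the removed open-coded fls() and the built-in based one reduce to the same result. A small user-space sketch, not part of the patch, with an invented helper name:

#include <assert.h>

/* fls(): 1-based position of the most significant set bit, 0 for 0.
 * The builtin-fls.h version is essentially x ? 32 - __builtin_clz(x) : 0,
 * which is what the removed ARM-specific inline computed via clz. */
static int fls_sketch(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	assert(fls_sketch(0) == 0);
	assert(fls_sketch(1) == 1);
	assert(fls_sketch(0x00f00000) == 24);
	assert(fls_sketch(0x80000000u) == 32);
	return 0;
}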
@@ -123,8 +123,8 @@ struct user_vfp_exc;
 
 extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *,
 					   struct user_vfp_exc __user *);
-extern int vfp_restore_user_hwstate(struct user_vfp __user *,
-				    struct user_vfp_exc __user *);
+extern int vfp_restore_user_hwstate(struct user_vfp *,
+				    struct user_vfp_exc *);
 #endif
 
 /*
@@ -84,6 +84,13 @@ static inline void set_fs(mm_segment_t fs)
 		: "cc"); \
 	flag; })
 
+/*
+ * This is a type: either unsigned long, if the argument fits into
+ * that type, or otherwise unsigned long long.
+ */
+#define __inttype(x) \
+	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
+
 /*
  * Single-value transfer routines.  They automatically use the right
  * size if we just have the right pointer type.  Note that the functions
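To illustrate what the new __inttype() macro selects (a user-space sketch, not kernel code): anything no wider than unsigned long is widened to unsigned long, while a 64-bit argument selects unsigned long long, so the temporary used by get_user() is always wide enough for the access being performed.

#include <stdio.h>

#define __inttype(x) \
	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

int main(void)
{
	char c = 0;
	long long ll = 0;

	/* Prints sizeof(unsigned long) for the char case (4 on 32-bit ARM)
	 * and 8 for the long long case. */
	printf("__inttype(char):      %zu bytes\n", sizeof(__inttype(c)));
	printf("__inttype(long long): %zu bytes\n", sizeof(__inttype(ll)));
	return 0;
}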
@@ -153,7 +160,7 @@ extern int __get_user_64t_4(void *);
 	({								\
 		unsigned long __limit = current_thread_info()->addr_limit - 1; \
 		register typeof(*(p)) __user *__p asm("r0") = (p);	\
-		register typeof(x) __r2 asm("r2");			\
+		register __inttype(x) __r2 asm("r2");			\
 		register unsigned long __l asm("r1") = __limit;		\
 		register int __e asm("r0");				\
 		unsigned int __ua_flags = uaccess_save_and_enable();	\
@@ -243,6 +250,16 @@ static inline void set_fs(mm_segment_t fs)
 #define user_addr_max() \
 	(uaccess_kernel() ? ~0UL : get_fs())
 
+#ifdef CONFIG_CPU_SPECTRE
+/*
+ * When mitigating Spectre variant 1, it is not worth fixing the non-
+ * verifying accessors, because we need to add verification of the
+ * address space there.  Force these to use the standard get_user()
+ * version instead.
+ */
+#define __get_user(x, ptr) get_user(x, ptr)
+#else
+
 /*
  * The "__xxx" versions of the user access functions do not verify the
  * address space - it must have been done previously with a separate
@ -259,12 +276,6 @@ static inline void set_fs(mm_segment_t fs)
|
|||
__gu_err; \
|
||||
})
|
||||
|
||||
#define __get_user_error(x, ptr, err) \
|
||||
({ \
|
||||
__get_user_err((x), (ptr), err); \
|
||||
(void) 0; \
|
||||
})
|
||||
|
||||
#define __get_user_err(x, ptr, err) \
|
||||
do { \
|
||||
unsigned long __gu_addr = (unsigned long)(ptr); \
|
||||
|
@@ -324,6 +335,7 @@ do {									\
 
 #define __get_user_asm_word(x, addr, err)			\
 	__get_user_asm(x, addr, err, ldr)
+#endif
 
 
 #define __put_user_switch(x, ptr, __err, __fn)				\
@@ -53,7 +53,11 @@ ENTRY(stext)
  THUMB(1:			)
 #endif
 
-	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
-						@ and irqs disabled
+#ifdef CONFIG_ARM_VIRT_EXT
+	bl	__hyp_stub_install
+#endif
+	@ ensure svc mode and all interrupts masked
+	safe_svcmode_maskall r9
+
 #if defined(CONFIG_CPU_CP15)
 	mrc	p15, 0, r9, c0, c0		@ get processor id
@@ -89,7 +93,11 @@ ENTRY(secondary_startup)
 	 * the processor type - there is no need to check the machine type
 	 * as it has already been validated by the primary processor.
 	 */
-	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9
+#ifdef CONFIG_ARM_VIRT_EXT
+	bl	__hyp_stub_install_secondary
+#endif
+	safe_svcmode_maskall r9
+
 #ifndef CONFIG_CPU_CP15
 	ldr	r9, =CONFIG_PROCESSOR_ID
 #else
@@ -150,22 +150,18 @@ static int preserve_vfp_context(struct vfp_sigframe __user *frame)
 
 static int restore_vfp_context(char __user **auxp)
 {
-	struct vfp_sigframe __user *frame =
-		(struct vfp_sigframe __user *)*auxp;
-	unsigned long magic;
-	unsigned long size;
-	int err = 0;
-
-	__get_user_error(magic, &frame->magic, err);
-	__get_user_error(size, &frame->size, err);
+	struct vfp_sigframe frame;
+	int err;
+
+	err = __copy_from_user(&frame, *auxp, sizeof(frame));
 
 	if (err)
 		return -EFAULT;
-	if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
-		return err;
 
-	*auxp += size;
-	return vfp_restore_user_hwstate(&frame->ufp, &frame->ufp_exc);
+	if (frame.magic != VFP_MAGIC || frame.size != VFP_STORAGE_SIZE)
+		return -EINVAL;
+
+	*auxp += sizeof(frame);
+	return vfp_restore_user_hwstate(&frame.ufp, &frame.ufp_exc);
 }
 
 #endif
@@ -176,6 +172,7 @@ static int restore_vfp_context(char __user **auxp)
 
 static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
 {
+	struct sigcontext context;
 	char __user *aux;
 	sigset_t set;
 	int err;
@@ -184,23 +181,26 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
 	if (err == 0)
 		set_current_blocked(&set);
 
-	__get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
-	__get_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
-	__get_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
-	__get_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
-	__get_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
-	__get_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
-	__get_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
-	__get_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
-	__get_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
-	__get_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
-	__get_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
-	__get_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
-	__get_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
-	__get_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
-	__get_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
-	__get_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
-	__get_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);
+	err |= __copy_from_user(&context, &sf->uc.uc_mcontext, sizeof(context));
+	if (err == 0) {
+		regs->ARM_r0 = context.arm_r0;
+		regs->ARM_r1 = context.arm_r1;
+		regs->ARM_r2 = context.arm_r2;
+		regs->ARM_r3 = context.arm_r3;
+		regs->ARM_r4 = context.arm_r4;
+		regs->ARM_r5 = context.arm_r5;
+		regs->ARM_r6 = context.arm_r6;
+		regs->ARM_r7 = context.arm_r7;
+		regs->ARM_r8 = context.arm_r8;
+		regs->ARM_r9 = context.arm_r9;
+		regs->ARM_r10 = context.arm_r10;
+		regs->ARM_fp = context.arm_fp;
+		regs->ARM_ip = context.arm_ip;
+		regs->ARM_sp = context.arm_sp;
+		regs->ARM_lr = context.arm_lr;
+		regs->ARM_pc = context.arm_pc;
+		regs->ARM_cpsr = context.arm_cpsr;
+	}
 
 	err |= !valid_user_regs(regs);
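The pattern above, restated as a stand-alone sketch (names invented; memcpy stands in for the range-checked __copy_from_user()): one verified bulk copy into a local structure, then plain assignments, instead of many individual __get_user_error() reads that would otherwise each go through the verifying get_user() path under the Spectre-v1 changes.

#include <string.h>

struct demo_sigctx {
	unsigned long r0, r1, pc;	/* trimmed-down stand-in for sigcontext */
};

/* Illustrative only: in the kernel the copy is __copy_from_user(), which
 * verifies and clamps the user pointer once for the whole structure. */
static void restore_demo(struct demo_sigctx *regs, const struct demo_sigctx *user_frame)
{
	struct demo_sigctx ctx;

	memcpy(&ctx, user_frame, sizeof(ctx));

	regs->r0 = ctx.r0;
	regs->r1 = ctx.r1;
	regs->pc = ctx.pc;
}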
@@ -329,9 +329,11 @@ asmlinkage long sys_oabi_semtimedop(int semid,
 		return -ENOMEM;
 	err = 0;
 	for (i = 0; i < nsops; i++) {
-		__get_user_error(sops[i].sem_num, &tsops->sem_num, err);
-		__get_user_error(sops[i].sem_op, &tsops->sem_op, err);
-		__get_user_error(sops[i].sem_flg, &tsops->sem_flg, err);
+		struct oabi_sembuf osb;
+		err |= __copy_from_user(&osb, tsops, sizeof(osb));
+		sops[i].sem_num = osb.sem_num;
+		sops[i].sem_op = osb.sem_op;
+		sops[i].sem_flg = osb.sem_flg;
 		tsops++;
 	}
 	if (timeout) {
@@ -90,6 +90,15 @@
 
 	.text
 
 ENTRY(arm_copy_from_user)
+#ifdef CONFIG_CPU_SPECTRE
+	get_thread_info r3
+	ldr	r3, [r3, #TI_ADDR_LIMIT]
+	adds	ip, r1, r2	@ ip=addr+size
+	sub	r3, r3, #1	@ addr_limit - 1
+	cmpcc	ip, r3		@ if (addr+size > addr_limit - 1)
+	movcs	r1, #0		@ addr = NULL
+	csdb
+#endif
 
 #include "copy_template.S"
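A rough C rendering of the CONFIG_CPU_SPECTRE prologue above (a sketch, not kernel code; the function name and the use of __builtin_add_overflow are my own): if addr + size runs past addr_limit - 1, the pointer is forced to NULL before the copy, and the csdb barrier keeps speculation from running ahead with the unclamped value.

#include <stdint.h>
#include <stddef.h>

/* Sketch of the clamp performed in registers above:
 *   adds  ip, r1, r2     -> end = addr + size, carry set on overflow
 *   cmpcc ip, limit - 1  -> compare only if the add did not overflow
 *   movcs r1, #0         -> addr = NULL on overflow or end > limit - 1
 *   csdb                 -> speculation barrier, no C equivalent here
 */
static uintptr_t clamp_user_ptr(uintptr_t addr, size_t size, uintptr_t limit)
{
	uintptr_t end;

	if (__builtin_add_overflow(addr, size, &end) || end > limit - 1)
		return 0;

	return addr;
}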
@@ -702,7 +702,6 @@ config ARM_THUMBEE
 
 config ARM_VIRT_EXT
 	bool
-	depends on MMU
 	default y if CPU_V7
 	help
 	  Enable the kernel to make use of the ARM Virtualization
@@ -53,7 +53,8 @@ static inline bool security_extensions_enabled(void)
 {
 	/* Check CPUID Identification Scheme before ID_PFR1 read */
 	if ((read_cpuid_id() & 0x000f0000) == 0x000f0000)
-		return !!cpuid_feature_extract(CPUID_EXT_PFR1, 4);
+		return cpuid_feature_extract(CPUID_EXT_PFR1, 4) ||
+			cpuid_feature_extract(CPUID_EXT_PFR1, 20);
 	return 0;
 }
@@ -11,7 +11,7 @@
 void __init tcm_init(void);
 #else
 /* No TCM support, just blank inlines to be optimized out */
-inline void tcm_init(void)
+static inline void tcm_init(void)
 {
 }
 #endif
@@ -8,8 +8,5 @@
 # asflags-y := -DDEBUG
 
 KBUILD_AFLAGS	:=$(KBUILD_AFLAGS:-msoft-float=-Wa,-mfpu=softvfp+vfp -mfloat-abi=soft)
-LDFLAGS		+=--no-warn-mismatch
 
-obj-y		+= vfp.o
-
-vfp-$(CONFIG_VFP) += vfpmodule.o entry.o vfphw.o vfpsingle.o vfpdouble.o
+obj-y		+= vfpmodule.o entry.o vfphw.o vfpsingle.o vfpdouble.o
@@ -596,13 +596,11 @@ int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
 }
 
 /* Sanitise and restore the current VFP state from the provided structures. */
-int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
-			     struct user_vfp_exc __user *ufp_exc)
+int vfp_restore_user_hwstate(struct user_vfp *ufp, struct user_vfp_exc *ufp_exc)
 {
 	struct thread_info *thread = current_thread_info();
 	struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
 	unsigned long fpexc;
-	int err = 0;
 
 	/* Disable VFP to avoid corrupting the new thread state. */
 	vfp_flush_hwstate(thread);
|
|||
* Copy the floating point registers. There can be unused
|
||||
* registers see asm/hwcap.h for details.
|
||||
*/
|
||||
err |= __copy_from_user(&hwstate->fpregs, &ufp->fpregs,
|
||||
sizeof(hwstate->fpregs));
|
||||
memcpy(&hwstate->fpregs, &ufp->fpregs, sizeof(hwstate->fpregs));
|
||||
/*
|
||||
* Copy the status and control register.
|
||||
*/
|
||||
__get_user_error(hwstate->fpscr, &ufp->fpscr, err);
|
||||
hwstate->fpscr = ufp->fpscr;
|
||||
|
||||
/*
|
||||
* Sanitise and restore the exception registers.
|
||||
*/
|
||||
__get_user_error(fpexc, &ufp_exc->fpexc, err);
|
||||
fpexc = ufp_exc->fpexc;
|
||||
|
||||
/* Ensure the VFP is enabled. */
|
||||
fpexc |= FPEXC_EN;
|
||||
|
@@ -630,10 +627,10 @@ int vfp_restore_user_hwstate(struct user_vfp *ufp, struct user_vfp_exc *ufp_exc)
 	fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
 	hwstate->fpexc = fpexc;
 
-	__get_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
-	__get_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
+	hwstate->fpinst = ufp_exc->fpinst;
+	hwstate->fpinst2 = ufp_exc->fpinst2;
 
-	return err ? -EFAULT : 0;
+	return 0;
 }
 
 /*