commit 13b86bc4cd
Merge tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm

ARM updates for 5.4-rc

Pull ARM fixes from Russell King:

 - fix for alignment faults under high memory pressure
 - use u32 for ARM instructions in fault handler
 - mark functions that must always be inlined with __always_inline
 - fix for nommu XIP
 - fix ARMv7M switch to handler mode in reboot path
 - fix the recently introduced AMBA reset control error paths

* tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm:
  ARM: 8926/1: v7m: remove register save to stack before svc
  ARM: 8914/1: NOMMU: Fix exc_ret for XIP
  ARM: 8908/1: add __always_inline to functions called from __get_user_check()
  ARM: mm: alignment: use "u32" for 32-bit instructions
  ARM: mm: fix alignment handler faults under memory pressure
  drivers/amba: fix reset control error handling
arch/arm/include/asm/domain.h
@@ -82,7 +82,7 @@
 #ifndef __ASSEMBLY__
 
 #ifdef CONFIG_CPU_CP15_MMU
-static inline unsigned int get_domain(void)
+static __always_inline unsigned int get_domain(void)
 {
 	unsigned int domain;
 
@@ -94,7 +94,7 @@ static inline unsigned int get_domain(void)
 	return domain;
 }
 
-static inline void set_domain(unsigned val)
+static __always_inline void set_domain(unsigned int val)
 {
 	asm volatile(
 	"mcr	p15, 0, %0, c3, c0	@ set domain"
@@ -102,12 +102,12 @@ static inline void set_domain(unsigned val)
 	isb();
 }
 #else
-static inline unsigned int get_domain(void)
+static __always_inline unsigned int get_domain(void)
 {
 	return 0;
 }
 
-static inline void set_domain(unsigned val)
+static __always_inline void set_domain(unsigned int val)
 {
 }
 #endif
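Note on the hunks above: with CONFIG_OPTIMIZE_INLINING, a plain "inline" is only a hint, so the compiler may emit get_domain()/set_domain() as real out-of-line functions; __get_user_check() relies on these helpers expanding in place so its hand-picked register allocation is not clobbered, hence the switch to __always_inline. The standalone sketch below (not kernel code; the helper names are invented) shows the GCC attribute that __always_inline boils down to:

/*
 * Standalone illustration: force inlining even when a plain "inline"
 * hint could be ignored by the optimizer.
 */
#include <stdio.h>

/* plain "inline": the compiler may still emit this as a real call */
static inline unsigned int get_value_hint(void)
{
	return 0x55555555;
}

/* forced: the body is guaranteed to be expanded at every call site */
static inline __attribute__((always_inline)) unsigned int get_value_forced(void)
{
	return 0x55555555;
}

int main(void)
{
	printf("%08x %08x\n", get_value_hint(), get_value_forced());
	return 0;
}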
arch/arm/include/asm/uaccess.h
@@ -22,7 +22,7 @@
  * perform such accesses (eg, via list poison values) which could then
  * be exploited for priviledge escalation.
  */
-static inline unsigned int uaccess_save_and_enable(void)
+static __always_inline unsigned int uaccess_save_and_enable(void)
 {
 #ifdef CONFIG_CPU_SW_DOMAIN_PAN
 	unsigned int old_domain = get_domain();
@@ -37,7 +37,7 @@ static inline unsigned int uaccess_save_and_enable(void)
 #endif
 }
 
-static inline void uaccess_restore(unsigned int flags)
+static __always_inline void uaccess_restore(unsigned int flags)
 {
 #ifdef CONFIG_CPU_SW_DOMAIN_PAN
 	/* Restore the user access mask */
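Same treatment for the uaccess helpers: uaccess_save_and_enable() and uaccess_restore() bracket every user access, and __get_user_check() expects them to expand in place rather than become function calls. A minimal sketch of that save/enable ... restore pairing, with a plain variable standing in for the CP15 domain register (names invented for illustration, not kernel code):

/*
 * Standalone sketch of the save/enable ... restore bracket; the global
 * below stands in for the DACR/domain register.
 */
#include <stdio.h>

static unsigned int fake_dacr = 0x1;		/* "user access disabled" */

static inline __attribute__((always_inline)) unsigned int access_save_and_enable(void)
{
	unsigned int old = fake_dacr;

	fake_dacr = 0x3;			/* "user access enabled" */
	return old;
}

static inline __attribute__((always_inline)) void access_restore(unsigned int flags)
{
	fake_dacr = flags;
}

int main(void)
{
	unsigned int flags = access_save_and_enable();

	printf("inside the window: %#x\n", fake_dacr);
	/* ... the actual user load/store would go here ... */
	access_restore(flags);
	printf("after the window:  %#x\n", fake_dacr);
	return 0;
}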
arch/arm/kernel/head-common.S
@@ -68,7 +68,7 @@ ENDPROC(__vet_atags)
  * The following fragment of code is executed with the MMU on in MMU mode,
  * and uses absolute addresses; this is not position independent.
  *
- *  r0  = cp#15 control register
+ *  r0  = cp#15 control register (exc_ret for M-class)
  *  r1  = machine ID
  *  r2  = atags/dtb pointer
  *  r9  = processor ID
@@ -137,7 +137,8 @@ __mmap_switched_data:
 #ifdef CONFIG_CPU_CP15
 	.long	cr_alignment			@ r3
 #else
-	.long	0				@ r3
+M_CLASS(.long	exc_ret)			@ r3
+AR_CLASS(.long	0)				@ r3
 #endif
 	.size	__mmap_switched_data, . - __mmap_switched_data
 
arch/arm/kernel/head-nommu.S
@@ -201,6 +201,8 @@ M_CLASS(streq	r3, [r12, #PMSAv8_MAIR1])
 	bic	r0, r0, #V7M_SCB_CCR_IC
 #endif
 	str	r0, [r12, V7M_SCB_CCR]
+	/* Pass exc_ret to __mmap_switched */
+	mov	r0, r10
 #endif /* CONFIG_CPU_CP15 elif CONFIG_CPU_V7M */
 	ret	lr
 ENDPROC(__after_proc_init)
arch/arm/mm/alignment.c
@@ -324,7 +324,7 @@ union offset_union {
 	__put32_unaligned_check("strbt", val, addr)
 
 static void
-do_alignment_finish_ldst(unsigned long addr, unsigned long instr, struct pt_regs *regs, union offset_union offset)
+do_alignment_finish_ldst(unsigned long addr, u32 instr, struct pt_regs *regs, union offset_union offset)
 {
 	if (!LDST_U_BIT(instr))
 		offset.un = -offset.un;
@@ -337,7 +337,7 @@ do_alignment_finish_ldst(unsigned long addr, unsigned long instr, struct pt_regs
 }
 
 static int
-do_alignment_ldrhstrh(unsigned long addr, unsigned long instr, struct pt_regs *regs)
+do_alignment_ldrhstrh(unsigned long addr, u32 instr, struct pt_regs *regs)
 {
 	unsigned int rd = RD_BITS(instr);
 
@@ -386,8 +386,7 @@ do_alignment_ldrhstrh(unsigned long addr, unsigned long instr, struct pt_regs *r
 }
 
 static int
-do_alignment_ldrdstrd(unsigned long addr, unsigned long instr,
-		      struct pt_regs *regs)
+do_alignment_ldrdstrd(unsigned long addr, u32 instr, struct pt_regs *regs)
 {
 	unsigned int rd = RD_BITS(instr);
 	unsigned int rd2;
@@ -449,7 +448,7 @@ do_alignment_ldrdstrd(unsigned long addr, unsigned long instr,
 }
 
 static int
-do_alignment_ldrstr(unsigned long addr, unsigned long instr, struct pt_regs *regs)
+do_alignment_ldrstr(unsigned long addr, u32 instr, struct pt_regs *regs)
 {
 	unsigned int rd = RD_BITS(instr);
 
@@ -498,7 +497,7 @@ do_alignment_ldrstr(unsigned long addr, unsigned long instr, struct pt_regs *reg
  *	PU = 10		A	B
  */
 static int
-do_alignment_ldmstm(unsigned long addr, unsigned long instr, struct pt_regs *regs)
+do_alignment_ldmstm(unsigned long addr, u32 instr, struct pt_regs *regs)
 {
 	unsigned int rd, rn, correction, nr_regs, regbits;
 	unsigned long eaddr, newaddr;
@@ -539,7 +538,7 @@ do_alignment_ldmstm(unsigned long addr, unsigned long instr, struct pt_regs *reg
 	 * processor for us.
 	 */
 	if (addr != eaddr) {
-		pr_err("LDMSTM: PC = %08lx, instr = %08lx, "
+		pr_err("LDMSTM: PC = %08lx, instr = %08x, "
 			"addr = %08lx, eaddr = %08lx\n",
 			instruction_pointer(regs), instr, addr, eaddr);
 		show_regs(regs);
@@ -716,10 +715,10 @@ thumb2arm(u16 tinstr)
  * 2. Register name Rt from ARMv7 is same as Rd from ARMv6 (Rd is Rt)
  */
 static void *
-do_alignment_t32_to_handler(unsigned long *pinstr, struct pt_regs *regs,
+do_alignment_t32_to_handler(u32 *pinstr, struct pt_regs *regs,
 			    union offset_union *poffset)
 {
-	unsigned long instr = *pinstr;
+	u32 instr = *pinstr;
 	u16 tinst1 = (instr >> 16) & 0xffff;
 	u16 tinst2 = instr & 0xffff;
 
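The signature changes above are mechanical: an ARM or combined Thumb-2 encoding is exactly 32 bits, so "u32" describes it better than "unsigned long" and keeps the bit-field macros honest about their operand width. A small standalone sketch of the kind of field extraction these handlers do; the two macros mirror the file's LDST_U_BIT()/RD_BITS() definitions, and the sample word is just an example encoding:

/* Standalone sketch, not kernel code: decode two fields of a 32-bit
 * ARM load/store encoding using a fixed-width type. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define LDST_U_BIT(instr)	((instr) & (UINT32_C(1) << 23))	/* 1 = add offset */
#define RD_BITS(instr)		(((instr) >> 12) & 15)		/* destination reg */

int main(void)
{
	uint32_t instr = 0xe59f1004;	/* ldr r1, [pc, #4] */

	printf("instr = %08" PRIx32 ", U bit = %s, Rd = r%u\n",
	       instr, LDST_U_BIT(instr) ? "set" : "clear",
	       (unsigned int)RD_BITS(instr));
	return 0;
}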
@@ -767,17 +766,48 @@ do_alignment_t32_to_handler(unsigned long *pinstr, struct pt_regs *regs,
 	return NULL;
 }
 
+static int alignment_get_arm(struct pt_regs *regs, u32 *ip, u32 *inst)
+{
+	u32 instr = 0;
+	int fault;
+
+	if (user_mode(regs))
+		fault = get_user(instr, ip);
+	else
+		fault = probe_kernel_address(ip, instr);
+
+	*inst = __mem_to_opcode_arm(instr);
+
+	return fault;
+}
+
+static int alignment_get_thumb(struct pt_regs *regs, u16 *ip, u16 *inst)
+{
+	u16 instr = 0;
+	int fault;
+
+	if (user_mode(regs))
+		fault = get_user(instr, ip);
+	else
+		fault = probe_kernel_address(ip, instr);
+
+	*inst = __mem_to_opcode_thumb16(instr);
+
+	return fault;
+}
+
 static int
 do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 {
 	union offset_union uninitialized_var(offset);
-	unsigned long instr = 0, instrptr;
-	int (*handler)(unsigned long addr, unsigned long instr, struct pt_regs *regs);
+	unsigned long instrptr;
+	int (*handler)(unsigned long addr, u32 instr, struct pt_regs *regs);
 	unsigned int type;
+	unsigned int fault;
+	u32 instr = 0;
 	u16 tinstr = 0;
 	int isize = 4;
 	int thumb2_32b = 0;
-	int fault;
 
 	if (interrupts_enabled(regs))
 		local_irq_enable();
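The two helpers above are the heart of the memory-pressure fix: a user-mode fault now fetches the offending instruction with get_user(), which may sleep and page the text back in, instead of probe_kernel_address(), which runs with page faults disabled and so fails spuriously when the instruction page has been reclaimed. A standalone sketch of that dispatch; the fetch functions are stand-ins, not kernel APIs:

/* Standalone sketch, not kernel code: the user/kernel dispatch that
 * alignment_get_arm() performs.  fetch_faulting() stands in for
 * get_user() (may fault the page back in); fetch_nofault() stands in
 * for probe_kernel_address() (never takes a page fault). */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int fetch_faulting(const uint32_t *ip, uint32_t *val)
{
	*val = *ip;			/* a real fault would be handled here */
	return 0;
}

static int fetch_nofault(const uint32_t *ip, uint32_t *val)
{
	memcpy(val, ip, sizeof(*val));	/* would bail out instead of faulting */
	return 0;
}

static int get_insn(bool user_mode, const uint32_t *ip, uint32_t *inst)
{
	uint32_t instr = 0;
	int fault;

	if (user_mode)
		fault = fetch_faulting(ip, &instr);
	else
		fault = fetch_nofault(ip, &instr);

	*inst = instr;			/* the kernel also fixes endianness here */
	return fault;
}

int main(void)
{
	uint32_t word = 0xe59f1004;
	uint32_t instr;

	if (!get_insn(true, &word, &instr))
		printf("fetched %08" PRIx32 "\n", instr);
	return 0;
}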
@@ -786,15 +816,14 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 
 	if (thumb_mode(regs)) {
 		u16 *ptr = (u16 *)(instrptr & ~1);
-		fault = probe_kernel_address(ptr, tinstr);
-		tinstr = __mem_to_opcode_thumb16(tinstr);
+
+		fault = alignment_get_thumb(regs, ptr, &tinstr);
 		if (!fault) {
 			if (cpu_architecture() >= CPU_ARCH_ARMv7 &&
 			    IS_T32(tinstr)) {
 				/* Thumb-2 32-bit */
-				u16 tinst2 = 0;
-				fault = probe_kernel_address(ptr + 1, tinst2);
-				tinst2 = __mem_to_opcode_thumb16(tinst2);
+				u16 tinst2;
+				fault = alignment_get_thumb(regs, ptr + 1, &tinst2);
 				instr = __opcode_thumb32_compose(tinstr, tinst2);
 				thumb2_32b = 1;
 			} else {
@@ -803,8 +832,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 			}
 		}
 	} else {
-		fault = probe_kernel_address((void *)instrptr, instr);
-		instr = __mem_to_opcode_arm(instr);
+		fault = alignment_get_arm(regs, (void *)instrptr, &instr);
 	}
 
 	if (fault) {
@@ -926,7 +954,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	 * Oops, we didn't handle the instruction.
 	 */
 	pr_err("Alignment trap: not handling instruction "
-		"%0*lx at [<%08lx>]\n",
+		"%0*x at [<%08lx>]\n",
 		isize << 1,
 		isize == 2 ? tinstr : instr, instrptr);
 	ai_skipped += 1;
@@ -936,7 +964,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	ai_user += 1;
 
 	if (ai_usermode & UM_WARN)
-		printk("Alignment trap: %s (%d) PC=0x%08lx Instr=0x%0*lx "
+		printk("Alignment trap: %s (%d) PC=0x%08lx Instr=0x%0*x "
 		       "Address=0x%08lx FSR 0x%03x\n", current->comm,
 		       task_pid_nr(current), instrptr,
 		       isize << 1,
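The last two hunks only adjust the diagnostics to the new type: once instr is a u32, the "%0*lx" conversions would mismatch (and trip -Wformat), so they become "%0*x". A user-space equivalent of the same point, using the portable PRIx32 macro:

/* Standalone sketch: match the printf conversion to a 32-bit type. */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint32_t instr = 0xe59f1004;
	int isize = 4;			/* 4-byte ARM encoding -> 8 hex digits */

	/* printf("%0*lx\n", isize << 1, instr); would warn: %lx wants long */
	printf("%0*" PRIx32 "\n", isize << 1, instr);
	return 0;
}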
arch/arm/mm/proc-v7m.S
@@ -132,13 +132,11 @@ __v7m_setup_cont:
 	dsb
 	mov	r6, lr			@ save LR
 	ldr	sp, =init_thread_union + THREAD_START_SP
-	stmia	sp, {r0-r3, r12}
 	cpsie	i
 	svc	#0
1:	cpsid	i
-	ldr	r0, =exc_ret
-	orr	lr, lr, #EXC_RET_THREADMODE_PROCESSSTACK
-	str	lr, [r0]
+	/* Calculate exc_ret */
+	orr	r10, lr, #EXC_RET_THREADMODE_PROCESSSTACK
 	ldmia	sp, {r0-r3, r12}
 	str	r5, [r12, #11 * 4]	@ restore the original SVC vector entry
 	mov	lr, r6			@ restore LR
drivers/amba/bus.c
@@ -409,9 +409,11 @@ static int amba_device_try_add(struct amba_device *dev, struct resource *parent)
 		 */
 		rstc = of_reset_control_array_get_optional_shared(dev->dev.of_node);
 		if (IS_ERR(rstc)) {
-			if (PTR_ERR(rstc) != -EPROBE_DEFER)
-				dev_err(&dev->dev, "Can't get amba reset!\n");
-			return PTR_ERR(rstc);
+			ret = PTR_ERR(rstc);
+			if (ret != -EPROBE_DEFER)
+				dev_err(&dev->dev, "can't get reset: %d\n",
+					ret);
+			goto err_reset;
 		}
 		reset_control_deassert(rstc);
 		reset_control_put(rstc);
@@ -472,6 +474,12 @@ static int amba_device_try_add(struct amba_device *dev, struct resource *parent)
 	release_resource(&dev->res);
  err_out:
 	return ret;
+
+ err_reset:
+	amba_put_disable_pclk(dev);
+	iounmap(tmp);
+	dev_pm_domain_detach(&dev->dev, true);
+	goto err_release;
 }
 
 /*
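The AMBA change stops the reset-control error path from returning early and leaking the pclk, the ioremap()ed registers and the attached PM domain; instead it captures the error code and joins the function's existing goto-based unwind. A minimal standalone sketch of that idiom (resources and names invented for illustration, not the driver's code):

/* Standalone sketch, not kernel code: capture the error, then unwind
 * everything acquired so far in reverse order via labels. */
#include <stdio.h>
#include <stdlib.h>

static int pretend_get_reset(void)
{
	return -2;			/* simulate the reset lookup failing */
}

static int device_try_add(void)
{
	char *clk, *regs;
	int ret;

	clk = malloc(16);		/* stands in for enabling the bus clock */
	if (!clk)
		return -1;

	regs = malloc(16);		/* stands in for mapping the registers */
	if (!regs) {
		ret = -1;
		goto err_clk;
	}

	ret = pretend_get_reset();
	if (ret < 0)
		goto err_regs;		/* error already captured in ret */

	free(regs);
	free(clk);
	return 0;

 err_regs:
	free(regs);			/* undo in reverse order of acquisition */
 err_clk:
	free(clk);
	return ret;
}

int main(void)
{
	printf("device_try_add() = %d\n", device_try_add());
	return 0;
}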