// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/traps.c
 *
 * Copyright (C) 1995-2009 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/bug.h>
#include <linux/context_tracking.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/sizes.h>
#include <linux/syscalls.h>
#include <linux/mm_types.h>
#include <linux/kasan.h>

#include <asm/atomic.h>
#include <asm/bug.h>
#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/insn.h>
#include <asm/kprobes.h>
#include <asm/traps.h>
#include <asm/smp.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
#include <asm/exception.h>
#include <asm/system_misc.h>
#include <asm/sysreg.h>

static const char *handler[] = {
	"Synchronous Abort",
	"IRQ",
	"FIQ",
	"Error"
};

int show_unhandled_signals = 0;

static void dump_backtrace_entry(unsigned long where)
{
	printk(" %pS\n", (void *)where);
}

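/*
 * Dump the instruction stream around the kernel PC: the four words before
 * the faulting instruction and the faulting word itself (printed in
 * parentheses). Nothing is printed for exceptions taken from user mode.
 */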
static void dump_kernel_instr(const char *lvl, struct pt_regs *regs)
{
	unsigned long addr = instruction_pointer(regs);
	char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
	int i;

	if (user_mode(regs))
		return;

	for (i = -4; i < 1; i++) {
		unsigned int val, bad;

		bad = aarch64_insn_read(&((u32 *)addr)[i], &val);

		if (!bad)
			p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
		else {
			p += sprintf(p, "bad PC value");
			break;
		}
	}

	printk("%sCode: %s\n", lvl, str);
}

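/*
 * Walk and print the call stack of @tsk (or of the current task when @tsk is
 * NULL). When @regs is supplied, entries are skipped until the frame matching
 * regs->regs[29] is found, and regs->pc is printed in place of the missing
 * exception-handler PC.
 */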
void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
	struct stackframe frame;
	int skip = 0;

	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

	if (regs) {
		if (user_mode(regs))
			return;
		skip = 1;
	}

	if (!tsk)
		tsk = current;

	if (!try_get_task_stack(tsk))
		return;

	if (tsk == current) {
		start_backtrace(&frame,
				(unsigned long)__builtin_frame_address(0),
				(unsigned long)dump_backtrace);
	} else {
		/*
		 * task blocked in __switch_to
		 */
		start_backtrace(&frame,
				thread_saved_fp(tsk),
				thread_saved_pc(tsk));
	}

	printk("Call trace:\n");
	do {
		/* skip until specified stack frame */
		if (!skip) {
			dump_backtrace_entry(frame.pc);
		} else if (frame.fp == regs->regs[29]) {
			skip = 0;
			/*
			 * Mostly, this is the case where this function is
			 * called in panic/abort. As exception handler's
			 * stack frame does not contain the corresponding pc
			 * at which an exception has taken place, use regs->pc
			 * instead.
			 */
			dump_backtrace_entry(regs->pc);
		}
	} while (!unwind_frame(tsk, &frame));

	put_task_stack(tsk);
}

void show_stack(struct task_struct *tsk, unsigned long *sp)
{
	dump_backtrace(NULL, tsk);
	barrier();
}

#ifdef CONFIG_PREEMPT
#define S_PREEMPT " PREEMPT"
#elif defined(CONFIG_PREEMPT_RT)
#define S_PREEMPT " PREEMPT_RT"
#else
#define S_PREEMPT ""
#endif

#define S_SMP " SMP"

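/*
 * Print the oops banner, registers and code dump for a fatal kernel
 * exception. Returns the notifier chain verdict so that die() can decide
 * whether to terminate the offending task.
 */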
static int __die(const char *str, int err, struct pt_regs *regs)
{
	static int die_counter;
	int ret;

	pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n",
		 str, err, ++die_counter);

	/* trap and error numbers are mostly meaningless on ARM */
	ret = notify_die(DIE_OOPS, str, regs, err, 0, SIGSEGV);
	if (ret == NOTIFY_STOP)
		return ret;

	print_modules();
	show_regs(regs);

	dump_kernel_instr(KERN_EMERG, regs);

	return ret;
}

static DEFINE_RAW_SPINLOCK(die_lock);

/*
 * This function is protected against re-entrancy.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
	int ret;
	unsigned long flags;

	raw_spin_lock_irqsave(&die_lock, flags);

	oops_enter();

	console_verbose();
	bust_spinlocks(1);
	ret = __die(str, err, regs);

	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");

	raw_spin_unlock_irqrestore(&die_lock, flags);

	if (ret != NOTIFY_STOP)
		do_exit(SIGSEGV);
}

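/*
 * Print a rate-limited diagnostic for an unhandled signal delivered to a
 * user task, including the ESR, the faulting VMA and the register state.
 */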
static void arm64_show_signal(int signo, const char *str)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	struct task_struct *tsk = current;
	unsigned int esr = tsk->thread.fault_code;
	struct pt_regs *regs = task_pt_regs(tsk);

	/* Leave if the signal won't be shown */
	if (!show_unhandled_signals ||
	    !unhandled_signal(tsk, signo) ||
	    !__ratelimit(&rs))
		return;

	pr_info("%s[%d]: unhandled exception: ", tsk->comm, task_pid_nr(tsk));
	if (esr)
		pr_cont("%s, ESR 0x%08x, ", esr_get_class_string(esr), esr);

	pr_cont("%s", str);
	print_vma_addr(KERN_CONT " in ", regs->pc);
	pr_cont("\n");
	__show_regs(regs);
}

void arm64_force_sig_fault(int signo, int code, void __user *addr,
			   const char *str)
{
	arm64_show_signal(signo, str);
	if (signo == SIGKILL)
		force_sig(SIGKILL);
	else
		force_sig_fault(signo, code, addr);
}

void arm64_force_sig_mceerr(int code, void __user *addr, short lsb,
			    const char *str)
{
	arm64_show_signal(SIGBUS, str);
	force_sig_mceerr(code, addr, lsb);
}

void arm64_force_sig_ptrace_errno_trap(int errno, void __user *addr,
				       const char *str)
{
	arm64_show_signal(SIGTRAP, str);
	force_sig_ptrace_errno_trap(errno, addr);
}

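/*
 * Either deliver a fault signal to a user task or die() for exceptions taken
 * from kernel mode; user_mode(regs) decides which path is taken.
 */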
void arm64_notify_die(const char *str, struct pt_regs *regs,
		      int signo, int sicode, void __user *addr,
		      int err)
{
	if (user_mode(regs)) {
		WARN_ON(regs != current_pt_regs());
		current->thread.fault_address = 0;
		current->thread.fault_code = err;

		arm64_force_sig_fault(signo, sicode, addr, str);
	} else {
		die(str, regs, err);
	}
}

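/*
 * Advance the PC past an emulated or skipped instruction. If a user task is
 * being single-stepped, fast-forward the step state so the step exception is
 * taken after returning from the trap.
 */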
void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size)
{
	regs->pc += size;

	/*
	 * If we were single stepping, we want to get the step exception after
	 * we return from the trap.
	 */
	if (user_mode(regs))
		user_fastforward_single_step(current);
}

static LIST_HEAD(undef_hook);
static DEFINE_RAW_SPINLOCK(undef_lock);

void register_undef_hook(struct undef_hook *hook)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&undef_lock, flags);
	list_add(&hook->node, &undef_hook);
	raw_spin_unlock_irqrestore(&undef_lock, flags);
}

void unregister_undef_hook(struct undef_hook *hook)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&undef_lock, flags);
	list_del(&hook->node);
	raw_spin_unlock_irqrestore(&undef_lock, flags);
}

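/*
 * Fetch the faulting instruction (directly for kernel mode, via get_user()
 * for A32/T32 user mode, joining the two halfwords of a wide Thumb encoding)
 * and run the last registered undef_hook whose instr/pstate masks match.
 * Returns the hook's result, or 1 if no hook claimed the instruction.
 *
 * Illustrative sketch only (the encoding and names below are made up); an
 * emulation driver registers a hook roughly like:
 *
 *	static struct undef_hook example_hook = {
 *		.instr_mask	= 0xffffffff,
 *		.instr_val	= 0xdeadc0de,
 *		.pstate_mask	= PSR_AA32_MODE_MASK,
 *		.pstate_val	= PSR_AA32_MODE_USR,
 *		.fn		= example_handler,
 *	};
 *	register_undef_hook(&example_hook);
 */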
static int call_undef_hook(struct pt_regs *regs)
{
	struct undef_hook *hook;
	unsigned long flags;
	u32 instr;
	int (*fn)(struct pt_regs *regs, u32 instr) = NULL;
	void __user *pc = (void __user *)instruction_pointer(regs);

	if (!user_mode(regs)) {
		__le32 instr_le;
		if (probe_kernel_address((__force __le32 *)pc, instr_le))
			goto exit;
		instr = le32_to_cpu(instr_le);
	} else if (compat_thumb_mode(regs)) {
		/* 16-bit Thumb instruction */
		__le16 instr_le;
		if (get_user(instr_le, (__le16 __user *)pc))
			goto exit;
		instr = le16_to_cpu(instr_le);
		if (aarch32_insn_is_wide(instr)) {
			u32 instr2;

			if (get_user(instr_le, (__le16 __user *)(pc + 2)))
				goto exit;
			instr2 = le16_to_cpu(instr_le);
			instr = (instr << 16) | instr2;
		}
	} else {
		/* 32-bit ARM instruction */
		__le32 instr_le;
		if (get_user(instr_le, (__le32 __user *)pc))
			goto exit;
		instr = le32_to_cpu(instr_le);
	}

	raw_spin_lock_irqsave(&undef_lock, flags);
	list_for_each_entry(hook, &undef_hook, node)
		if ((instr & hook->instr_mask) == hook->instr_val &&
		    (regs->pstate & hook->pstate_mask) == hook->pstate_val)
			fn = hook->fn;

	raw_spin_unlock_irqrestore(&undef_lock, flags);
exit:
	return fn ? fn(regs, instr) : 1;
}

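/*
 * Send a fault signal to current for an exception taken from userspace,
 * picking a human-readable description for the unhandled-signal message.
 * Signals whose layout cannot be expressed as a fault are forced to SIGKILL.
 */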
void force_signal_inject(int signal, int code, unsigned long address)
{
	const char *desc;
	struct pt_regs *regs = current_pt_regs();

	if (WARN_ON(!user_mode(regs)))
		return;

	switch (signal) {
	case SIGILL:
		desc = "undefined instruction";
		break;
	case SIGSEGV:
		desc = "illegal memory access";
		break;
	default:
desc = "unknown or unrecoverable error";
|
2016-06-29 01:07:31 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2018-02-21 02:08:40 +08:00
|
|
|
/* Force signals we don't understand to SIGKILL */
|
2018-04-16 23:45:01 +08:00
|
|
|
if (WARN_ON(signal != SIGKILL &&
|
2018-02-21 02:08:40 +08:00
|
|
|
siginfo_layout(signal, code) != SIL_FAULT)) {
|
|
|
|
signal = SIGKILL;
|
|
|
|
}
|
|
|
|
|
2018-09-21 23:24:40 +08:00
|
|
|
arm64_notify_die(desc, regs, signal, code, (void __user *)address, 0);
|
2016-06-29 01:07:31 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Set up process info to signal segmentation fault - called on access error.
|
|
|
|
*/
|
2018-02-20 22:16:29 +08:00
|
|
|
void arm64_notify_segfault(unsigned long addr)
|
2016-06-29 01:07:31 +08:00
|
|
|
{
|
|
|
|
int code;
|
|
|
|
|
|
|
|
down_read(¤t->mm->mmap_sem);
|
|
|
|
if (find_vma(current->mm, addr) == NULL)
|
|
|
|
code = SEGV_MAPERR;
|
|
|
|
else
|
|
|
|
code = SEGV_ACCERR;
|
|
|
|
up_read(¤t->mm->mmap_sem);
|
2012-03-05 19:49:27 +08:00
|
|
|
|
2018-02-20 22:16:29 +08:00
|
|
|
force_signal_inject(SIGSEGV, code, addr);
|
2016-06-29 01:07:31 +08:00
|
|
|
}
|
|
|
|
|
2019-10-26 00:42:15 +08:00
|
|
|
void do_undefinstr(struct pt_regs *regs)
{
	/* check for AArch32 breakpoint instructions */
	if (!aarch32_break_handler(regs))
		return;

	if (call_undef_hook(regs) == 0)
		return;

	BUG_ON(!user_mode(regs));
	force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
}
NOKPROBE_SYMBOL(do_undefinstr);

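/*
 * Perform one cache maintenance instruction on a user address with user
 * access enabled. A fault in the instruction is caught by the exception
 * table fixup at label 3, which stores -EFAULT in @res instead of taking
 * the kernel down.
 */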
#define __user_cache_maint(insn, address, res) \
	if (address >= user_addr_max()) {			\
		res = -EFAULT;					\
	} else {						\
		uaccess_ttbr0_enable();				\
		asm volatile (					\
			"1:	" insn ", %1\n"			\
			"	mov	%w0, #0\n"		\
			"2:\n"					\
			"	.pushsection .fixup,\"ax\"\n"	\
			"	.align	2\n"			\
			"3:	mov	%w0, %w2\n"		\
			"	b	2b\n"			\
			"	.popsection\n"			\
			_ASM_EXTABLE(1b, 3b)			\
			: "=r" (res)				\
			: "r" (address), "i" (-EFAULT));	\
		uaccess_ttbr0_disable();			\
	}

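/*
 * Emulate a cache maintenance instruction that trapped from EL0: decode CRm
 * from the ESR, run the corresponding (possibly promoted) operation on the
 * untagged user address, then skip the instruction, or report a segfault if
 * the address faulted.
 */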
static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
{
	unsigned long address;
	int rt = ESR_ELx_SYS64_ISS_RT(esr);
	int crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
	int ret = 0;

	address = untagged_addr(pt_regs_read_reg(regs, rt));

	switch (crm) {
	case ESR_ELx_SYS64_ISS_CRM_DC_CVAU:	/* DC CVAU, gets promoted */
		__user_cache_maint("dc civac", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CVAC:	/* DC CVAC, gets promoted */
		__user_cache_maint("dc civac", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CVADP:	/* DC CVADP */
		__user_cache_maint("sys 3, c7, c13, 1", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CVAP:	/* DC CVAP */
		__user_cache_maint("sys 3, c7, c12, 1", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CIVAC:	/* DC CIVAC */
		__user_cache_maint("dc civac", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_IC_IVAU:	/* IC IVAU */
		__user_cache_maint("ic ivau", address, ret);
		break;
	default:
		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
		return;
	}

	if (ret)
		arm64_notify_segfault(address);
	else
		arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

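/*
 * Emulate an EL0 read of CTR_EL0 using the sanitised system-wide value. With
 * the 1542419 erratum workaround applied, the DIC bit is hidden so userspace
 * keeps issuing the I-cache maintenance we need to trap, and IminLine is
 * faked to reduce the number of resulting traps.
 */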
static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
{
	int rt = ESR_ELx_SYS64_ISS_RT(esr);
	unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0);

	if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) {
		/* Hide DIC so that we can trap the unnecessary maintenance...*/
		val &= ~BIT(CTR_DIC_SHIFT);

		/* ... and fake IminLine to reduce the number of traps. */
		val &= ~CTR_IMINLINE_MASK;
		val |= (PAGE_SHIFT - 2) & CTR_IMINLINE_MASK;
	}

	pt_regs_write_reg(regs, rt, val);

	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
{
	int rt = ESR_ELx_SYS64_ISS_RT(esr);

	pt_regs_write_reg(regs, rt, arch_timer_read_counter());
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
{
	int rt = ESR_ELx_SYS64_ISS_RT(esr);

	pt_regs_write_reg(regs, rt, arch_timer_get_rate());
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void mrs_handler(unsigned int esr, struct pt_regs *regs)
{
	u32 sysreg, rt;

	rt = ESR_ELx_SYS64_ISS_RT(esr);
	sysreg = esr_sys64_to_sysreg(esr);

	if (do_emulate_mrs(regs, sysreg, rt) != 0)
		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
}

static void wfi_handler(unsigned int esr, struct pt_regs *regs)
{
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

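/*
 * Table-driven dispatch for trapped EL0 system instructions: each hook
 * matches an ESR pattern (mask/value) and names the emulation handler that
 * do_sysinstr()/do_cp15instr() should invoke.
 */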
struct sys64_hook {
	unsigned int esr_mask;
	unsigned int esr_val;
	void (*handler)(unsigned int esr, struct pt_regs *regs);
};

static const struct sys64_hook sys64_hooks[] = {
	{
		.esr_mask = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_VAL,
		.handler = user_cache_maint_handler,
	},
	{
		/* Trap read access to CTR_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CTR_READ,
		.handler = ctr_read_handler,
	},
	{
		/* Trap read access to CNTVCT_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTVCT,
		.handler = cntvct_read_handler,
	},
	{
		/* Trap read access to CNTFRQ_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTFRQ,
		.handler = cntfrq_read_handler,
	},
	{
		/* Trap read access to CPUID registers */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_MRS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_MRS_OP_VAL,
		.handler = mrs_handler,
	},
	{
		/* Trap WFI instructions executed in userspace */
		.esr_mask = ESR_ELx_WFx_MASK,
		.esr_val = ESR_ELx_WFx_WFI_VAL,
		.handler = wfi_handler,
	},
	{},
};

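/*
 * AArch32 (compat) support: helpers for reading, checking and advancing the
 * Thumb IT state so that trapped 32-bit instructions are emulated or skipped
 * with the same side effects the hardware would have applied.
 */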
#ifdef CONFIG_COMPAT
#define PSTATE_IT_1_0_SHIFT	25
#define PSTATE_IT_1_0_MASK	(0x3 << PSTATE_IT_1_0_SHIFT)
#define PSTATE_IT_7_2_SHIFT	10
#define PSTATE_IT_7_2_MASK	(0x3f << PSTATE_IT_7_2_SHIFT)

static u32 compat_get_it_state(struct pt_regs *regs)
{
	u32 it, pstate = regs->pstate;

	it = (pstate & PSTATE_IT_1_0_MASK) >> PSTATE_IT_1_0_SHIFT;
	it |= ((pstate & PSTATE_IT_7_2_MASK) >> PSTATE_IT_7_2_SHIFT) << 2;

	return it;
}

static void compat_set_it_state(struct pt_regs *regs, u32 it)
{
	u32 pstate_it;

	pstate_it = (it << PSTATE_IT_1_0_SHIFT) & PSTATE_IT_1_0_MASK;
	pstate_it |= ((it >> 2) << PSTATE_IT_7_2_SHIFT) & PSTATE_IT_7_2_MASK;

	regs->pstate &= ~PSR_AA32_IT_MASK;
	regs->pstate |= pstate_it;
}

static bool cp15_cond_valid(unsigned int esr, struct pt_regs *regs)
{
	int cond;

	/* Only a T32 instruction can trap without CV being set */
	if (!(esr & ESR_ELx_CV)) {
		u32 it;

		it = compat_get_it_state(regs);
		if (!it)
			return true;

		cond = it >> 4;
	} else {
		cond = (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
	}

	return aarch32_opcode_cond_checks[cond](regs->pstate);
}

static void advance_itstate(struct pt_regs *regs)
{
	u32 it;

	/* ARM mode */
	if (!(regs->pstate & PSR_AA32_T_BIT) ||
	    !(regs->pstate & PSR_AA32_IT_MASK))
		return;

	it = compat_get_it_state(regs);

	/*
	 * If this is the last instruction of the block, wipe the IT
	 * state. Otherwise advance it.
	 */
	if (!(it & 7))
		it = 0;
	else
		it = (it & 0xe0) | ((it << 1) & 0x1f);

	compat_set_it_state(regs, it);
}

static void arm64_compat_skip_faulting_instruction(struct pt_regs *regs,
						   unsigned int sz)
{
	advance_itstate(regs);
	arm64_skip_faulting_instruction(regs, sz);
}

static void compat_cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
{
	int reg = (esr & ESR_ELx_CP15_32_ISS_RT_MASK) >> ESR_ELx_CP15_32_ISS_RT_SHIFT;

	pt_regs_write_reg(regs, reg, arch_timer_get_rate());
	arm64_compat_skip_faulting_instruction(regs, 4);
}

static const struct sys64_hook cp15_32_hooks[] = {
	{
		.esr_mask = ESR_ELx_CP15_32_ISS_SYS_MASK,
		.esr_val = ESR_ELx_CP15_32_ISS_SYS_CNTFRQ,
		.handler = compat_cntfrq_read_handler,
	},
	{},
};

static void compat_cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
{
	int rt = (esr & ESR_ELx_CP15_64_ISS_RT_MASK) >> ESR_ELx_CP15_64_ISS_RT_SHIFT;
	int rt2 = (esr & ESR_ELx_CP15_64_ISS_RT2_MASK) >> ESR_ELx_CP15_64_ISS_RT2_SHIFT;
	u64 val = arch_timer_read_counter();

	pt_regs_write_reg(regs, rt, lower_32_bits(val));
	pt_regs_write_reg(regs, rt2, upper_32_bits(val));
	arm64_compat_skip_faulting_instruction(regs, 4);
}

static const struct sys64_hook cp15_64_hooks[] = {
	{
		.esr_mask = ESR_ELx_CP15_64_ISS_SYS_MASK,
		.esr_val = ESR_ELx_CP15_64_ISS_SYS_CNTVCT,
		.handler = compat_cntvct_read_handler,
	},
	{},
};

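/*
 * Handle a trapped AArch32 CP15 access from EL0: if its condition code fails
 * the access is a NOP and is simply skipped; otherwise dispatch to the
 * cp15_32/cp15_64 hook tables, falling back to do_undefinstr() for anything
 * we do not emulate.
 */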
void do_cp15instr(unsigned int esr, struct pt_regs *regs)
{
	const struct sys64_hook *hook, *hook_base;

	if (!cp15_cond_valid(esr, regs)) {
		/*
		 * There is no T16 variant of a CP access, so we
		 * always advance PC by 4 bytes.
		 */
		arm64_compat_skip_faulting_instruction(regs, 4);
		return;
	}

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_CP15_32:
		hook_base = cp15_32_hooks;
		break;
	case ESR_ELx_EC_CP15_64:
		hook_base = cp15_64_hooks;
		break;
	default:
		do_undefinstr(regs);
		return;
	}

	for (hook = hook_base; hook->handler; hook++)
		if ((hook->esr_mask & esr) == hook->esr_val) {
			hook->handler(esr, regs);
			return;
		}

	/*
	 * New cp15 instructions may previously have been undefined at
	 * EL0. Fall back to our usual undefined instruction handler
	 * so that we handle these consistently.
	 */
	do_undefinstr(regs);
}
NOKPROBE_SYMBOL(do_cp15instr);
#endif

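/*
 * Handle a trapped AArch64 system instruction from EL0 by dispatching to the
 * sys64_hooks table; unrecognised encodings fall back to do_undefinstr() so
 * that new instructions behave exactly as they did before being allocated.
 */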
void do_sysinstr(unsigned int esr, struct pt_regs *regs)
{
	const struct sys64_hook *hook;

	for (hook = sys64_hooks; hook->handler; hook++)
		if ((hook->esr_mask & esr) == hook->esr_val) {
			hook->handler(esr, regs);
			return;
		}

/*
|
|
|
|
* New SYS instructions may previously have been undefined at EL0. Fall
|
|
|
|
* back to our usual undefined instruction handler so that we handle
|
|
|
|
* these consistently.
|
|
|
|
*/
|
|
|
|
do_undefinstr(regs);
|
2016-09-09 21:07:15 +08:00
|
|
|
}
|
2019-10-26 00:42:10 +08:00
|
|
|
NOKPROBE_SYMBOL(do_sysinstr);
|
2016-09-09 21:07:15 +08:00
|
|
|
|
2014-11-18 20:16:30 +08:00
|
|
|
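/*
 * Human-readable names for the ESR_ELx exception class (EC) field, used when
 * printing diagnostics for unexpected exceptions.
 */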
static const char *esr_class_str[] = {
	[0 ... ESR_ELx_EC_MAX] = "UNRECOGNIZED EC",
	[ESR_ELx_EC_UNKNOWN] = "Unknown/Uncategorized",
	[ESR_ELx_EC_WFx] = "WFI/WFE",
	[ESR_ELx_EC_CP15_32] = "CP15 MCR/MRC",
	[ESR_ELx_EC_CP15_64] = "CP15 MCRR/MRRC",
	[ESR_ELx_EC_CP14_MR] = "CP14 MCR/MRC",
	[ESR_ELx_EC_CP14_LS] = "CP14 LDC/STC",
	[ESR_ELx_EC_FP_ASIMD] = "ASIMD",
	[ESR_ELx_EC_CP10_ID] = "CP10 MRC/VMRS",
	[ESR_ELx_EC_PAC] = "PAC",
	[ESR_ELx_EC_CP14_64] = "CP14 MCRR/MRRC",
	[ESR_ELx_EC_ILL] = "PSTATE.IL",
	[ESR_ELx_EC_SVC32] = "SVC (AArch32)",
	[ESR_ELx_EC_HVC32] = "HVC (AArch32)",
	[ESR_ELx_EC_SMC32] = "SMC (AArch32)",
	[ESR_ELx_EC_SVC64] = "SVC (AArch64)",
	[ESR_ELx_EC_HVC64] = "HVC (AArch64)",
	[ESR_ELx_EC_SMC64] = "SMC (AArch64)",
	[ESR_ELx_EC_SYS64] = "MSR/MRS (AArch64)",
	[ESR_ELx_EC_SVE] = "SVE",
	[ESR_ELx_EC_ERET] = "ERET/ERETAA/ERETAB",
	[ESR_ELx_EC_IMP_DEF] = "EL3 IMP DEF",
	[ESR_ELx_EC_IABT_LOW] = "IABT (lower EL)",
	[ESR_ELx_EC_IABT_CUR] = "IABT (current EL)",
	[ESR_ELx_EC_PC_ALIGN] = "PC Alignment",
	[ESR_ELx_EC_DABT_LOW] = "DABT (lower EL)",
	[ESR_ELx_EC_DABT_CUR] = "DABT (current EL)",
	[ESR_ELx_EC_SP_ALIGN] = "SP Alignment",
	[ESR_ELx_EC_FP_EXC32] = "FP (AArch32)",
	[ESR_ELx_EC_FP_EXC64] = "FP (AArch64)",
	[ESR_ELx_EC_SERROR] = "SError",
	[ESR_ELx_EC_BREAKPT_LOW] = "Breakpoint (lower EL)",
	[ESR_ELx_EC_BREAKPT_CUR] = "Breakpoint (current EL)",
	[ESR_ELx_EC_SOFTSTP_LOW] = "Software Step (lower EL)",
	[ESR_ELx_EC_SOFTSTP_CUR] = "Software Step (current EL)",
	[ESR_ELx_EC_WATCHPT_LOW] = "Watchpoint (lower EL)",
	[ESR_ELx_EC_WATCHPT_CUR] = "Watchpoint (current EL)",
	[ESR_ELx_EC_BKPT32] = "BKPT (AArch32)",
	[ESR_ELx_EC_VECTOR32] = "Vector catch (AArch32)",
	[ESR_ELx_EC_BRK64] = "BRK (AArch64)",
};

const char *esr_get_class_string(u32 esr)
{
	return esr_class_str[ESR_ELx_EC(esr)];
}

/*
 * bad_mode handles the impossible case in the exception vector. This is always
 * fatal.
 */
asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
{
	console_verbose();

	pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
		handler[reason], smp_processor_id(), esr,
		esr_get_class_string(esr));

	local_daif_mask();
	panic("bad mode");
}

/*
 * bad_el0_sync handles unexpected, but potentially recoverable synchronous
 * exceptions taken from EL0. Unlike bad_mode, this returns.
 */
void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
{
	void __user *pc = (void __user *)instruction_pointer(regs);

	current->thread.fault_address = 0;
	current->thread.fault_code = esr;

	arm64_force_sig_fault(SIGILL, ILL_ILLOPC, pc,
			      "Bad EL0 synchronous exception");
}

#ifdef CONFIG_VMAP_STACK

DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
	__aligned(16);

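/*
 * Called when there is insufficient stack space left to handle an exception:
 * report the task, IRQ and overflow stack bounds together with the register
 * state, then panic via nmi_panic().
 */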
asmlinkage void handle_bad_stack(struct pt_regs *regs)
{
	unsigned long tsk_stk = (unsigned long)current->stack;
	unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr);
	unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);
	unsigned int esr = read_sysreg(esr_el1);
	unsigned long far = read_sysreg(far_el1);

	console_verbose();
	pr_emerg("Insufficient stack space to handle exception!");

	pr_emerg("ESR: 0x%08x -- %s\n", esr, esr_get_class_string(esr));
	pr_emerg("FAR: 0x%016lx\n", far);

	pr_emerg("Task stack: [0x%016lx..0x%016lx]\n",
		 tsk_stk, tsk_stk + THREAD_SIZE);
	pr_emerg("IRQ stack: [0x%016lx..0x%016lx]\n",
		 irq_stk, irq_stk + THREAD_SIZE);
	pr_emerg("Overflow stack: [0x%016lx..0x%016lx]\n",
		 ovf_stk, ovf_stk + OVERFLOW_STACK_SIZE);

	__show_regs(regs);

	/*
	 * We use nmi_panic to limit the potential for recursive overflows, and
	 * to get a better stack trace.
	 */
	nmi_panic(NULL, "kernel stack overflow");
	cpu_park_loop();
}
#endif

void __noreturn arm64_serror_panic(struct pt_regs *regs, u32 esr)
{
	console_verbose();

	pr_crit("SError Interrupt on CPU%d, code 0x%08x -- %s\n",
		smp_processor_id(), esr, esr_get_class_string(esr));
	if (regs)
		__show_regs(regs);

	nmi_panic(regs, "Asynchronous SError Interrupt");

	cpu_park_loop();
	unreachable();
}

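/*
 * Decide from the severity (AET) field of the ESR whether a RAS SError can be
 * contained. Corrected and restartable errors are survivable; anything the
 * CPU could not contain ends in arm64_serror_panic().
 */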
bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr)
{
	u32 aet = arm64_ras_serror_get_severity(esr);

	switch (aet) {
	case ESR_ELx_AET_CE:	/* corrected error */
	case ESR_ELx_AET_UEO:	/* restartable, not yet consumed */
		/*
		 * The CPU can make progress. We may take UEO again as
		 * a more severe error.
		 */
		return false;

	case ESR_ELx_AET_UEU:	/* Uncorrected Unrecoverable */
	case ESR_ELx_AET_UER:	/* Uncorrected Recoverable */
		/*
		 * The CPU can't make progress. The exception may have
		 * been imprecise.
		 *
		 * Neoverse-N1 #1349291 means a non-KVM SError reported as
		 * Unrecoverable should be treated as Uncontainable. We
		 * call arm64_serror_panic() in both cases.
		 */
		return true;

	case ESR_ELx_AET_UC:	/* Uncontainable or Uncategorized error */
	default:
		/* Error has been silently propagated */
		arm64_serror_panic(regs, esr);
	}
}

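/*
 * SError entry point: treated as NMI context. Anything that is not a
 * containable RAS error is fatal.
 */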
asmlinkage void do_serror(struct pt_regs *regs, unsigned int esr)
{
	const bool was_in_nmi = in_nmi();

	if (!was_in_nmi)
		nmi_enter();

	/* non-RAS errors are not containable */
	if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr))
		arm64_serror_panic(regs, esr);

	if (!was_in_nmi)
		nmi_exit();
}

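/*
 * Run on exception entry from EL0: warn if context tracking did not already
 * have us in user context, then mark the transition back into the kernel.
 */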
asmlinkage void enter_from_user_mode(void)
{
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();
}
NOKPROBE_SYMBOL(enter_from_user_mode);

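/* Reporting helpers for corrupted page-table entries at each level. */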
void __pte_error(const char *file, int line, unsigned long val)
{
	pr_err("%s:%d: bad pte %016lx.\n", file, line, val);
}

void __pmd_error(const char *file, int line, unsigned long val)
{
	pr_err("%s:%d: bad pmd %016lx.\n", file, line, val);
}

void __pud_error(const char *file, int line, unsigned long val)
{
	pr_err("%s:%d: bad pud %016lx.\n", file, line, val);
}

void __pgd_error(const char *file, int line, unsigned long val)
{
	pr_err("%s:%d: bad pgd %016lx.\n", file, line, val);
}

/* GENERIC_BUG traps */

int is_valid_bugaddr(unsigned long addr)
{
	/*
	 * bug_handler() is only called for BRK #BUG_BRK_IMM, so the answer is
	 * trivially "yes" -- any spurious instance with no bug table entry
	 * will be rejected by report_bug() and passed back to the
	 * debug-monitors code, where it is handled as a fatal unexpected
	 * debug exception.
	 */
	return 1;
}

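/*
 * BRK handler for BUG()/WARN(): report_bug() classifies the trap; a real BUG
 * is fatal via die(), while a WARN skips the faulting instruction and
 * continues.
 */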
static int bug_handler(struct pt_regs *regs, unsigned int esr)
{
	switch (report_bug(regs->pc, regs)) {
	case BUG_TRAP_TYPE_BUG:
		die("Oops - BUG", regs, 0);
		break;

	case BUG_TRAP_TYPE_WARN:
		break;

	default:
		/* unknown/unrecognised bug trap type */
		return DBG_HOOK_ERROR;
	}

	/* If thread survives, skip over the BUG instruction and continue: */
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
	return DBG_HOOK_HANDLED;
}

static struct break_hook bug_break_hook = {
	.fn = bug_handler,
	.imm = BUG_BRK_IMM,
};

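/*
 * Software tag-based KASAN reports tag-check faults via a BRK instruction
 * whose immediate encodes the access size, whether it was a write, and
 * whether execution is allowed to recover afterwards (see kasan_handler()).
 */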
#ifdef CONFIG_KASAN_SW_TAGS

#define KASAN_ESR_RECOVER 0x20
#define KASAN_ESR_WRITE 0x10
#define KASAN_ESR_SIZE_MASK 0x0f
#define KASAN_ESR_SIZE(esr) (1 << ((esr) & KASAN_ESR_SIZE_MASK))

static int kasan_handler(struct pt_regs *regs, unsigned int esr)
{
	bool recover = esr & KASAN_ESR_RECOVER;
	bool write = esr & KASAN_ESR_WRITE;
	size_t size = KASAN_ESR_SIZE(esr);
	u64 addr = regs->regs[0];
	u64 pc = regs->pc;

	kasan_report(addr, size, write, pc);

	/*
	 * The instrumentation lets us control whether we can proceed after a
	 * crash is detected. This is done by passing the -recover flag to the
	 * compiler. Disabling recovery allows more compact code to be
	 * generated.
	 *
	 * Unfortunately disabling recovery doesn't work for the kernel right
	 * now. KASAN reporting is disabled in some contexts (for example when
	 * the allocator accesses slab object metadata; this is controlled by
	 * current->kasan_depth). All of these accesses are detected by the
	 * tool, even though the reports for them are not printed.
	 *
	 * This is something that might be fixed at some point in the future.
	 */
	if (!recover)
		die("Oops - KASAN", regs, 0);

	/* If thread survives, skip over the brk instruction and continue: */
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
	return DBG_HOOK_HANDLED;
}

static struct break_hook kasan_break_hook = {
	.fn = kasan_handler,
	.imm = KASAN_BRK_IMM,
	.mask = KASAN_BRK_MASK,
};
#endif

/*
 * Initial handler for AArch64 BRK exceptions.
 * This handler is only used until debug_traps_init().
 */
int __init early_brk64(unsigned long addr, unsigned int esr,
		       struct pt_regs *regs)
{
#ifdef CONFIG_KASAN_SW_TAGS
	unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;

	if ((comment & ~KASAN_BRK_MASK) == KASAN_BRK_IMM)
		return kasan_handler(regs, esr) != DBG_HOOK_HANDLED;
#endif
	return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
}

/* This registration must happen early, before debug_traps_init(). */
void __init trap_init(void)
{
	register_kernel_break_hook(&bug_break_hook);
#ifdef CONFIG_KASAN_SW_TAGS
	register_kernel_break_hook(&kasan_break_hook);
#endif
}