// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARMv8 single-step debug support and mdscr context switching.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/cpu.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/ptrace.h>
#include <linux/kprobes.h>
#include <linux/stat.h>
#include <linux/uaccess.h>
#include <linux/sched/task_stack.h>

#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/system_misc.h>
#include <asm/traps.h>

/* Determine debug architecture. */
u8 debug_monitors_arch(void)
{
	return cpuid_feature_extract_unsigned_field(read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1),
						ID_AA64DFR0_DEBUGVER_SHIFT);
}

/*
 * MDSCR access routines.
 */
static void mdscr_write(u32 mdscr)
{
unsigned long flags;
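
	/*
	 * Mask DAIF so that the MDSCR_EL1 update can't race with a debug
	 * exception taken on this CPU.
	 */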
	flags = local_daif_save();
	write_sysreg(mdscr, mdscr_el1);
	local_daif_restore(flags);
}
NOKPROBE_SYMBOL(mdscr_write);

static u32 mdscr_read(void)
{
	return read_sysreg(mdscr_el1);
}
NOKPROBE_SYMBOL(mdscr_read);

/*
 * Allow root to disable self-hosted debug from userspace.
 * This is useful if you want to connect an external JTAG debugger.
 */
static bool debug_enabled = true;

static int create_debug_debugfs_entry(void)
{
	debugfs_create_bool("debug_enabled", 0644, NULL, &debug_enabled);
	return 0;
}
fs_initcall(create_debug_debugfs_entry);

static int __init early_debug_disable(char *buf)
{
	debug_enabled = false;
	return 0;
}

early_param("nodebugmon", early_debug_disable);

/*
 * Keep track of debug users on each core.
 * The ref counts are per-cpu ints updated with non-preemptible this_cpu ops.
 */
static DEFINE_PER_CPU(int, mde_ref_count);
static DEFINE_PER_CPU(int, kde_ref_count);

void enable_debug_monitors(enum dbg_active_el el)
{
	u32 mdscr, enable = 0;

WARN_ON(preemptible());
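
	/* Only the first user on this CPU needs to set the monitor debug enable (MDE). */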
	if (this_cpu_inc_return(mde_ref_count) == 1)
enable = DBG_MDSCR_MDE;
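
	/* EL1 debuggers additionally need the kernel debug enable (KDE) bit. */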
	if (el == DBG_ACTIVE_EL1 &&
	    this_cpu_inc_return(kde_ref_count) == 1)
		enable |= DBG_MDSCR_KDE;

if (enable && debug_enabled) {
		mdscr = mdscr_read();
		mdscr |= enable;
		mdscr_write(mdscr);
	}
}
NOKPROBE_SYMBOL(enable_debug_monitors);

void disable_debug_monitors(enum dbg_active_el el)
{
	u32 mdscr, disable = 0;

WARN_ON(preemptible());
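
	/* Only clear MDE/KDE once the last user on this CPU has gone away. */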
	if (this_cpu_dec_return(mde_ref_count) == 0)
		disable = ~DBG_MDSCR_MDE;

	if (el == DBG_ACTIVE_EL1 &&
	    this_cpu_dec_return(kde_ref_count) == 0)
		disable &= ~DBG_MDSCR_KDE;

	if (disable) {
		mdscr = mdscr_read();
		mdscr &= disable;
		mdscr_write(mdscr);
	}
}
NOKPROBE_SYMBOL(disable_debug_monitors);

/*
 * OS lock clearing.
 */
static int clear_os_lock(unsigned int cpu)
{
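	/*
	 * Unlock the OS Double Lock and the OS Lock so that self-hosted
	 * debug exceptions can be taken on this CPU.
	 */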
	write_sysreg(0, osdlr_el1);
	write_sysreg(0, oslar_el1);
	isb();
	return 0;
}

static int __init debug_monitors_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING,
				 "arm64/debug_monitors:starting",
				 clear_os_lock, NULL);
}
postcore_initcall(debug_monitors_init);

/*
 * Single step API and exception handling.
 */
static void set_user_regs_spsr_ss(struct user_pt_regs *regs)
{
	regs->pstate |= DBG_SPSR_SS;
}
NOKPROBE_SYMBOL(set_user_regs_spsr_ss);

static void clear_user_regs_spsr_ss(struct user_pt_regs *regs)
{
	regs->pstate &= ~DBG_SPSR_SS;
}
NOKPROBE_SYMBOL(clear_user_regs_spsr_ss);

#define set_regs_spsr_ss(r)	set_user_regs_spsr_ss(&(r)->user_regs)
#define clear_regs_spsr_ss(r)	clear_user_regs_spsr_ss(&(r)->user_regs)

static DEFINE_SPINLOCK(debug_hook_lock);
static LIST_HEAD(user_step_hook);
static LIST_HEAD(kernel_step_hook);

static void register_debug_hook(struct list_head *node, struct list_head *list)
{
	spin_lock(&debug_hook_lock);
	list_add_rcu(node, list);
	spin_unlock(&debug_hook_lock);
}

static void unregister_debug_hook(struct list_head *node)
{
	spin_lock(&debug_hook_lock);
	list_del_rcu(node);
	spin_unlock(&debug_hook_lock);
	synchronize_rcu();
}

void register_user_step_hook(struct step_hook *hook)
{
	register_debug_hook(&hook->node, &user_step_hook);
}

void unregister_user_step_hook(struct step_hook *hook)
{
	unregister_debug_hook(&hook->node);
}

void register_kernel_step_hook(struct step_hook *hook)
{
	register_debug_hook(&hook->node, &kernel_step_hook);
}

void unregister_kernel_step_hook(struct step_hook *hook)
{
	unregister_debug_hook(&hook->node);
}

/*
 * Call the registered single-step handlers.
 * There is no syndrome information to determine which handler to call,
 * so call each registered handler in turn until one of them returns
 * DBG_HOOK_HANDLED.
 */
static int call_step_hook(struct pt_regs *regs, unsigned long esr)
{
	struct step_hook *hook;
	struct list_head *list;
	int retval = DBG_HOOK_ERROR;

	list = user_mode(regs) ? &user_step_hook : &kernel_step_hook;

/*
	 * Since the single-step exception disables interrupts, this
	 * function is not preemptible and we can safely walk the RCU
	 * list here.
	 */
	list_for_each_entry_rcu(hook, list, node) {
		retval = hook->fn(regs, esr);
		if (retval == DBG_HOOK_HANDLED)
			break;
	}

return retval;
}
NOKPROBE_SYMBOL(call_step_hook);

static void send_user_sigtrap(int si_code)
{
	struct pt_regs *regs = current_pt_regs();

	if (WARN_ON(!user_mode(regs)))
		return;

	if (interrupts_enabled(regs))
		local_irq_enable();

	arm64_force_sig_fault(SIGTRAP, si_code, instruction_pointer(regs),
			      "User debug trap");
}
static int single_step_handler(unsigned long unused, unsigned long esr,
			       struct pt_regs *regs)
{
	bool handler_found = false;

	/*
	 * If we are stepping a pending breakpoint, call the hw_breakpoint
	 * handler first.
	 */
	if (!reinstall_suspended_bps(regs))
		return 0;

	if (!handler_found && call_step_hook(regs, esr) == DBG_HOOK_HANDLED)
		handler_found = true;

if (!handler_found && user_mode(regs)) {
		send_user_sigtrap(TRAP_TRACE);

		/*
		 * ptrace will disable single step unless explicitly
		 * asked to re-enable it. For other clients, it makes
		 * sense to leave it enabled (i.e. rewind the controls
		 * to the active-not-pending state).
		 */
		user_rewind_single_step(current);
	} else if (!handler_found) {
		pr_warn("Unexpected kernel single-step exception at EL1\n");
		/*
		 * Re-enable stepping since we know that we will be
		 * returning to regs.
		 */
		set_regs_spsr_ss(regs);
	}

return 0;
}
NOKPROBE_SYMBOL(single_step_handler);

static LIST_HEAD(user_break_hook);
static LIST_HEAD(kernel_break_hook);

void register_user_break_hook(struct break_hook *hook)
{
	register_debug_hook(&hook->node, &user_break_hook);
}

void unregister_user_break_hook(struct break_hook *hook)
{
	unregister_debug_hook(&hook->node);
}

void register_kernel_break_hook(struct break_hook *hook)
{
	register_debug_hook(&hook->node, &kernel_break_hook);
}

void unregister_kernel_break_hook(struct break_hook *hook)
{
	unregister_debug_hook(&hook->node);
}

static int call_break_hook(struct pt_regs *regs, unsigned long esr)
{
	struct break_hook *hook;
	struct list_head *list;
	int (*fn)(struct pt_regs *regs, unsigned long esr) = NULL;

	list = user_mode(regs) ? &user_break_hook : &kernel_break_hook;

/*
	 * Since the brk exception disables interrupts, this function is
	 * not preemptible and we can safely walk the RCU list here.
	 */
list_for_each_entry_rcu(hook, list, node) {
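		/*
		 * BRK #imm16 encodes its immediate in the ESR "comment"
		 * field; match it against each hook's imm/mask pair.
		 */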
unsigned long comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;
if ((comment & ~hook->mask) == hook->imm)
			fn = hook->fn;
	}

return fn ? fn(regs, esr) : DBG_HOOK_ERROR;
}
NOKPROBE_SYMBOL(call_break_hook);

static int brk_handler(unsigned long unused, unsigned long esr,
		       struct pt_regs *regs)
{
	if (call_break_hook(regs, esr) == DBG_HOOK_HANDLED)
		return 0;

if (user_mode(regs)) {
		send_user_sigtrap(TRAP_BRKPT);
	} else {
		pr_warn("Unexpected kernel BRK exception at EL1\n");
		return -EFAULT;
	}

return 0;
}
NOKPROBE_SYMBOL(brk_handler);

int aarch32_break_handler(struct pt_regs *regs)
{
	u32 arm_instr;
	u16 thumb_instr;
	bool bp = false;
	void __user *pc = (void __user *)instruction_pointer(regs);

	if (!compat_user_mode(regs))
		return -EFAULT;

	if (compat_thumb_mode(regs)) {
		/* get 16-bit Thumb instruction */
		__le16 instr;
		get_user(instr, (__le16 __user *)pc);
		thumb_instr = le16_to_cpu(instr);
		if (thumb_instr == AARCH32_BREAK_THUMB2_LO) {
			/* get second half of 32-bit Thumb-2 instruction */
			get_user(instr, (__le16 __user *)(pc + 2));
			thumb_instr = le16_to_cpu(instr);
			bp = thumb_instr == AARCH32_BREAK_THUMB2_HI;
		} else {
			bp = thumb_instr == AARCH32_BREAK_THUMB;
		}
	} else {
		/* 32-bit ARM instruction */
		__le32 instr;
		get_user(instr, (__le32 __user *)pc);
arm_instr = le32_to_cpu(instr);
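		/* Ignore the condition field (bits 31:28) when matching. */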
		bp = (arm_instr & ~0xf0000000) == AARCH32_BREAK_ARM;
	}

	if (!bp)
		return -EFAULT;

send_user_sigtrap(TRAP_BRKPT);
	return 0;
}
NOKPROBE_SYMBOL(aarch32_break_handler);

void __init debug_traps_init(void)
{
	hook_debug_fault_code(DBG_ESR_EVT_HWSS, single_step_handler, SIGTRAP,
			      TRAP_TRACE, "single-step handler");
	hook_debug_fault_code(DBG_ESR_EVT_BRK, brk_handler, SIGTRAP,
			      TRAP_BRKPT, "BRK handler");
}

/* Re-enable single step for syscall restarting. */
void user_rewind_single_step(struct task_struct *task)
{
	/*
	 * If single step is active for this thread, then set SPSR.SS
	 * to 1 to avoid returning to the active-pending state.
	 */
	if (test_tsk_thread_flag(task, TIF_SINGLESTEP))
		set_regs_spsr_ss(task_pt_regs(task));
}
NOKPROBE_SYMBOL(user_rewind_single_step);

void user_fastforward_single_step(struct task_struct *task)
{
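	/*
	 * Clear SPSR.SS: the step state becomes active-pending, so the
	 * step exception is delivered without executing another
	 * instruction.
	 */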
	if (test_tsk_thread_flag(task, TIF_SINGLESTEP))
		clear_regs_spsr_ss(task_pt_regs(task));
}

void user_regs_reset_single_step(struct user_pt_regs *regs,
				 struct task_struct *task)
{
	if (test_tsk_thread_flag(task, TIF_SINGLESTEP))
		set_user_regs_spsr_ss(regs);
	else
		clear_user_regs_spsr_ss(regs);
}

/* Kernel API */
void kernel_enable_single_step(struct pt_regs *regs)
{
WARN_ON(!irqs_disabled());
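
	/*
	 * Set SPSR.SS and MDSCR_EL1.SS so that a single instruction is
	 * stepped after the next return to regs.
	 */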
	set_regs_spsr_ss(regs);
	mdscr_write(mdscr_read() | DBG_MDSCR_SS);
	enable_debug_monitors(DBG_ACTIVE_EL1);
}
NOKPROBE_SYMBOL(kernel_enable_single_step);

void kernel_disable_single_step(void)
{
	WARN_ON(!irqs_disabled());
	mdscr_write(mdscr_read() & ~DBG_MDSCR_SS);
	disable_debug_monitors(DBG_ACTIVE_EL1);
}
NOKPROBE_SYMBOL(kernel_disable_single_step);

int kernel_active_single_step(void)
{
	WARN_ON(!irqs_disabled());
	return mdscr_read() & DBG_MDSCR_SS;
}
NOKPROBE_SYMBOL(kernel_active_single_step);

/* ptrace API */
void user_enable_single_step(struct task_struct *task)
{
struct thread_info *ti = task_thread_info(task);
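
	/* Only set SPSR.SS if single-step wasn't already enabled for the task. */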
if (!test_and_set_ti_thread_flag(ti, TIF_SINGLESTEP))
set_regs_spsr_ss(task_pt_regs(task));
}
NOKPROBE_SYMBOL(user_enable_single_step);

void user_disable_single_step(struct task_struct *task)
{
	clear_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP);
}
NOKPROBE_SYMBOL(user_disable_single_step);