Merge branch 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm
Pull ARM updates from Russell King:
 "The main items in this pull request are the Spectre variant 1.1 fixes
  from Julien Thierry. A few other patches to improve various areas, and
  removal of some obsolete mcount bits and a redundant kbuild
  conditional"

* 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm:
  ARM: 8802/1: Call syscall_trace_exit even when system call skipped
  ARM: 8797/1: spectre-v1.1: harden __copy_to_user
  ARM: 8796/1: spectre-v1,v1.1: provide helpers for address sanitization
  ARM: 8795/1: spectre-v1.1: use put_user() for __put_user()
  ARM: 8794/1: uaccess: Prevent speculative use of the current addr_limit
  ARM: 8793/1: signal: replace __put_user_error with __put_user
  ARM: 8792/1: oabi-compat: copy oabi events using __copy_to_user()
  ARM: 8791/1: vfp: use __copy_to_user() when saving VFP state
  ARM: 8790/1: signal: always use __copy_to_user to save iwmmxt context
  ARM: 8789/1: signal: copy registers using __copy_to_user()
  ARM: 8801/1: makefile: use ARMv3M mode for RiscPC
  ARM: 8800/1: use choice for kernel unwinders
  ARM: 8798/1: remove unnecessary KBUILD_SRC ifeq conditional
  ARM: 8788/1: ftrace: remove old mcount support
  ARM: 8786/1: Debug kernel copy by printing
commit 07171da264
@@ -45,35 +45,42 @@ config DEBUG_WX
 
 	  If in doubt, say "Y".
 
-# RMK wants arm kernels compiled with frame pointers or stack unwinding.
-# If you know what you are doing and are willing to live without stack
-# traces, you can get a slightly smaller kernel by setting this option to
-# n, but then RMK will have to kill you ;).
-config FRAME_POINTER
-	bool
-	depends on !THUMB2_KERNEL
-	default y if !ARM_UNWIND || FUNCTION_GRAPH_TRACER
+choice
+	prompt "Choose kernel unwinder"
+	default UNWINDER_ARM if AEABI && !FUNCTION_GRAPH_TRACER
+	default UNWINDER_FRAME_POINTER if !AEABI || FUNCTION_GRAPH_TRACER
 	help
-	  If you say N here, the resulting kernel will be slightly smaller and
-	  faster. However, if neither FRAME_POINTER nor ARM_UNWIND are enabled,
-	  when a problem occurs with the kernel, the information that is
-	  reported is severely limited.
+	  This determines which method will be used for unwinding kernel stack
+	  traces for panics, oopses, bugs, warnings, perf, /proc/<pid>/stack,
+	  livepatch, lockdep, and more.
 
-config ARM_UNWIND
-	bool "Enable stack unwinding support (EXPERIMENTAL)"
+config UNWINDER_FRAME_POINTER
+	bool "Frame pointer unwinder"
+	depends on !THUMB2_KERNEL && !CC_IS_CLANG
+	select ARCH_WANT_FRAME_POINTERS
+	select FRAME_POINTER
+	help
+	  This option enables the frame pointer unwinder for unwinding
+	  kernel stack traces.
+
+config UNWINDER_ARM
+	bool "ARM EABI stack unwinder"
 	depends on AEABI
-	default y
+	select ARM_UNWIND
 	help
 	  This option enables stack unwinding support in the kernel
 	  using the information automatically generated by the
 	  compiler. The resulting kernel image is slightly bigger but
 	  the performance is not affected. Currently, this feature
-	  only works with EABI compilers. If unsure say Y.
+	  only works with EABI compilers.
 
-config OLD_MCOUNT
+endchoice
+
+config ARM_UNWIND
+	bool
+
+config FRAME_POINTER
 	bool
-	depends on FUNCTION_TRACER && FRAME_POINTER
-	default y
 
 config DEBUG_USER
 	bool "Verbose user fault messages"
@@ -74,7 +74,7 @@ endif
 arch-$(CONFIG_CPU_32v5)		=-D__LINUX_ARM_ARCH__=5 $(call cc-option,-march=armv5te,-march=armv4t)
 arch-$(CONFIG_CPU_32v4T)	=-D__LINUX_ARM_ARCH__=4 -march=armv4t
 arch-$(CONFIG_CPU_32v4)		=-D__LINUX_ARM_ARCH__=4 -march=armv4
-arch-$(CONFIG_CPU_32v3)		=-D__LINUX_ARM_ARCH__=3 -march=armv3
+arch-$(CONFIG_CPU_32v3)		=-D__LINUX_ARM_ARCH__=3 -march=armv3m
 
 # Evaluate arch cc-option calls now
 arch-y := $(arch-y)
@@ -264,13 +264,9 @@ platdirs := $(patsubst %,arch/arm/plat-%/,$(sort $(plat-y)))
 
 ifneq ($(CONFIG_ARCH_MULTIPLATFORM),y)
 ifneq ($(CONFIG_ARM_SINGLE_ARMV7M),y)
-ifeq ($(KBUILD_SRC),)
-KBUILD_CPPFLAGS += $(patsubst %,-I%include,$(machdirs) $(platdirs))
-else
 KBUILD_CPPFLAGS += $(patsubst %,-I$(srctree)/%include,$(machdirs) $(platdirs))
-endif
 endif
 endif
 
 export TEXT_OFFSET GZFLAGS MMUEXT
 
@@ -114,6 +114,35 @@
 #endif
 		.endm
 
+		/*
+		 * Debug kernel copy by printing the memory addresses involved
+		 */
+		.macro dbgkc, begin, end, cbegin, cend
+#ifdef DEBUG
+		kputc	#'\n'
+		kputc	#'C'
+		kputc	#':'
+		kputc	#'0'
+		kputc	#'x'
+		kphex	\begin, 8	/* Start of compressed kernel */
+		kputc	#'-'
+		kputc	#'0'
+		kputc	#'x'
+		kphex	\end, 8		/* End of compressed kernel */
+		kputc	#'-'
+		kputc	#'>'
+		kputc	#'0'
+		kputc	#'x'
+		kphex	\cbegin, 8	/* Start of kernel copy */
+		kputc	#'-'
+		kputc	#'0'
+		kputc	#'x'
+		kphex	\cend, 8	/* End of kernel copy */
+		kputc	#'\n'
+		kputc	#'\r'
+#endif
+		.endm
+
 		.section ".start", #alloc, #execinstr
 /*
  * sort out different calling conventions
@@ -450,6 +479,20 @@ dtb_check_done:
 		add	r6, r9, r5
 		add	r9, r9, r10
 
+#ifdef DEBUG
+		sub	r10, r6, r5
+		sub	r10, r9, r10
+		/*
+		 * We are about to copy the kernel to a new memory area.
+		 * The boundaries of the new memory area can be found in
+		 * r10 and r9, whilst r5 and r6 contain the boundaries
+		 * of the memory we are going to copy.
+		 * Calling dbgkc will help with the printing of this
+		 * information.
+		 */
+		dbgkc	r5, r6, r10, r9
+#endif
+
 1:		ldmdb	r6!, {r0 - r3, r10 - r12, lr}
 		cmp	r6, r5
 		stmdb	r9!, {r0 - r3, r10 - r12, lr}
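For reference, dbgkc prints one line per kernel copy of the form
C:0xAAAAAAAA-0xBBBBBBBB->0xCCCCCCCC-0xDDDDDDDD. A host-side C sketch of
the same formatting (an illustration only, not kernel code; it assumes
kphex's 8-digit upper-case hex output):

	#include <stdio.h>

	/* compressed-kernel range first, then the copy-destination range */
	static void dbgkc_model(unsigned long begin, unsigned long end,
				unsigned long cbegin, unsigned long cend)
	{
		printf("\nC:0x%08lX-0x%08lX->0x%08lX-0x%08lX\n\r",
		       begin, end, cbegin, cend);
	}

	int main(void)
	{
		dbgkc_model(0x00008000UL, 0x00408000UL,
			    0x04008000UL, 0x04408000UL);
		return 0;
	}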
@@ -467,6 +467,17 @@ THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
 #endif
 	.endm
 
+	.macro	uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req
+#ifdef CONFIG_CPU_SPECTRE
+	sub	\tmp, \limit, #1
+	subs	\tmp, \tmp, \addr	@ tmp = limit - 1 - addr
+	addhs	\tmp, \tmp, #1		@ if (tmp >= 0) {
+	subhss	\tmp, \tmp, \size	@ tmp = limit - (addr + size) }
+	movlo	\addr, #0		@ if (tmp < 0) addr = NULL
+	csdb
+#endif
+	.endm
+
 	.macro	uaccess_disable, tmp, isb=1
 #ifdef CONFIG_CPU_SW_DOMAIN_PAN
 	/*
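Read as C, the macro's borrow chain fails if addr > limit - 1, then fails
if size > limit - addr; on failure the address is replaced with NULL. A C
model of that logic (a sketch only: the real macro is branchless and ends
in a csdb speculation barrier, which plain C cannot express):

	#include <stddef.h>

	/* Sketch of uaccess_mask_range_ptr's arithmetic, not kernel code. */
	static void *mask_range_ptr(unsigned long addr, unsigned long size,
				    unsigned long limit)
	{
		unsigned long tmp = limit - 1;

		if (addr > tmp)			/* subs borrows -> LO */
			return NULL;		/* movlo addr, #0 */
		tmp = tmp - addr + 1;		/* addhs: tmp = limit - addr */
		if (size > tmp)			/* subhss borrows -> LO */
			return NULL;
		return (void *)addr;		/* csdb follows in the macro */
	}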
@@ -16,9 +16,6 @@ extern void __gnu_mcount_nc(void);
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 struct dyn_arch_ftrace {
-#ifdef CONFIG_OLD_MCOUNT
-	bool	old_mcount;
-#endif
 };
 
 static inline unsigned long ftrace_call_adjust(unsigned long addr)
@@ -121,8 +121,8 @@ extern void vfp_flush_hwstate(struct thread_info *);
 struct user_vfp;
 struct user_vfp_exc;
 
-extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *,
-					   struct user_vfp_exc __user *);
+extern int vfp_preserve_user_clear_hwstate(struct user_vfp *,
+					   struct user_vfp_exc *);
 extern int vfp_restore_user_hwstate(struct user_vfp *,
 				    struct user_vfp_exc *);
 #endif
@@ -69,6 +69,14 @@ extern int __put_user_bad(void);
 static inline void set_fs(mm_segment_t fs)
 {
 	current_thread_info()->addr_limit = fs;
+
+	/*
+	 * Prevent a mispredicted conditional call to set_fs from forwarding
+	 * the wrong address limit to access_ok under speculation.
+	 */
+	dsb(nsh);
+	isb();
+
 	modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
 }
 
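A hedged illustration of the hazard the new barriers close (hypothetical
caller, simplified; not from this commit): if the branch guarding a
set_fs() call is mispredicted, a later access_ok() could be evaluated
under speculation against the stale addr_limit; dsb(nsh) plus isb() make
the updated limit visible before any subsequent check.

	#include <linux/uaccess.h>

	static long read_cfg(char __user *ubuf, size_t len, bool from_kernel)
	{
		mm_segment_t old_fs = get_fs();
		long ret;

		if (from_kernel)
			set_fs(KERNEL_DS);	/* conditional call: may mispredict */

		/* must not run speculatively with the wrong addr_limit */
		ret = access_ok(VERIFY_WRITE, ubuf, len) ? 0 : -EFAULT;

		set_fs(old_fs);
		return ret;
	}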
@@ -91,6 +99,32 @@ static inline void set_fs(mm_segment_t fs)
 #define __inttype(x) \
 	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
 
+/*
+ * Sanitise a uaccess pointer such that it becomes NULL if addr+size
+ * is above the current addr_limit.
+ */
+#define uaccess_mask_range_ptr(ptr, size)			\
+	((__typeof__(ptr))__uaccess_mask_range_ptr(ptr, size))
+static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr,
+						    size_t size)
+{
+	void __user *safe_ptr = (void __user *)ptr;
+	unsigned long tmp;
+
+	asm volatile(
+	"	sub	%1, %3, #1\n"
+	"	subs	%1, %1, %0\n"
+	"	addhs	%1, %1, #1\n"
+	"	subhss	%1, %1, %2\n"
+	"	movlo	%0, #0\n"
+	: "+r" (safe_ptr), "=&r" (tmp)
+	: "r" (size), "r" (current_thread_info()->addr_limit)
+	: "cc");
+
+	csdb();
+	return safe_ptr;
+}
+
 /*
  * Single-value transfer routines.  They automatically use the right
  * size if we just have the right pointer type.  Note that the functions
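The intended call pattern appears in the library hunks further down:
sanitise first, then hand the possibly-NULLed pointer to the copy
routine, which faults cleanly instead of dereferencing an
attacker-steered address. A hedged usage sketch (the caller name is
hypothetical):

	#include <linux/uaccess.h>

	static unsigned long copy_log_to_user(void __user *ubuf,
					      const char *kbuf, unsigned long n)
	{
		/* If ubuf + n exceeds addr_limit the pointer becomes NULL,
		 * which no user mapping can alias, even under speculation. */
		return arm_copy_to_user(uaccess_mask_range_ptr(ubuf, n),
					kbuf, n);
	}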
@@ -362,6 +396,14 @@ do {									\
 	__pu_err;							\
 })
 
+#ifdef CONFIG_CPU_SPECTRE
+/*
+ * When mitigating Spectre variant 1.1, all accessors need to include
+ * verification of the address space.
+ */
+#define __put_user(x, ptr) put_user(x, ptr)
+
+#else
 #define __put_user(x, ptr)						\
 ({									\
 	long __pu_err = 0;						\
@@ -369,12 +411,6 @@ do {									\
 	__pu_err;							\
 })
 
-#define __put_user_error(x, ptr, err)					\
-({									\
-	__put_user_switch((x), (ptr), (err), __put_user_nocheck);	\
-	(void) 0;							\
-})
-
 #define __put_user_nocheck(x, __pu_ptr, __err, __size)			\
 do {									\
 	unsigned long __pu_addr = (unsigned long)__pu_ptr;		\
@@ -454,6 +490,7 @@ do {									\
 	: "r" (x), "i" (-EFAULT)					\
 	: "cc")
 
+#endif /* !CONFIG_CPU_SPECTRE */
 
 #ifdef CONFIG_MMU
 extern unsigned long __must_check
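For context, a sketch of the Spectre v1.1 store gadget that aliasing
__put_user() to put_user() closes (hypothetical driver code, not from
this commit): the caller's bounds check can be bypassed speculatively, so
the accessor itself has to re-verify the address.

	#include <linux/uaccess.h>

	static long set_slot(u32 __user *slots, unsigned long nslots,
			     unsigned long idx, u32 val)
	{
		if (idx >= nslots)	/* may be bypassed under speculation */
			return -EINVAL;

		/* With CONFIG_CPU_SPECTRE this expands to put_user(), which
		 * checks slots + idx against addr_limit before storing. */
		return __put_user(val, &slots[idx]);
	}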
@@ -167,9 +167,6 @@ EXPORT_SYMBOL(_find_next_bit_be);
 #endif
 
 #ifdef CONFIG_FUNCTION_TRACER
-#ifdef CONFIG_OLD_MCOUNT
-EXPORT_SYMBOL(mcount);
-#endif
 EXPORT_SYMBOL(__gnu_mcount_nc);
 #endif
 
@@ -296,16 +296,15 @@ __sys_trace:
 	cmp	scno, #-1			@ skip the syscall?
 	bne	2b
 	add	sp, sp, #S_OFF			@ restore stack
-	b	ret_slow_syscall
 
-__sys_trace_return:
-	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
+__sys_trace_return_nosave:
+	enable_irq_notrace
 	mov	r0, sp
 	bl	syscall_trace_exit
 	b	ret_slow_syscall
 
-__sys_trace_return_nosave:
-	enable_irq_notrace
+__sys_trace_return:
+	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
 	mov	r0, sp
 	bl	syscall_trace_exit
 	b	ret_slow_syscall
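In C terms, the relabelling makes the skipped-syscall path (scno == -1)
fall through into the tracing epilogue instead of branching straight to
ret_slow_syscall. A simplified pseudo-C model of the fixed flow
(invoke_syscall is a stand-in name, not the arm32 entry code):

	#include <asm/ptrace.h>

	static void sys_trace_path(struct pt_regs *regs, int scno)
	{
		if (scno != -1)
			regs->ARM_r0 = invoke_syscall(scno, regs);

		/* previously skipped when scno == -1, so tracers never
		 * saw the exit of a skipped system call */
		syscall_trace_exit(regs);
	}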
@@ -15,23 +15,8 @@
  * start of every function.  In mcount, apart from the function's address (in
  * lr), we need to get hold of the function's caller's address.
  *
- * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this:
- *
- *	bl	mcount
- *
- * These versions have the limitation that in order for the mcount routine to
- * be able to determine the function's caller's address, an APCS-style frame
- * pointer (which is set up with something like the code below) is required.
- *
- *	mov     ip, sp
- *	push    {fp, ip, lr, pc}
- *	sub     fp, ip, #4
- *
- * With EABI, these frame pointers are not available unless -mapcs-frame is
- * specified, and if building as Thumb-2, not even then.
- *
- * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount,
- * with call sites like:
+ * Newer GCCs (4.4+) solve this problem by using a version of mcount with call
+ * sites like:
  *
  *	push	{lr}
  *	bl	__gnu_mcount_nc
@@ -46,17 +31,10 @@
  * allows it to be clobbered in subroutines and doesn't use it to hold
  * parameters.)
  *
- * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0"
- * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see
- * arch/arm/kernel/ftrace.c).
+ * When using dynamic ftrace, we patch out the mcount call by a "pop {lr}"
+ * instead of the __gnu_mcount_nc call (see arch/arm/kernel/ftrace.c).
  */
 
-#ifndef CONFIG_OLD_MCOUNT
-#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
-#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0.
-#endif
-#endif
-
 .macro mcount_adjust_addr rd, rn
 	bic	\rd, \rn, #1		@ clear the Thumb bit if present
 	sub	\rd, \rd, #MCOUNT_INSN_SIZE
@@ -209,51 +187,6 @@ ftrace_graph_call\suffix:
 	mcount_exit
 .endm
 
-#ifdef CONFIG_OLD_MCOUNT
-/*
- * mcount
- */
-
-.macro mcount_enter
-	stmdb	sp!, {r0-r3, lr}
-.endm
-
-.macro mcount_get_lr reg
-	ldr	\reg, [fp, #-4]
-.endm
-
-.macro mcount_exit
-	ldr	lr, [fp, #-4]
-	ldmia	sp!, {r0-r3, pc}
-.endm
-
-ENTRY(mcount)
-#ifdef CONFIG_DYNAMIC_FTRACE
-	stmdb	sp!, {lr}
-	ldr	lr, [fp, #-4]
-	ldmia	sp!, {pc}
-#else
-	__mcount _old
-#endif
-ENDPROC(mcount)
-
-#ifdef CONFIG_DYNAMIC_FTRACE
-ENTRY(ftrace_caller_old)
-	__ftrace_caller _old
-ENDPROC(ftrace_caller_old)
-#endif
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-ENTRY(ftrace_graph_caller_old)
-	__ftrace_graph_caller
-ENDPROC(ftrace_graph_caller_old)
-#endif
-
-.purgem mcount_enter
-.purgem mcount_get_lr
-.purgem mcount_exit
-#endif
-
 /*
  * __gnu_mcount_nc
  */
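With OLD_MCOUNT gone, every call site is the EABI form shown in the
comment above, "push {lr}; bl __gnu_mcount_nc", toggled by rewriting the
bl to "pop {lr}" (the NOP constant 0xe8bd4000 kept in the next hunk). A
host-side sketch of the two encodings (an illustration; the simplified bl
encoder is an assumption, not the kernel's ftrace_call_replace()):

	#include <stdint.h>
	#include <stdio.h>

	#define POP_LR	0xe8bd4000u	/* "pop {lr}": tracing disabled */

	/* simplified ARM "bl": 24-bit signed word offset from pc + 8 */
	static uint32_t arm_bl(uint32_t pc, uint32_t target)
	{
		int32_t off = (int32_t)(target - pc - 8) / 4;
		return 0xeb000000u | ((uint32_t)off & 0x00ffffffu);
	}

	int main(void)
	{
		/* call site at 0xc0008000, __gnu_mcount_nc at 0xc0010000 */
		printf("enabled:  %08x\n", arm_bl(0xc0008000u, 0xc0010000u));
		printf("disabled: %08x\n", POP_LR);
		return 0;
	}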
@@ -47,30 +47,6 @@ void arch_ftrace_update_code(int command)
 	stop_machine(__ftrace_modify_code, &command, NULL);
 }
 
-#ifdef CONFIG_OLD_MCOUNT
-#define OLD_MCOUNT_ADDR	((unsigned long) mcount)
-#define OLD_FTRACE_ADDR ((unsigned long) ftrace_caller_old)
-
-#define	OLD_NOP		0xe1a00000	/* mov r0, r0 */
-
-static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
-{
-	return rec->arch.old_mcount ? OLD_NOP : NOP;
-}
-
-static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
-{
-	if (!rec->arch.old_mcount)
-		return addr;
-
-	if (addr == MCOUNT_ADDR)
-		addr = OLD_MCOUNT_ADDR;
-	else if (addr == FTRACE_ADDR)
-		addr = OLD_FTRACE_ADDR;
-
-	return addr;
-}
-#else
 static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
 {
 	return NOP;
@@ -80,7 +56,6 @@ static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
 {
 	return addr;
 }
-#endif
 
 int ftrace_arch_code_modify_prepare(void)
 {
@@ -150,15 +125,6 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
 	}
 #endif
 
-#ifdef CONFIG_OLD_MCOUNT
-	if (!ret) {
-		pc = (unsigned long)&ftrace_call_old;
-		new = ftrace_call_replace(pc, (unsigned long)func);
-
-		ret = ftrace_modify_code(pc, 0, new, false);
-	}
-#endif
-
 	return ret;
 }
 
@@ -203,16 +169,6 @@ int ftrace_make_nop(struct module *mod,
 	new = ftrace_nop_replace(rec);
 	ret = ftrace_modify_code(ip, old, new, true);
 
-#ifdef CONFIG_OLD_MCOUNT
-	if (ret == -EINVAL && addr == MCOUNT_ADDR) {
-		rec->arch.old_mcount = true;
-
-		old = ftrace_call_replace(ip, adjust_address(rec, addr));
-		new = ftrace_nop_replace(rec);
-		ret = ftrace_modify_code(ip, old, new, true);
-	}
-#endif
-
 	return ret;
 }
 
@@ -290,13 +246,6 @@ static int ftrace_modify_graph_caller(bool enable)
 #endif
 
 
-#ifdef CONFIG_OLD_MCOUNT
-	if (!ret)
-		ret = __ftrace_modify_caller(&ftrace_graph_call_old,
-					     ftrace_graph_caller_old,
-					     enable);
-#endif
-
 	return ret;
 }
 
@@ -77,8 +77,6 @@ static int preserve_iwmmxt_context(struct iwmmxt_sigframe __user *frame)
 		kframe->magic = IWMMXT_MAGIC;
 		kframe->size = IWMMXT_STORAGE_SIZE;
 		iwmmxt_task_copy(current_thread_info(), &kframe->storage);
-
-		err = __copy_to_user(frame, kframe, sizeof(*frame));
 	} else {
 		/*
 		 * For bug-compatibility with older kernels, some space
@@ -86,10 +84,14 @@ static int preserve_iwmmxt_context(struct iwmmxt_sigframe __user *frame)
 		 * Set the magic and size appropriately so that properly
 		 * written userspace can skip it reliably:
 		 */
-		__put_user_error(DUMMY_MAGIC, &frame->magic, err);
-		__put_user_error(IWMMXT_STORAGE_SIZE, &frame->size, err);
+		*kframe = (struct iwmmxt_sigframe) {
+			.magic = DUMMY_MAGIC,
+			.size  = IWMMXT_STORAGE_SIZE,
+		};
 	}
 
+	err = __copy_to_user(frame, kframe, sizeof(*kframe));
+
 	return err;
 }
 
@@ -135,17 +137,18 @@ static int restore_iwmmxt_context(char __user **auxp)
 
 static int preserve_vfp_context(struct vfp_sigframe __user *frame)
 {
-	const unsigned long magic = VFP_MAGIC;
-	const unsigned long size = VFP_STORAGE_SIZE;
+	struct vfp_sigframe kframe;
 	int err = 0;
 
-	__put_user_error(magic, &frame->magic, err);
-	__put_user_error(size, &frame->size, err);
+	memset(&kframe, 0, sizeof(kframe));
+	kframe.magic = VFP_MAGIC;
+	kframe.size = VFP_STORAGE_SIZE;
 
+	err = vfp_preserve_user_clear_hwstate(&kframe.ufp, &kframe.ufp_exc);
 	if (err)
-		return -EFAULT;
+		return err;
 
-	return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
+	return __copy_to_user(frame, &kframe, sizeof(kframe));
 }
 
 static int restore_vfp_context(char __user **auxp)
@@ -288,30 +291,35 @@ static int
 setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
 {
 	struct aux_sigframe __user *aux;
+	struct sigcontext context;
 	int err = 0;
 
-	__put_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
-	__put_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
-	__put_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
-	__put_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
-	__put_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
-	__put_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
-	__put_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
-	__put_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
-	__put_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
-	__put_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
-	__put_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
-	__put_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
-	__put_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
-	__put_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
-	__put_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
-	__put_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
-	__put_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);
+	context = (struct sigcontext) {
+		.arm_r0        = regs->ARM_r0,
+		.arm_r1        = regs->ARM_r1,
+		.arm_r2        = regs->ARM_r2,
+		.arm_r3        = regs->ARM_r3,
+		.arm_r4        = regs->ARM_r4,
+		.arm_r5        = regs->ARM_r5,
+		.arm_r6        = regs->ARM_r6,
+		.arm_r7        = regs->ARM_r7,
+		.arm_r8        = regs->ARM_r8,
+		.arm_r9        = regs->ARM_r9,
+		.arm_r10       = regs->ARM_r10,
+		.arm_fp        = regs->ARM_fp,
+		.arm_ip        = regs->ARM_ip,
+		.arm_sp        = regs->ARM_sp,
+		.arm_lr        = regs->ARM_lr,
+		.arm_pc        = regs->ARM_pc,
+		.arm_cpsr      = regs->ARM_cpsr,
 
-	__put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no, err);
-	__put_user_error(current->thread.error_code, &sf->uc.uc_mcontext.error_code, err);
-	__put_user_error(current->thread.address, &sf->uc.uc_mcontext.fault_address, err);
-	__put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);
+		.trap_no       = current->thread.trap_no,
+		.error_code    = current->thread.error_code,
+		.fault_address = current->thread.address,
+		.oldmask       = set->sig[0],
+	};
+
+	err |= __copy_to_user(&sf->uc.uc_mcontext, &context, sizeof(context));
 
 	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
 
@@ -328,7 +336,7 @@ setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
 	if (err == 0)
 		err |= preserve_vfp_context(&aux->vfp);
 #endif
-	__put_user_error(0, &aux->end_magic, err);
+	err |= __put_user(0, &aux->end_magic);
 
 	return err;
 }
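The setup_sigframe() conversion above is the pattern used throughout this
series: build the frame in a kernel staging struct with plain
assignments, then publish it with a single hardened __copy_to_user(),
rather than issuing one unchecked __put_user_error() per field. A minimal
kernel-context sketch of the pattern (hypothetical struct and names):

	#include <linux/uaccess.h>

	struct foo_frame {
		u32 magic;
		u32 flags;
	};

	static int save_foo_frame(struct foo_frame __user *uframe, u32 flags)
	{
		struct foo_frame kframe = {
			.magic = 0x464f4f21,	/* example value */
			.flags = flags,
		};

		/* one user access, verified and Spectre-masked in one place */
		return __copy_to_user(uframe, &kframe, sizeof(kframe)) ?
			-EFAULT : 0;
	}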
@@ -491,7 +499,7 @@ setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
 	/*
 	 * Set uc.uc_flags to a value which sc.trap_no would never have.
 	 */
-	__put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err);
+	err = __put_user(0x5ac3c35a, &frame->uc.uc_flags);
 
 	err |= setup_sigframe(frame, regs, set);
 	if (err == 0)
@@ -511,8 +519,8 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
 
 	err |= copy_siginfo_to_user(&frame->info, &ksig->info);
 
-	__put_user_error(0, &frame->sig.uc.uc_flags, err);
-	__put_user_error(NULL, &frame->sig.uc.uc_link, err);
+	err |= __put_user(0, &frame->sig.uc.uc_flags);
+	err |= __put_user(NULL, &frame->sig.uc.uc_link);
 
 	err |= __save_altstack(&frame->sig.uc.uc_stack, regs->ARM_sp);
 	err |= setup_sigframe(&frame->sig, regs, set);
@@ -277,6 +277,7 @@ asmlinkage long sys_oabi_epoll_wait(int epfd,
 				    int maxevents, int timeout)
 {
 	struct epoll_event *kbuf;
+	struct oabi_epoll_event e;
 	mm_segment_t fs;
 	long ret, err, i;
 
@@ -295,8 +296,11 @@ asmlinkage long sys_oabi_epoll_wait(int epfd,
 	set_fs(fs);
 	err = 0;
 	for (i = 0; i < ret; i++) {
-		__put_user_error(kbuf[i].events, &events->events, err);
-		__put_user_error(kbuf[i].data, &events->data, err);
+		e.events = kbuf[i].events;
+		e.data = kbuf[i].data;
+		err = __copy_to_user(events, &e, sizeof(e));
+		if (err)
+			break;
 		events++;
 	}
 	kfree(kbuf);
@@ -93,11 +93,7 @@ ENTRY(arm_copy_from_user)
 #ifdef CONFIG_CPU_SPECTRE
 	get_thread_info r3
 	ldr	r3, [r3, #TI_ADDR_LIMIT]
-	adds	ip, r1, r2	@ ip=addr+size
-	sub	r3, r3, #1	@ addr_limit - 1
-	cmpcc	ip, r3		@ if (addr+size > addr_limit - 1)
-	movcs	r1, #0		@ addr = NULL
-	csdb
+	uaccess_mask_range_ptr r1, r2, r3, ip
 #endif
 
 #include "copy_template.S"
@@ -94,6 +94,11 @@
 
 ENTRY(__copy_to_user_std)
 WEAK(arm_copy_to_user)
+#ifdef CONFIG_CPU_SPECTRE
+	get_thread_info r3
+	ldr	r3, [r3, #TI_ADDR_LIMIT]
+	uaccess_mask_range_ptr r0, r2, r3, ip
+#endif
 
 #include "copy_template.S"
 
@@ -108,4 +113,3 @@ ENDPROC(__copy_to_user_std)
 	rsb	r0, r0, r2
 	copy_abort_end
 	.popsection
-
@@ -152,7 +152,8 @@ arm_copy_to_user(void __user *to, const void *from, unsigned long n)
 		n = __copy_to_user_std(to, from, n);
 		uaccess_restore(ua_flags);
 	} else {
-		n = __copy_to_user_memcpy(to, from, n);
+		n = __copy_to_user_memcpy(uaccess_mask_range_ptr(to, n),
+					  from, n);
 	}
 	return n;
 }
@@ -553,12 +553,11 @@ void vfp_flush_hwstate(struct thread_info *thread)
  * Save the current VFP state into the provided structures and prepare
  * for entry into a new function (signal handler).
  */
-int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
-				    struct user_vfp_exc __user *ufp_exc)
+int vfp_preserve_user_clear_hwstate(struct user_vfp *ufp,
+				    struct user_vfp_exc *ufp_exc)
 {
 	struct thread_info *thread = current_thread_info();
 	struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
-	int err = 0;
 
 	/* Ensure that the saved hwstate is up-to-date. */
 	vfp_sync_hwstate(thread);
@@ -567,22 +566,19 @@ int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
 	 * Copy the floating point registers. There can be unused
 	 * registers see asm/hwcap.h for details.
 	 */
-	err |= __copy_to_user(&ufp->fpregs, &hwstate->fpregs,
-			      sizeof(hwstate->fpregs));
+	memcpy(&ufp->fpregs, &hwstate->fpregs, sizeof(hwstate->fpregs));
 	/*
 	 * Copy the status and control register.
 	 */
-	__put_user_error(hwstate->fpscr, &ufp->fpscr, err);
+	ufp->fpscr = hwstate->fpscr;
 
 	/*
 	 * Copy the exception registers.
 	 */
-	__put_user_error(hwstate->fpexc, &ufp_exc->fpexc, err);
-	__put_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
-	__put_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
-
-	if (err)
-		return -EFAULT;
+	ufp_exc->fpexc = hwstate->fpexc;
+	ufp_exc->fpinst = hwstate->fpinst;
+	ufp_exc->fpinst2 = hwstate->fpinst2;
 
 	/* Ensure that VFP is disabled. */
 	vfp_flush_hwstate(thread);
@@ -1179,7 +1179,7 @@ config LOCKDEP
 	bool
 	depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
 	select STACKTRACE
-	select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE && !ARC && !X86
+	select FRAME_POINTER if !MIPS && !PPC && !ARM && !S390 && !MICROBLAZE && !ARC && !X86
 	select KALLSYMS
 	select KALLSYMS_ALL
 
@@ -1590,7 +1590,7 @@ config FAULT_INJECTION_STACKTRACE_FILTER
 	depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
 	depends on !X86_64
 	select STACKTRACE
-	select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC && !X86
+	select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC && !X86
 	help
 	  Provide stacktrace filter for fault-injection capabilities
 
@@ -1599,7 +1599,7 @@ config LATENCYTOP
 	depends on DEBUG_KERNEL
 	depends on STACKTRACE_SUPPORT
 	depends on PROC_FS
-	select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC && !X86
+	select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC && !X86
	select KALLSYMS
 	select KALLSYMS_ALL
 	select STACKTRACE