Merge branch 'rework/printk_safe-removal' into for-linus
commit c985aafb60
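This merge removes the printk_safe/printk_nmi per-CPU buffering machinery: messages are now stored directly into the lockless ringbuffer from any context, printk_safe_flush()/printk_safe_flush_on_panic() and the printk_nmi_*() hooks go away, @syslog_lock becomes a mutex, and printk_deferred_enter()/printk_deferred_exit() are added for callers that must defer console output while holding their own locks. A minimal sketch (not taken from this merge; the lock and function names are hypothetical) of the call pattern the new macros are meant for:

static DEFINE_RAW_SPINLOCK(example_lock);	/* hypothetical */

static void example_report(void)
{
	unsigned long flags;

	/* interrupts must be disabled for the deferred section */
	raw_spin_lock_irqsave(&example_lock, flags);
	printk_deferred_enter();
	pr_warn("state dump while holding example_lock\n");
	printk_deferred_exit();
	raw_spin_unlock_irqrestore(&example_lock, flags);
}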
@@ -667,9 +667,9 @@ static void do_handle_IPI(int ipinr)
break;

case IPI_CPU_BACKTRACE:
printk_nmi_enter();
printk_deferred_enter();
nmi_cpu_backtrace(get_irq_regs());
printk_nmi_exit();
printk_deferred_exit();
break;

default:
@@ -171,7 +171,6 @@ extern void panic_flush_kmsg_start(void)

extern void panic_flush_kmsg_end(void)
{
printk_safe_flush_on_panic();
kmsg_dump(KMSG_DUMP_PANIC);
bust_spinlocks(0);
debug_locks_off();
@@ -184,11 +184,6 @@ static void watchdog_smp_panic(int cpu, u64 tb)

wd_smp_unlock(&flags);

printk_safe_flush();
/*
* printk_safe_flush() seems to require another print
* before anything actually goes out to console.
*/
if (sysctl_hardlockup_all_cpu_backtrace)
trigger_allbutself_cpu_backtrace();
@@ -313,7 +313,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
int (*old_handler)(struct pt_regs *regs);

/* Avoid hardlocking with irresponsive CPU holding logbuf_lock */
printk_nmi_enter();
printk_deferred_enter();

/*
* This function is only called after the system
@@ -116,7 +116,6 @@ extern void rcu_nmi_exit(void);
do { \
lockdep_off(); \
arch_nmi_enter(); \
printk_nmi_enter(); \
BUG_ON(in_nmi() == NMI_MASK); \
__preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \
} while (0)
@@ -135,7 +134,6 @@ extern void rcu_nmi_exit(void);
do { \
BUG_ON(!in_nmi()); \
__preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \
printk_nmi_exit(); \
arch_nmi_exit(); \
lockdep_on(); \
} while (0)
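With printk_nmi_enter()/printk_nmi_exit() dropped from nmi_enter()/nmi_exit(), code running in NMI context simply calls printk() and relies on the lockless ringbuffer plus deferred console output (see the vprintk() hunk in printk_safe.c further down). A hedged sketch, not part of this merge, with hypothetical names:

/* called in NMI context, e.g. from a hardlockup or perf NMI path */
static void example_nmi_report(struct pt_regs *regs)
{
	/* stored directly into the ringbuffer; console work is deferred */
	pr_warn("unexpected NMI at %pS\n", (void *)instruction_pointer(regs));
}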
@@ -141,18 +141,6 @@ static inline __printf(1, 2) __cold
void early_printk(const char *s, ...) { }
#endif

#ifdef CONFIG_PRINTK_NMI
extern void printk_nmi_enter(void);
extern void printk_nmi_exit(void);
extern void printk_nmi_direct_enter(void);
extern void printk_nmi_direct_exit(void);
#else
static inline void printk_nmi_enter(void) { }
static inline void printk_nmi_exit(void) { }
static inline void printk_nmi_direct_enter(void) { }
static inline void printk_nmi_direct_exit(void) { }
#endif /* PRINTK_NMI */

struct dev_printk_info;

#ifdef CONFIG_PRINTK
@@ -172,6 +160,16 @@ int _printk(const char *fmt, ...);
*/
__printf(1, 2) __cold int _printk_deferred(const char *fmt, ...);

extern void __printk_safe_enter(void);
extern void __printk_safe_exit(void);
/*
* The printk_deferred_enter/exit macros are available only as a hack for
* some code paths that need to defer all printk console printing. Interrupts
* must be disabled for the deferred duration.
*/
#define printk_deferred_enter __printk_safe_enter
#define printk_deferred_exit __printk_safe_exit

/*
* Please don't use printk_ratelimit(), because it shares ratelimiting state
* with all other unrelated printk_ratelimit() callsites. Instead use
@@ -200,8 +198,6 @@ void dump_stack_print_info(const char *log_lvl);
void show_regs_print_info(const char *log_lvl);
extern asmlinkage void dump_stack_lvl(const char *log_lvl) __cold;
extern asmlinkage void dump_stack(void) __cold;
extern void printk_safe_flush(void);
extern void printk_safe_flush_on_panic(void);
#else
static inline __printf(1, 0)
int vprintk(const char *s, va_list args)
@@ -218,6 +214,15 @@ int _printk_deferred(const char *s, ...)
{
return 0;
}

static inline void printk_deferred_enter(void)
{
}

static inline void printk_deferred_exit(void)
{
}

static inline int printk_ratelimit(void)
{
return 0;
@@ -269,14 +274,6 @@ static inline void dump_stack_lvl(const char *log_lvl)
static inline void dump_stack(void)
{
}

static inline void printk_safe_flush(void)
{
}

static inline void printk_safe_flush_on_panic(void)
{
}
#endif

#ifdef CONFIG_SMP
@@ -1523,11 +1523,6 @@ config PRINTK
very difficult to diagnose system problems, saying N here is
strongly discouraged.

config PRINTK_NMI
def_bool y
depends on PRINTK
depends on HAVE_NMI

config BUG
bool "BUG() support" if EXPERT
default y
@@ -979,7 +979,6 @@ void crash_kexec(struct pt_regs *regs)
old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);
if (old_cpu == PANIC_CPU_INVALID) {
/* This is the 1st CPU which comes here, so go ahead. */
printk_safe_flush_on_panic();
__crash_kexec(regs);

/*
@@ -248,7 +248,6 @@ void panic(const char *fmt, ...)
* Bypass the panic_cpu check and call __crash_kexec directly.
*/
if (!_crash_kexec_post_notifiers) {
printk_safe_flush_on_panic();
__crash_kexec(NULL);

/*
@@ -272,8 +271,6 @@ void panic(const char *fmt, ...)
*/
atomic_notifier_call_chain(&panic_notifier_list, 0, buf);

/* Call flush even twice. It tries harder with a single online CPU */
printk_safe_flush_on_panic();
kmsg_dump(KMSG_DUMP_PANIC);

/*
@@ -6,12 +6,6 @@

#ifdef CONFIG_PRINTK

#define PRINTK_SAFE_CONTEXT_MASK 0x007ffffff
#define PRINTK_NMI_DIRECT_CONTEXT_MASK 0x008000000
#define PRINTK_NMI_CONTEXT_MASK 0xff0000000

#define PRINTK_NMI_CONTEXT_OFFSET 0x010000000

/* Flags for a single printk record. */
enum printk_info_flags {
LOG_NEWLINE = 2, /* text ended with a newline */
@@ -25,10 +19,7 @@ int vprintk_store(int facility, int level,

__printf(1, 0) int vprintk_default(const char *fmt, va_list args);
__printf(1, 0) int vprintk_deferred(const char *fmt, va_list args);
void __printk_safe_enter(void);
void __printk_safe_exit(void);

void printk_safe_init(void);
bool printk_percpu_data_ready(void);

#define printk_safe_enter_irqsave(flags) \
@@ -43,18 +34,6 @@ bool printk_percpu_data_ready(void);
local_irq_restore(flags); \
} while (0)

#define printk_safe_enter_irq() \
do { \
local_irq_disable(); \
__printk_safe_enter(); \
} while (0)

#define printk_safe_exit_irq() \
do { \
__printk_safe_exit(); \
local_irq_enable(); \
} while (0)

void defer_console_output(void);

u16 printk_parse_prefix(const char *text, int *level,
@@ -69,9 +48,5 @@ u16 printk_parse_prefix(const char *text, int *level,
#define printk_safe_enter_irqsave(flags) local_irq_save(flags)
#define printk_safe_exit_irqrestore(flags) local_irq_restore(flags)

#define printk_safe_enter_irq() local_irq_disable()
#define printk_safe_exit_irq() local_irq_enable()

static inline void printk_safe_init(void) { }
static inline bool printk_percpu_data_ready(void) { return false; }
#endif /* CONFIG_PRINTK */
@@ -351,7 +351,7 @@ static int console_msg_format = MSG_FORMAT_DEFAULT;
*/

/* syslog_lock protects syslog_* variables and write access to clear_seq. */
static DEFINE_RAW_SPINLOCK(syslog_lock);
static DEFINE_MUTEX(syslog_lock);

#ifdef CONFIG_PRINTK
DECLARE_WAIT_QUEUE_HEAD(log_wait);
@@ -727,27 +727,22 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
if (ret)
return ret;

printk_safe_enter_irq();
if (!prb_read_valid(prb, atomic64_read(&user->seq), r)) {
if (file->f_flags & O_NONBLOCK) {
ret = -EAGAIN;
printk_safe_exit_irq();
goto out;
}

printk_safe_exit_irq();
ret = wait_event_interruptible(log_wait,
prb_read_valid(prb, atomic64_read(&user->seq), r));
if (ret)
goto out;
printk_safe_enter_irq();
}

if (r->info->seq != atomic64_read(&user->seq)) {
/* our last seen message is gone, return error and reset */
atomic64_set(&user->seq, r->info->seq);
ret = -EPIPE;
printk_safe_exit_irq();
goto out;
}

@@ -757,7 +752,6 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
&r->info->dev_info);

atomic64_set(&user->seq, r->info->seq + 1);
printk_safe_exit_irq();

if (len > count) {
ret = -EINVAL;
@@ -792,7 +786,6 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
if (offset)
return -ESPIPE;

printk_safe_enter_irq();
switch (whence) {
case SEEK_SET:
/* the first record */
@@ -813,7 +806,6 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
default:
ret = -EINVAL;
}
printk_safe_exit_irq();
return ret;
}

@@ -828,7 +820,6 @@ static __poll_t devkmsg_poll(struct file *file, poll_table *wait)

poll_wait(file, &log_wait, wait);

printk_safe_enter_irq();
if (prb_read_valid_info(prb, atomic64_read(&user->seq), &info, NULL)) {
/* return error when data has vanished underneath us */
if (info.seq != atomic64_read(&user->seq))
@@ -836,7 +827,6 @@ static __poll_t devkmsg_poll(struct file *file, poll_table *wait)
else
ret = EPOLLIN|EPOLLRDNORM;
}
printk_safe_exit_irq();

return ret;
}
@@ -869,9 +859,7 @@ static int devkmsg_open(struct inode *inode, struct file *file)
prb_rec_init_rd(&user->record, &user->info,
&user->text_buf[0], sizeof(user->text_buf));

printk_safe_enter_irq();
atomic64_set(&user->seq, prb_first_valid_seq(prb));
printk_safe_exit_irq();

file->private_data = user;
return 0;
@@ -1037,9 +1025,6 @@ static inline void log_buf_add_cpu(void) {}

static void __init set_percpu_data_ready(void)
{
printk_safe_init();
/* Make sure we set this flag only after printk_safe() init is done */
barrier();
__printk_percpu_data_ready = true;
}

@@ -1077,6 +1062,7 @@ void __init setup_log_buf(int early)
struct prb_desc *new_descs;
struct printk_info info;
struct printk_record r;
unsigned int text_size;
size_t new_descs_size;
size_t new_infos_size;
unsigned long flags;
@@ -1137,24 +1123,37 @@ void __init setup_log_buf(int early)
new_descs, ilog2(new_descs_count),
new_infos);

printk_safe_enter_irqsave(flags);
local_irq_save(flags);

log_buf_len = new_log_buf_len;
log_buf = new_log_buf;
new_log_buf_len = 0;

free = __LOG_BUF_LEN;
prb_for_each_record(0, &printk_rb_static, seq, &r)
free -= add_to_rb(&printk_rb_dynamic, &r);
prb_for_each_record(0, &printk_rb_static, seq, &r) {
text_size = add_to_rb(&printk_rb_dynamic, &r);
if (text_size > free)
free = 0;
else
free -= text_size;
}

/*
* This is early enough that everything is still running on the
* boot CPU and interrupts are disabled. So no new messages will
* appear during the transition to the dynamic buffer.
*/
prb = &printk_rb_dynamic;

printk_safe_exit_irqrestore(flags);
local_irq_restore(flags);

/*
* Copy any remaining messages that might have appeared from
* NMI context after copying but before switching to the
* dynamic buffer.
*/
prb_for_each_record(seq, &printk_rb_static, seq, &r) {
text_size = add_to_rb(&printk_rb_dynamic, &r);
if (text_size > free)
free = 0;
else
free -= text_size;
}

if (seq != prb_next_seq(&printk_rb_static)) {
pr_err("dropped %llu messages\n",
@@ -1476,12 +1475,14 @@ static u64 find_first_fitting_seq(u64 start_seq, u64 max_seq, size_t size,
return seq;
}

/* The caller is responsible for making sure @size is greater than 0. */
static int syslog_print(char __user *buf, int size)
{
struct printk_info info;
struct printk_record r;
char *text;
int len = 0;
u64 seq;

text = kmalloc(CONSOLE_LOG_MAX, GFP_KERNEL);
if (!text)
@@ -1489,17 +1490,35 @@ static int syslog_print(char __user *buf, int size)

prb_rec_init_rd(&r, &info, text, CONSOLE_LOG_MAX);

while (size > 0) {
mutex_lock(&syslog_lock);

/*
* Wait for the @syslog_seq record to be available. @syslog_seq may
* change while waiting.
*/
do {
seq = syslog_seq;

mutex_unlock(&syslog_lock);
len = wait_event_interruptible(log_wait, prb_read_valid(prb, seq, NULL));
mutex_lock(&syslog_lock);

if (len)
goto out;
} while (syslog_seq != seq);

/*
* Copy records that fit into the buffer. The above cycle makes sure
* that the first record is always available.
*/
do {
size_t n;
size_t skip;
int err;

printk_safe_enter_irq();
raw_spin_lock(&syslog_lock);
if (!prb_read_valid(prb, syslog_seq, &r)) {
raw_spin_unlock(&syslog_lock);
printk_safe_exit_irq();
if (!prb_read_valid(prb, syslog_seq, &r))
break;
}

if (r.info->seq != syslog_seq) {
/* message is gone, move to next valid one */
syslog_seq = r.info->seq;
@@ -1526,13 +1545,15 @@ static int syslog_print(char __user *buf, int size)
syslog_partial += n;
} else
n = 0;
raw_spin_unlock(&syslog_lock);
printk_safe_exit_irq();

if (!n)
break;

if (copy_to_user(buf, text + skip, n)) {
mutex_unlock(&syslog_lock);
err = copy_to_user(buf, text + skip, n);
mutex_lock(&syslog_lock);

if (err) {
if (!len)
len = -EFAULT;
break;
@@ -1541,8 +1562,9 @@ static int syslog_print(char __user *buf, int size)
len += n;
size -= n;
buf += n;
}

} while (size);
out:
mutex_unlock(&syslog_lock);
kfree(text);
return len;
}

@@ -1561,7 +1583,6 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
return -ENOMEM;

time = printk_time;
printk_safe_enter_irq();
/*
* Find first record that fits, including all following records,
* into the user-provided buffer for this dump.
@@ -1582,23 +1603,20 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
break;
}

printk_safe_exit_irq();
if (copy_to_user(buf + len, text, textlen))
len = -EFAULT;
else
len += textlen;
printk_safe_enter_irq();

if (len < 0)
break;
}

if (clear) {
raw_spin_lock(&syslog_lock);
mutex_lock(&syslog_lock);
latched_seq_write(&clear_seq, seq);
raw_spin_unlock(&syslog_lock);
mutex_unlock(&syslog_lock);
}
printk_safe_exit_irq();

kfree(text);
return len;
@@ -1606,23 +1624,9 @@ static int syslog_print_all(char __user *buf, int size, bool clear)

static void syslog_clear(void)
{
printk_safe_enter_irq();
raw_spin_lock(&syslog_lock);
mutex_lock(&syslog_lock);
latched_seq_write(&clear_seq, prb_next_seq(prb));
raw_spin_unlock(&syslog_lock);
printk_safe_exit_irq();
}

/* Return a consistent copy of @syslog_seq. */
static u64 read_syslog_seq_irq(void)
{
u64 seq;

raw_spin_lock_irq(&syslog_lock);
seq = syslog_seq;
raw_spin_unlock_irq(&syslog_lock);

return seq;
mutex_unlock(&syslog_lock);
}

int do_syslog(int type, char __user *buf, int len, int source)
@@ -1648,11 +1652,6 @@ int do_syslog(int type, char __user *buf, int len, int source)
return 0;
if (!access_ok(buf, len))
return -EFAULT;

error = wait_event_interruptible(log_wait,
prb_read_valid(prb, read_syslog_seq_irq(), NULL));
if (error)
return error;
error = syslog_print(buf, len);
break;
/* Read/clear last kernel messages */
@@ -1698,12 +1697,10 @@ int do_syslog(int type, char __user *buf, int len, int source)
break;
/* Number of chars in the log buffer */
case SYSLOG_ACTION_SIZE_UNREAD:
printk_safe_enter_irq();
raw_spin_lock(&syslog_lock);
mutex_lock(&syslog_lock);
if (!prb_read_valid_info(prb, syslog_seq, &info, NULL)) {
/* No unread messages. */
raw_spin_unlock(&syslog_lock);
printk_safe_exit_irq();
mutex_unlock(&syslog_lock);
return 0;
}
if (info.seq != syslog_seq) {
@@ -1731,8 +1728,7 @@ int do_syslog(int type, char __user *buf, int len, int source)
}
error -= syslog_partial;
}
raw_spin_unlock(&syslog_lock);
printk_safe_exit_irq();
mutex_unlock(&syslog_lock);
break;
/* Size of the log buffer */
case SYSLOG_ACTION_SIZE_BUFFER:
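For context, a hedged userspace sketch of the syslog(2)/klogctl(3) path served by the hunks above; with @syslog_lock now a mutex the kernel side may sleep while waiting for records and while copying into this buffer. The SYSLOG_ACTION_* values mirror the kernel's enum in include/linux/syslog.h; reading usually requires CAP_SYSLOG.

#include <stdio.h>
#include <sys/klog.h>

#define SYSLOG_ACTION_READ 2		/* blocks until unread records exist */
#define SYSLOG_ACTION_SIZE_UNREAD 9	/* bytes not yet read */

int main(void)
{
	char buf[4096];
	int unread = klogctl(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0);
	int n = klogctl(SYSLOG_ACTION_READ, buf, sizeof(buf));

	if (n > 0)
		printf("read %d of %d unread bytes from the kernel log\n", n, unread);
	return 0;
}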
@@ -1935,6 +1931,76 @@ static void call_console_drivers(const char *ext_text, size_t ext_len,
}
}

/*
* Recursion is tracked separately on each CPU. If NMIs are supported, an
* additional NMI context per CPU is also separately tracked. Until per-CPU
* is available, a separate "early tracking" is performed.
*/
static DEFINE_PER_CPU(u8, printk_count);
static u8 printk_count_early;
#ifdef CONFIG_HAVE_NMI
static DEFINE_PER_CPU(u8, printk_count_nmi);
static u8 printk_count_nmi_early;
#endif

/*
* Recursion is limited to keep the output sane. printk() should not require
* more than 1 level of recursion (allowing, for example, printk() to trigger
* a WARN), but a higher value is used in case some printk-internal errors
* exist, such as the ringbuffer validation checks failing.
*/
#define PRINTK_MAX_RECURSION 3

/*
* Return a pointer to the dedicated counter for the CPU+context of the
* caller.
*/
static u8 *__printk_recursion_counter(void)
{
#ifdef CONFIG_HAVE_NMI
if (in_nmi()) {
if (printk_percpu_data_ready())
return this_cpu_ptr(&printk_count_nmi);
return &printk_count_nmi_early;
}
#endif
if (printk_percpu_data_ready())
return this_cpu_ptr(&printk_count);
return &printk_count_early;
}

/*
* Enter recursion tracking. Interrupts are disabled to simplify tracking.
* The caller must check the boolean return value to see if the recursion is
* allowed. On failure, interrupts are not disabled.
*
* @recursion_ptr must be a variable of type (u8 *) and is the same variable
* that is passed to printk_exit_irqrestore().
*/
#define printk_enter_irqsave(recursion_ptr, flags) \
({ \
bool success = true; \
\
typecheck(u8 *, recursion_ptr); \
local_irq_save(flags); \
(recursion_ptr) = __printk_recursion_counter(); \
if (*(recursion_ptr) > PRINTK_MAX_RECURSION) { \
local_irq_restore(flags); \
success = false; \
} else { \
(*(recursion_ptr))++; \
} \
success; \
})

/* Exit recursion tracking, restoring interrupts. */
#define printk_exit_irqrestore(recursion_ptr, flags) \
do { \
typecheck(u8 *, recursion_ptr); \
(*(recursion_ptr))--; \
local_irq_restore(flags); \
} while (0)

int printk_delay_msec __read_mostly;

static inline void printk_delay(void)
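A condensed sketch of how the recursion guard above is meant to be used; it mirrors the vprintk_store() hunk that follows rather than introducing any additional API, and the caller name is hypothetical.

static int example_store(void)
{
	unsigned long irqflags;
	u8 *recursion_ptr;
	int ret = 0;

	if (!printk_enter_irqsave(recursion_ptr, irqflags))
		return 0;	/* recursion limit hit: drop the message */

	/* ... reserve and fill a ringbuffer record, set ret ... */

	printk_exit_irqrestore(recursion_ptr, irqflags);
	return ret;
}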
@@ -2037,11 +2103,14 @@ int vprintk_store(int facility, int level,
struct prb_reserved_entry e;
enum printk_info_flags flags = 0;
struct printk_record r;
unsigned long irqflags;
u16 trunc_msg_len = 0;
char prefix_buf[8];
u8 *recursion_ptr;
u16 reserve_size;
va_list args2;
u16 text_len;
int ret = 0;
u64 ts_nsec;

/*
@@ -2052,6 +2121,9 @@ int vprintk_store(int facility, int level,
*/
ts_nsec = local_clock();

if (!printk_enter_irqsave(recursion_ptr, irqflags))
return 0;

/*
* The sprintf needs to come first since the syslog prefix might be
* passed in as a parameter. An extra byte must be reserved so that
@@ -2089,7 +2161,8 @@ int vprintk_store(int facility, int level,
prb_commit(&e);
}

return text_len;
ret = text_len;
goto out;
}
}

@@ -2105,7 +2178,7 @@ int vprintk_store(int facility, int level,

prb_rec_init_wr(&r, reserve_size + trunc_msg_len);
if (!prb_reserve(&e, prb, &r))
return 0;
goto out;
}

/* fill message */
@@ -2127,7 +2200,10 @@ int vprintk_store(int facility, int level,
else
prb_final_commit(&e);

return (text_len + trunc_msg_len);
ret = text_len + trunc_msg_len;
out:
printk_exit_irqrestore(recursion_ptr, irqflags);
return ret;
}

asmlinkage int vprintk_emit(int facility, int level,
@@ -2136,7 +2212,6 @@ asmlinkage int vprintk_emit(int facility, int level,
{
int printed_len;
bool in_sched = false;
unsigned long flags;

/* Suppress unimportant messages after panic happens */
if (unlikely(suppress_printk))
@@ -2150,9 +2225,7 @@ asmlinkage int vprintk_emit(int facility, int level,
boot_delay_msec(level);
printk_delay();

printk_safe_enter_irqsave(flags);
printed_len = vprintk_store(facility, level, dev_info, fmt, args);
printk_safe_exit_irqrestore(flags);

/* If called from the scheduler, we can not call up(). */
if (!in_sched) {
@@ -2573,9 +2646,9 @@ again:

for (;;) {
size_t ext_len = 0;
int handover;
size_t len;

printk_safe_enter_irqsave(flags);
skip:
if (!prb_read_valid(prb, console_seq, &r))
break;
@@ -2625,19 +2698,22 @@ skip:
* were to occur on another CPU, it may wait for this one to
* finish. This task can not be preempted if there is a
* waiter waiting to take over.
*
* Interrupts are disabled because the hand over to a waiter
* must not be interrupted until the hand over is completed
* (@console_waiter is cleared).
*/
printk_safe_enter_irqsave(flags);
console_lock_spinning_enable();

stop_critical_timings(); /* don't trace print latency */
call_console_drivers(ext_text, ext_len, text, len);
start_critical_timings();

if (console_lock_spinning_disable_and_check()) {
printk_safe_exit_irqrestore(flags);
return;
}

handover = console_lock_spinning_disable_and_check();
printk_safe_exit_irqrestore(flags);
if (handover)
return;

if (do_cond_resched)
cond_resched();
@@ -2656,8 +2732,6 @@ skip:
* flush, no worries.
*/
retry = prb_read_valid(prb, next_seq, NULL);
printk_safe_exit_irqrestore(flags);

if (retry && console_trylock())
goto again;
}
@@ -2719,13 +2793,8 @@ void console_flush_on_panic(enum con_flush_mode mode)
console_trylock();
console_may_schedule = 0;

if (mode == CONSOLE_REPLAY_ALL) {
unsigned long flags;

printk_safe_enter_irqsave(flags);
if (mode == CONSOLE_REPLAY_ALL)
console_seq = prb_first_valid_seq(prb);
printk_safe_exit_irqrestore(flags);
}
console_unlock();
}

@@ -2860,7 +2929,6 @@ static int try_enable_new_console(struct console *newcon, bool user_specified)
*/
void register_console(struct console *newcon)
{
unsigned long flags;
struct console *bcon = NULL;
int err;

@@ -2965,9 +3033,9 @@ void register_console(struct console *newcon)
exclusive_console_stop_seq = console_seq;

/* Get a consistent copy of @syslog_seq. */
raw_spin_lock_irqsave(&syslog_lock, flags);
mutex_lock(&syslog_lock);
console_seq = syslog_seq;
raw_spin_unlock_irqrestore(&syslog_lock, flags);
mutex_unlock(&syslog_lock);
}
console_unlock();
console_sysfs_notify();
@@ -3377,14 +3445,12 @@ bool kmsg_dump_get_line(struct kmsg_dump_iter *iter, bool syslog,
struct printk_info info;
unsigned int line_count;
struct printk_record r;
unsigned long flags;
size_t l = 0;
bool ret = false;

if (iter->cur_seq < min_seq)
iter->cur_seq = min_seq;

printk_safe_enter_irqsave(flags);
prb_rec_init_rd(&r, &info, line, size);

/* Read text or count text lines? */
@@ -3405,7 +3471,6 @@ bool kmsg_dump_get_line(struct kmsg_dump_iter *iter, bool syslog,
iter->cur_seq = r.info->seq + 1;
ret = true;
out:
printk_safe_exit_irqrestore(flags);
if (len)
*len = l;
return ret;
@@ -3437,7 +3502,6 @@ bool kmsg_dump_get_buffer(struct kmsg_dump_iter *iter, bool syslog,
u64 min_seq = latched_seq_read_nolock(&clear_seq);
struct printk_info info;
struct printk_record r;
unsigned long flags;
u64 seq;
u64 next_seq;
size_t len = 0;
@@ -3450,7 +3514,6 @@ bool kmsg_dump_get_buffer(struct kmsg_dump_iter *iter, bool syslog,
if (iter->cur_seq < min_seq)
iter->cur_seq = min_seq;

printk_safe_enter_irqsave(flags);
if (prb_read_valid_info(prb, iter->cur_seq, &info, NULL)) {
if (info.seq != iter->cur_seq) {
/* messages are gone, move to first available one */
@@ -3459,10 +3522,8 @@ bool kmsg_dump_get_buffer(struct kmsg_dump_iter *iter, bool syslog,
}

/* last entry */
if (iter->cur_seq >= iter->next_seq) {
printk_safe_exit_irqrestore(flags);
if (iter->cur_seq >= iter->next_seq)
goto out;
}

/*
* Find first record that fits, including all following records,
@@ -3494,7 +3555,6 @@ bool kmsg_dump_get_buffer(struct kmsg_dump_iter *iter, bool syslog,

iter->next_seq = next_seq;
ret = true;
printk_safe_exit_irqrestore(flags);
out:
if (len_out)
*len_out = len;
@@ -3512,12 +3572,8 @@ EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer);
*/
void kmsg_dump_rewind(struct kmsg_dump_iter *iter)
{
unsigned long flags;

printk_safe_enter_irqsave(flags);
iter->cur_seq = latched_seq_read_nolock(&clear_seq);
iter->next_seq = prb_next_seq(prb);
printk_safe_exit_irqrestore(flags);
}
EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
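With the printk_safe sections removed from the kmsg_dump iterator functions above, dumpers call them without any IRQ bracketing of their own. A hedged sketch of a dumper using that API (every name except the kmsg_dump_* calls is hypothetical):

static void example_dump(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason)
{
	struct kmsg_dump_iter iter;
	char line[256];
	size_t len;

	kmsg_dump_rewind(&iter);
	while (kmsg_dump_get_line(&iter, true, line, sizeof(line), &len))
		;	/* write @len bytes of @line to a panic log device */
}

static struct kmsg_dumper example_dumper = {
	.dump = example_dump,
};
/* registered once with kmsg_dump_register(&example_dumper) */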
@@ -4,347 +4,16 @@
*/

#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/debug_locks.h>
#include <linux/kdb.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>
#include <linux/printk.h>
#include <linux/kprobes.h>

#include "internal.h"

/*
* In NMI and safe mode, printk() avoids taking locks. Instead,
* it uses an alternative implementation that temporary stores
* the strings into a per-CPU buffer. The content of the buffer
* is later flushed into the main ring buffer via IRQ work.
*
* The alternative implementation is chosen transparently
* by examining current printk() context mask stored in @printk_context
* per-CPU variable.
*
* The implementation allows to flush the strings also from another CPU.
* There are situations when we want to make sure that all buffers
* were handled or when IRQs are blocked.
*/

#define SAFE_LOG_BUF_LEN ((1 << CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT) - \
sizeof(atomic_t) - \
sizeof(atomic_t) - \
sizeof(struct irq_work))

struct printk_safe_seq_buf {
atomic_t len; /* length of written data */
atomic_t message_lost;
struct irq_work work; /* IRQ work that flushes the buffer */
unsigned char buffer[SAFE_LOG_BUF_LEN];
};

static DEFINE_PER_CPU(struct printk_safe_seq_buf, safe_print_seq);
static DEFINE_PER_CPU(int, printk_context);

static DEFINE_RAW_SPINLOCK(safe_read_lock);

#ifdef CONFIG_PRINTK_NMI
static DEFINE_PER_CPU(struct printk_safe_seq_buf, nmi_print_seq);
#endif

/* Get flushed in a more safe context. */
static void queue_flush_work(struct printk_safe_seq_buf *s)
{
if (printk_percpu_data_ready())
irq_work_queue(&s->work);
}

/*
* Add a message to per-CPU context-dependent buffer. NMI and printk-safe
* have dedicated buffers, because otherwise printk-safe preempted by
* NMI-printk would have overwritten the NMI messages.
*
* The messages are flushed from irq work (or from panic()), possibly,
* from other CPU, concurrently with printk_safe_log_store(). Should this
* happen, printk_safe_log_store() will notice the buffer->len mismatch
* and repeat the write.
*/
static __printf(2, 0) int printk_safe_log_store(struct printk_safe_seq_buf *s,
const char *fmt, va_list args)
{
int add;
size_t len;
va_list ap;

again:
len = atomic_read(&s->len);

/* The trailing '\0' is not counted into len. */
if (len >= sizeof(s->buffer) - 1) {
atomic_inc(&s->message_lost);
queue_flush_work(s);
return 0;
}

/*
* Make sure that all old data have been read before the buffer
* was reset. This is not needed when we just append data.
*/
if (!len)
smp_rmb();

va_copy(ap, args);
add = vscnprintf(s->buffer + len, sizeof(s->buffer) - len, fmt, ap);
va_end(ap);
if (!add)
return 0;

/*
* Do it once again if the buffer has been flushed in the meantime.
* Note that atomic_cmpxchg() is an implicit memory barrier that
* makes sure that the data were written before updating s->len.
*/
if (atomic_cmpxchg(&s->len, len, len + add) != len)
goto again;

queue_flush_work(s);
return add;
}

static inline void printk_safe_flush_line(const char *text, int len)
{
/*
* Avoid any console drivers calls from here, because we may be
* in NMI or printk_safe context (when in panic). The messages
* must go only into the ring buffer at this stage. Consoles will
* get explicitly called later when a crashdump is not generated.
*/
printk_deferred("%.*s", len, text);
}

/* printk part of the temporary buffer line by line */
static int printk_safe_flush_buffer(const char *start, size_t len)
{
const char *c, *end;
bool header;

c = start;
end = start + len;
header = true;

/* Print line by line. */
while (c < end) {
if (*c == '\n') {
printk_safe_flush_line(start, c - start + 1);
start = ++c;
header = true;
continue;
}

/* Handle continuous lines or missing new line. */
if ((c + 1 < end) && printk_get_level(c)) {
if (header) {
c = printk_skip_level(c);
continue;
}

printk_safe_flush_line(start, c - start);
start = c++;
header = true;
continue;
}

header = false;
c++;
}

/* Check if there was a partial line. Ignore pure header. */
if (start < end && !header) {
static const char newline[] = KERN_CONT "\n";

printk_safe_flush_line(start, end - start);
printk_safe_flush_line(newline, strlen(newline));
}

return len;
}

static void report_message_lost(struct printk_safe_seq_buf *s)
{
int lost = atomic_xchg(&s->message_lost, 0);

if (lost)
printk_deferred("Lost %d message(s)!\n", lost);
}

/*
* Flush data from the associated per-CPU buffer. The function
* can be called either via IRQ work or independently.
*/
static void __printk_safe_flush(struct irq_work *work)
{
struct printk_safe_seq_buf *s =
container_of(work, struct printk_safe_seq_buf, work);
unsigned long flags;
size_t len;
int i;

/*
* The lock has two functions. First, one reader has to flush all
* available message to make the lockless synchronization with
* writers easier. Second, we do not want to mix messages from
* different CPUs. This is especially important when printing
* a backtrace.
*/
raw_spin_lock_irqsave(&safe_read_lock, flags);

i = 0;
more:
len = atomic_read(&s->len);

/*
* This is just a paranoid check that nobody has manipulated
* the buffer an unexpected way. If we printed something then
* @len must only increase. Also it should never overflow the
* buffer size.
*/
if ((i && i >= len) || len > sizeof(s->buffer)) {
const char *msg = "printk_safe_flush: internal error\n";

printk_safe_flush_line(msg, strlen(msg));
len = 0;
}

if (!len)
goto out; /* Someone else has already flushed the buffer. */

/* Make sure that data has been written up to the @len */
smp_rmb();
i += printk_safe_flush_buffer(s->buffer + i, len - i);

/*
* Check that nothing has got added in the meantime and truncate
* the buffer. Note that atomic_cmpxchg() is an implicit memory
* barrier that makes sure that the data were copied before
* updating s->len.
*/
if (atomic_cmpxchg(&s->len, len, 0) != len)
goto more;

out:
report_message_lost(s);
raw_spin_unlock_irqrestore(&safe_read_lock, flags);
}

/**
* printk_safe_flush - flush all per-cpu nmi buffers.
*
* The buffers are flushed automatically via IRQ work. This function
* is useful only when someone wants to be sure that all buffers have
* been flushed at some point.
*/
void printk_safe_flush(void)
{
int cpu;

for_each_possible_cpu(cpu) {
#ifdef CONFIG_PRINTK_NMI
__printk_safe_flush(&per_cpu(nmi_print_seq, cpu).work);
#endif
__printk_safe_flush(&per_cpu(safe_print_seq, cpu).work);
}
}

/**
* printk_safe_flush_on_panic - flush all per-cpu nmi buffers when the system
* goes down.
*
* Similar to printk_safe_flush() but it can be called even in NMI context when
* the system goes down. It does the best effort to get NMI messages into
* the main ring buffer.
*
* Note that it could try harder when there is only one CPU online.
*/
void printk_safe_flush_on_panic(void)
{
/*
* Make sure that we could access the safe buffers.
* Do not risk a double release when more CPUs are up.
*/
if (raw_spin_is_locked(&safe_read_lock)) {
if (num_online_cpus() > 1)
return;

debug_locks_off();
raw_spin_lock_init(&safe_read_lock);
}

printk_safe_flush();
}

#ifdef CONFIG_PRINTK_NMI
/*
* Safe printk() for NMI context. It uses a per-CPU buffer to
* store the message. NMIs are not nested, so there is always only
* one writer running. But the buffer might get flushed from another
* CPU, so we need to be careful.
*/
static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
{
struct printk_safe_seq_buf *s = this_cpu_ptr(&nmi_print_seq);

return printk_safe_log_store(s, fmt, args);
}

void noinstr printk_nmi_enter(void)
{
this_cpu_add(printk_context, PRINTK_NMI_CONTEXT_OFFSET);
}

void noinstr printk_nmi_exit(void)
{
this_cpu_sub(printk_context, PRINTK_NMI_CONTEXT_OFFSET);
}

/*
* Marks a code that might produce many messages in NMI context
* and the risk of losing them is more critical than eventual
* reordering.
*
* It has effect only when called in NMI context. Then printk()
* will store the messages into the main logbuf directly.
*/
void printk_nmi_direct_enter(void)
{
if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
this_cpu_or(printk_context, PRINTK_NMI_DIRECT_CONTEXT_MASK);
}

void printk_nmi_direct_exit(void)
{
this_cpu_and(printk_context, ~PRINTK_NMI_DIRECT_CONTEXT_MASK);
}

#else

static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
{
return 0;
}

#endif /* CONFIG_PRINTK_NMI */

/*
* Lock-less printk(), to avoid deadlocks should the printk() recurse
* into itself. It uses a per-CPU buffer to store the message, just like
* NMI.
*/
static __printf(1, 0) int vprintk_safe(const char *fmt, va_list args)
{
struct printk_safe_seq_buf *s = this_cpu_ptr(&safe_print_seq);

return printk_safe_log_store(s, fmt, args);
}

/* Can be preempted by NMI. */
void __printk_safe_enter(void)
{
@@ -369,46 +38,15 @@ asmlinkage int vprintk(const char *fmt, va_list args)
* Use the main logbuf even in NMI. But avoid calling console
* drivers that might have their own locks.
*/
if ((this_cpu_read(printk_context) & PRINTK_NMI_DIRECT_CONTEXT_MASK)) {
unsigned long flags;
if (this_cpu_read(printk_context) || in_nmi()) {
int len;

printk_safe_enter_irqsave(flags);
len = vprintk_store(0, LOGLEVEL_DEFAULT, NULL, fmt, args);
printk_safe_exit_irqrestore(flags);
defer_console_output();
return len;
}

/* Use extra buffer in NMI. */
if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
return vprintk_nmi(fmt, args);

/* Use extra buffer to prevent a recursion deadlock in safe mode. */
if (this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK)
return vprintk_safe(fmt, args);

/* No obstacles. */
return vprintk_default(fmt, args);
}
EXPORT_SYMBOL(vprintk);

void __init printk_safe_init(void)
{
int cpu;

for_each_possible_cpu(cpu) {
struct printk_safe_seq_buf *s;

s = &per_cpu(safe_print_seq, cpu);
init_irq_work(&s->work, __printk_safe_flush);

#ifdef CONFIG_PRINTK_NMI
s = &per_cpu(nmi_print_seq, cpu);
init_irq_work(&s->work, __printk_safe_flush);
#endif
}

/* Flush pending messages that did not have scheduled IRQ works. */
printk_safe_flush();
}
|
|||
tracing_off();
|
||||
|
||||
local_irq_save(flags);
|
||||
printk_nmi_direct_enter();
|
||||
|
||||
/* Simulate the iterator */
|
||||
trace_init_global_iter(&iter);
|
||||
|
@ -9879,7 +9878,6 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
|
|||
atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
|
||||
}
|
||||
atomic_dec(&dump_running);
|
||||
printk_nmi_direct_exit();
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ftrace_dump);
|
||||
|
|
|
@ -75,12 +75,6 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
|
|||
touch_softlockup_watchdog();
|
||||
}
|
||||
|
||||
/*
|
||||
* Force flush any remote buffers that might be stuck in IRQ context
|
||||
* and therefore could not run their irq_work.
|
||||
*/
|
||||
printk_safe_flush();
|
||||
|
||||
clear_bit_unlock(0, &backtrace_flag);
|
||||
put_cpu();
|
||||
}
|
||||
|
@ -92,8 +86,14 @@ module_param(backtrace_idle, bool, 0644);
|
|||
bool nmi_cpu_backtrace(struct pt_regs *regs)
|
||||
{
|
||||
int cpu = smp_processor_id();
|
||||
unsigned long flags;
|
||||
|
||||
if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
|
||||
/*
|
||||
* Allow nested NMI backtraces while serializing
|
||||
* against other CPUs.
|
||||
*/
|
||||
printk_cpu_lock_irqsave(flags);
|
||||
if (!READ_ONCE(backtrace_idle) && regs && cpu_in_idle(instruction_pointer(regs))) {
|
||||
pr_warn("NMI backtrace for cpu %d skipped: idling at %pS\n",
|
||||
cpu, (void *)instruction_pointer(regs));
|
||||
|
@ -104,6 +104,7 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
|
|||
else
|
||||
dump_stack();
|
||||
}
|
||||
printk_cpu_unlock_irqrestore(flags);
|
||||
cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
|
||||
return true;
|
||||
}
|
||||
|
|
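The printk_cpu_lock_irqsave()/printk_cpu_unlock_irqrestore() pair used above only serializes output from concurrent CPUs so that multi-line dumps do not interleave; it can be taken from any context, including NMI. A hedged sketch of the same pattern outside the backtrace code (the surrounding function is hypothetical):

static void example_dump_this_cpu(void)
{
	unsigned long flags;

	printk_cpu_lock_irqsave(flags);
	pr_info("CPU%d diagnostic dump:\n", smp_processor_id());
	dump_stack();
	printk_cpu_unlock_irqrestore(flags);
}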