Merge branch 'rework/kthreads' into for-linus
commit 1c6fd59943
@@ -578,6 +578,7 @@ void __handle_sysrq(int key, bool check_mask)
 	rcu_sysrq_start();
 	rcu_read_lock();
+	printk_prefer_direct_enter();
 	/*
 	 * Raise the apparent loglevel to maximum so that the sysrq header
 	 * is shown to provide the user with positive feedback. We do not
@@ -619,6 +620,7 @@ void __handle_sysrq(int key, bool check_mask)
 		pr_cont("\n");
 		console_loglevel = orig_log_level;
 	}
+	printk_prefer_direct_exit();
 	rcu_read_unlock();
 	rcu_sysrq_end();
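The same bracketing pattern recurs throughout this merge: code producing urgent, user-visible output wraps it in printk_prefer_direct_enter()/printk_prefer_direct_exit() so messages are printed directly from the caller's context rather than handed off to the per-console kthreads. A minimal sketch of the pattern in a hypothetical caller (the function and message below are illustrative, not part of this commit):

#include <linux/printk.h>

/* Hypothetical diagnostic path: prefer direct printing while it runs. */
static void emit_emergency_report(int cpu)
{
        printk_prefer_direct_enter();

        pr_emerg("example: emergency condition detected on CPU %d\n", cpu);
        /* ... further pr_*() output that should not wait for the kthreads ... */

        printk_prefer_direct_exit();
}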
@@ -16,6 +16,7 @@
 #include <linux/atomic.h>
 #include <linux/types.h>
+#include <linux/mutex.h>

 struct vc_data;
 struct console_font_op;
@@ -151,6 +152,24 @@ struct console {
 	int	cflag;
 	uint	ispeed;
 	uint	ospeed;
+	u64	seq;
+	unsigned long dropped;
+	struct task_struct *thread;
+	bool	blocked;
+
+	/*
+	 * The per-console lock is used by printing kthreads to synchronize
+	 * this console with callers of console_lock(). This is necessary in
+	 * order to allow printing kthreads to run in parallel to each other,
+	 * while each safely accessing the @blocked field and synchronizing
+	 * against direct printing via console_lock/console_unlock.
+	 *
+	 * Note: For synchronizing against direct printing via
+	 * console_trylock/console_unlock, see the static global
+	 * variable @console_kthreads_active.
+	 */
+	struct mutex lock;
+
 	void	*data;
 	struct	console	*next;
 };
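The comment above describes how the new @lock and @blocked members are meant to be used; the printing-kthread implementation itself is in the large diff suppressed further below. Purely as an illustration (a hypothetical helper, not code from this commit), a per-console kthread could take the mutex and yield to direct printing roughly like this:

#include <linux/console.h>
#include <linux/mutex.h>

/*
 * Hypothetical helper, not from this commit: print one pending record on
 * @con unless a console_lock() holder has blocked kthread printing.
 */
static bool kthread_print_one_record(struct console *con)
{
        bool printed = false;

        mutex_lock(&con->lock);
        if (!con->blocked) {
                /* emit the record at con->seq and advance it here */
                printed = true;
        }
        mutex_unlock(&con->lock);

        return printed;
}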
@@ -170,6 +170,11 @@ extern void __printk_safe_exit(void);
 #define printk_deferred_enter __printk_safe_enter
 #define printk_deferred_exit __printk_safe_exit

+extern void printk_prefer_direct_enter(void);
+extern void printk_prefer_direct_exit(void);
+
+extern bool pr_flush(int timeout_ms, bool reset_on_progress);
+
 /*
  * Please don't use printk_ratelimit(), because it shares ratelimiting state
  * with all other unrelated printk_ratelimit() callsites. Instead use
@@ -220,6 +225,19 @@ static inline void printk_deferred_exit(void)
 {
 }

+static inline void printk_prefer_direct_enter(void)
+{
+}
+
+static inline void printk_prefer_direct_exit(void)
+{
+}
+
+static inline bool pr_flush(int timeout_ms, bool reset_on_progress)
+{
+	return true;
+}
+
 static inline int printk_ratelimit(void)
 {
 	return 0;
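Because no-op stubs are provided for the !CONFIG_PRINTK case, callers can use the new interfaces unconditionally, without #ifdefs. A hedged usage sketch of pr_flush() (the surrounding helper is made up for illustration):

#include <linux/printk.h>

/* Illustrative only: give consoles a chance to catch up before a
 * drastic action, waiting at most one second.
 */
static void example_prepare_for_restart(void)
{
        pr_emerg("example: restarting system\n");

        /* Wait up to 1000 ms for pending records to reach the consoles,
         * restarting the timeout whenever progress is being made.
         */
        if (!pr_flush(1000, true))
                pr_emerg("example: some consoles may not be flushed\n");
}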
@@ -277,46 +295,58 @@ static inline void printk_trigger_flush(void)
 #endif

 #ifdef CONFIG_SMP
-extern int __printk_cpu_trylock(void);
-extern void __printk_wait_on_cpu_lock(void);
-extern void __printk_cpu_unlock(void);
-
-/**
- * printk_cpu_lock_irqsave() - Acquire the printk cpu-reentrant spinning
- *                             lock and disable interrupts.
- * @flags: Stack-allocated storage for saving local interrupt state,
- *         to be passed to printk_cpu_unlock_irqrestore().
- *
- * If the lock is owned by another CPU, spin until it becomes available.
- * Interrupts are restored while spinning.
- */
-#define printk_cpu_lock_irqsave(flags)		\
-	for (;;) {				\
-		local_irq_save(flags);		\
-		if (__printk_cpu_trylock())	\
-			break;			\
-		local_irq_restore(flags);	\
-		__printk_wait_on_cpu_lock();	\
-	}
-
-/**
- * printk_cpu_unlock_irqrestore() - Release the printk cpu-reentrant spinning
- *                                  lock and restore interrupts.
- * @flags: Caller's saved interrupt state, from printk_cpu_lock_irqsave().
- */
-#define printk_cpu_unlock_irqrestore(flags)	\
-	do {					\
-		__printk_cpu_unlock();		\
-		local_irq_restore(flags);	\
-	} while (0)				\
+extern int __printk_cpu_sync_try_get(void);
+extern void __printk_cpu_sync_wait(void);
+extern void __printk_cpu_sync_put(void);

 #else

-#define printk_cpu_lock_irqsave(flags) ((void)flags)
-#define printk_cpu_unlock_irqrestore(flags) ((void)flags)
+#define __printk_cpu_sync_try_get() true
+#define __printk_cpu_sync_wait()
+#define __printk_cpu_sync_put()

 #endif /* CONFIG_SMP */

+/**
+ * printk_cpu_sync_get_irqsave() - Disable interrupts and acquire the printk
+ *                                 cpu-reentrant spinning lock.
+ * @flags: Stack-allocated storage for saving local interrupt state,
+ *         to be passed to printk_cpu_sync_put_irqrestore().
+ *
+ * If the lock is owned by another CPU, spin until it becomes available.
+ * Interrupts are restored while spinning.
+ *
+ * CAUTION: This function must be used carefully. It does not behave like a
+ * typical lock. Here are important things to watch out for...
+ *
+ *   * This function is reentrant on the same CPU. Therefore the calling
+ *     code must not assume exclusive access to data if code accessing the
+ *     data can run reentrant or within NMI context on the same CPU.
+ *
+ *   * If there exists usage of this function from NMI context, it becomes
+ *     unsafe to perform any type of locking or spinning to wait for other
+ *     CPUs after calling this function from any context. This includes
+ *     using spinlocks or any other busy-waiting synchronization methods.
+ */
+#define printk_cpu_sync_get_irqsave(flags)	\
+	for (;;) {				\
+		local_irq_save(flags);		\
+		if (__printk_cpu_sync_try_get()) \
+			break;			\
+		local_irq_restore(flags);	\
+		__printk_cpu_sync_wait();	\
+	}
+
+/**
+ * printk_cpu_sync_put_irqrestore() - Release the printk cpu-reentrant spinning
+ *                                    lock and restore interrupts.
+ * @flags: Caller's saved interrupt state, from printk_cpu_sync_get_irqsave().
+ */
+#define printk_cpu_sync_put_irqrestore(flags)	\
+	do {					\
+		__printk_cpu_sync_put();	\
+		local_irq_restore(flags);	\
+	} while (0)
+
 extern int kptr_restrict;

 /**
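The renamed cpu-reentrant spinning lock is used exactly like its predecessor; only the names change. The dump_stack_lvl() and nmi_cpu_backtrace() hunks further down show the in-tree conversions. A standalone sketch of a caller keeping a multi-line dump contiguous (the function and messages are hypothetical):

#include <linux/printk.h>

/* Hypothetical multi-line dump kept contiguous across CPUs. */
static void example_dump_state(void)
{
        unsigned long flags;

        /*
         * Serialize against other CPUs doing the same, while staying
         * reentrant on this CPU (e.g. if an NMI lands here and dumps too).
         */
        printk_cpu_sync_get_irqsave(flags);

        pr_info("example: line 1 of an atomic block of output\n");
        pr_info("example: line 2 of an atomic block of output\n");

        printk_cpu_sync_put_irqrestore(flags);
}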
@@ -127,6 +127,8 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
 	 * complain:
 	 */
 	if (sysctl_hung_task_warnings) {
+		printk_prefer_direct_enter();
+
 		if (sysctl_hung_task_warnings > 0)
 			sysctl_hung_task_warnings--;
 		pr_err("INFO: task %s:%d blocked for more than %ld seconds.\n",
@@ -142,6 +144,8 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)

 		if (sysctl_hung_task_all_cpu_backtrace)
 			hung_task_show_all_bt = true;
+
+		printk_prefer_direct_exit();
 	}

 	touch_nmi_watchdog();
@@ -204,12 +208,17 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
 	}
 unlock:
 	rcu_read_unlock();
-	if (hung_task_show_lock)
+	if (hung_task_show_lock) {
+		printk_prefer_direct_enter();
 		debug_show_all_locks();
+		printk_prefer_direct_exit();
+	}

 	if (hung_task_show_all_bt) {
 		hung_task_show_all_bt = false;
+		printk_prefer_direct_enter();
 		trigger_all_cpu_backtrace();
+		printk_prefer_direct_exit();
 	}

 	if (hung_task_call_panic)
@@ -560,6 +560,8 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
 {
 	disable_trace_on_warning();

+	printk_prefer_direct_enter();
+
 	if (file)
 		pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS\n",
 			raw_smp_processor_id(), current->pid, file, line,
@@ -597,6 +599,8 @@ void __warn(const char *file, int line, void *caller, unsigned taint,

 	/* Just a warning, don't kill lockdep. */
 	add_taint(taint, LOCKDEP_STILL_OK);
+
+	printk_prefer_direct_exit();
 }

 #ifndef __WARN_FLAGS

(File diff suppressed because it is too large.)
@@ -619,6 +619,7 @@ static void print_cpu_stall(unsigned long gps)
 	 * See Documentation/RCU/stallwarn.rst for info on how to debug
 	 * RCU CPU stall warnings.
 	 */
+	printk_prefer_direct_enter();
 	trace_rcu_stall_warning(rcu_state.name, TPS("SelfDetected"));
 	pr_err("INFO: %s self-detected stall on CPU\n", rcu_state.name);
 	raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
@@ -656,6 +657,7 @@ static void print_cpu_stall(unsigned long gps)
 	 */
 	set_tsk_need_resched(current);
 	set_preempt_need_resched();
+	printk_prefer_direct_exit();
 }

 static void check_cpu_stall(struct rcu_data *rdp)
@@ -447,9 +447,11 @@ static int __orderly_reboot(void)
 	ret = run_cmd(reboot_cmd);

 	if (ret) {
+		printk_prefer_direct_enter();
 		pr_warn("Failed to start orderly reboot: forcing the issue\n");
 		emergency_sync();
 		kernel_restart(NULL);
+		printk_prefer_direct_exit();
 	}

 	return ret;
@@ -462,6 +464,7 @@ static int __orderly_poweroff(bool force)
 	ret = run_cmd(poweroff_cmd);

 	if (ret && force) {
+		printk_prefer_direct_enter();
 		pr_warn("Failed to start orderly shutdown: forcing the issue\n");

 		/*
@@ -471,6 +474,7 @@ static int __orderly_poweroff(bool force)
 		 */
 		emergency_sync();
 		kernel_power_off();
+		printk_prefer_direct_exit();
 	}

 	return ret;
@@ -528,6 +532,8 @@ EXPORT_SYMBOL_GPL(orderly_reboot);
  */
 static void hw_failure_emergency_poweroff_func(struct work_struct *work)
 {
+	printk_prefer_direct_enter();
+
 	/*
 	 * We have reached here after the emergency shutdown waiting period has
 	 * expired. This means orderly_poweroff has not been able to shut off
@@ -544,6 +550,8 @@ static void hw_failure_emergency_poweroff_func(struct work_struct *work)
 	 */
 	pr_emerg("Hardware protection shutdown failed. Trying emergency restart\n");
 	emergency_restart();
+
+	printk_prefer_direct_exit();
 }

 static DECLARE_DELAYED_WORK(hw_failure_emergency_poweroff_work,
@@ -582,11 +590,13 @@ void hw_protection_shutdown(const char *reason, int ms_until_forced)
 {
 	static atomic_t allow_proceed = ATOMIC_INIT(1);

+	printk_prefer_direct_enter();
+
 	pr_emerg("HARDWARE PROTECTION shutdown (%s)\n", reason);

 	/* Shutdown should be initiated only once. */
 	if (!atomic_dec_and_test(&allow_proceed))
-		return;
+		goto out;

 	/*
 	 * Queue a backup emergency shutdown in the event of
@@ -594,6 +604,8 @@ void hw_protection_shutdown(const char *reason, int ms_until_forced)
 	 */
 	hw_failure_emergency_poweroff(ms_until_forced);
 	orderly_poweroff(true);
+out:
+	printk_prefer_direct_exit();
 }
 EXPORT_SYMBOL_GPL(hw_protection_shutdown);
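Note the return to goto out change in hw_protection_shutdown() above: once printk_prefer_direct_enter() has been called, every exit path has to reach the matching printk_prefer_direct_exit() so the calls stay balanced. A minimal sketch of that early-exit pattern (the function is hypothetical, not from this commit):

/* Hypothetical sketch of balanced enter/exit with an early exit. */
static void example_guarded_action(bool allowed)
{
        printk_prefer_direct_enter();

        pr_emerg("example: attempting protective action\n");

        if (!allowed)
                goto out;       /* early exits must still reach the exit call */

        /* ... perform the action ... */
out:
        printk_prefer_direct_exit();
}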
@@ -424,6 +424,8 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 		/* Start period for the next softlockup warning. */
 		update_report_ts();

+		printk_prefer_direct_enter();
+
 		pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
 			smp_processor_id(), duration,
 			current->comm, task_pid_nr(current));
@@ -442,6 +444,8 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 		add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
 		if (softlockup_panic)
 			panic("softlockup: hung tasks");
+
+		printk_prefer_direct_exit();
 	}

 	return HRTIMER_RESTART;
@@ -135,6 +135,8 @@ static void watchdog_overflow_callback(struct perf_event *event,
 		if (__this_cpu_read(hard_watchdog_warn) == true)
 			return;

+		printk_prefer_direct_enter();
+
 		pr_emerg("Watchdog detected hard LOCKUP on cpu %d\n",
 			 this_cpu);
 		print_modules();
@@ -155,6 +157,8 @@ static void watchdog_overflow_callback(struct perf_event *event,
 		if (hardlockup_panic)
 			nmi_panic(regs, "Hard LOCKUP");

+		printk_prefer_direct_exit();
+
 		__this_cpu_write(hard_watchdog_warn, true);
 		return;
 	}
@@ -102,9 +102,9 @@ asmlinkage __visible void dump_stack_lvl(const char *log_lvl)
 	 * Permit this cpu to perform nested stack dumps while serialising
 	 * against other CPUs
 	 */
-	printk_cpu_lock_irqsave(flags);
+	printk_cpu_sync_get_irqsave(flags);
 	__dump_stack(log_lvl);
-	printk_cpu_unlock_irqrestore(flags);
+	printk_cpu_sync_put_irqrestore(flags);
 }
 EXPORT_SYMBOL(dump_stack_lvl);
@@ -99,7 +99,7 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
 		 * Allow nested NMI backtraces while serializing
 		 * against other CPUs.
 		 */
-		printk_cpu_lock_irqsave(flags);
+		printk_cpu_sync_get_irqsave(flags);
 		if (!READ_ONCE(backtrace_idle) && regs && cpu_in_idle(instruction_pointer(regs))) {
 			pr_warn("NMI backtrace for cpu %d skipped: idling at %pS\n",
 				cpu, (void *)instruction_pointer(regs));
@@ -110,7 +110,7 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
 			else
 				dump_stack();
 		}
-		printk_cpu_unlock_irqrestore(flags);
+		printk_cpu_sync_put_irqrestore(flags);
 		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
 		return true;
 	}