From e7f7c99ba911f56bc338845c1cd72954ba591707 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Mon, 15 Nov 2021 11:55:57 -0600 Subject: [PATCH 01/44] signal: In get_signal test for signal_group_exit every time through the loop Recently while investigating a problem with rr and signals I noticed that siglock is dropped in ptrace_signal and get_signal does not jump to relock. Looking farther to see if the problem is anywhere else I see that do_signal_stop also returns if signal_group_exit is true. I believe that test can now never be true, but it is a bit hard to trace through and be certain. Testing signal_group_exit is not expensive, so move the test for signal_group_exit into the for loop inside of get_signal to ensure the test is never skipped improperly. This has been a potential problem since the test for signal_group_exit was added. Fixes: 35634ffa1751 ("signal: Always notice exiting tasks") Reviewed-by: Kees Cook Link: https://lkml.kernel.org/r/875yssekcd.fsf_-_@email.froward.int.ebiederm.org Signed-off-by: "Eric W. Biederman" --- kernel/signal.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/kernel/signal.c b/kernel/signal.c index 7c4b7ae714d4..986fa69c15c5 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -2662,19 +2662,19 @@ relock: goto relock; } - /* Has this task already been marked for death? */ - if (signal_group_exit(signal)) { - ksig->info.si_signo = signr = SIGKILL; - sigdelset(&current->pending.signal, SIGKILL); - trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO, - &sighand->action[SIGKILL - 1]); - recalc_sigpending(); - goto fatal; - } - for (;;) { struct k_sigaction *ka; + /* Has this task already been marked for death? */ + if (signal_group_exit(signal)) { + ksig->info.si_signo = signr = SIGKILL; + sigdelset(&current->pending.signal, SIGKILL); + trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO, + &sighand->action[SIGKILL - 1]); + recalc_sigpending(); + goto fatal; + } + if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) && do_signal_stop(0)) goto relock; From 5768d8906bc23d512b1a736c1e198aa833a6daa4 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Mon, 15 Nov 2021 13:47:13 -0600 Subject: [PATCH 02/44] signal: Requeue signals in the appropriate queue In the event that a tracer changes which signal needs to be delivered and that signal is currently blocked then the signal needs to be requeued for later delivery. With the advent of CLONE_THREAD the kernel has 2 signal queues per task. The per process queue and the per task queue. Update the code so that if the signal is removed from the per process queue it is requeued on the per process queue. This is necessary to make it appear the signal was never dequeued. The rr debugger reasonably believes that the state of the process from the last ptrace_stop it observed until PTRACE_EVENT_EXIT can be recreated by simply letting a process run. If a SIGKILL interrupts a ptrace_stop this is not true today. So return signals to their original queue in ptrace_signal so that signals that are not delivered appear like they were never dequeued. Fixes: 794aa320b79d ("[PATCH] sigfix-2.5.40-D6") History Tree: https://git.kernel.org/pub/scm/linux/kernel/git/tglx/history.git Reviewed-by: Kees Cook Link: https://lkml.kernel.org/r/87zgq4d5r4.fsf_-_@email.froward.int.ebiederm.org Signed-off-by: "Eric W. 
Biederman" --- fs/signalfd.c | 5 +++-- include/linux/sched/signal.h | 7 ++++--- kernel/signal.c | 21 ++++++++++++++------- 3 files changed, 21 insertions(+), 12 deletions(-) diff --git a/fs/signalfd.c b/fs/signalfd.c index 040e1cf90528..74f134cd1ff6 100644 --- a/fs/signalfd.c +++ b/fs/signalfd.c @@ -165,11 +165,12 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo, static ssize_t signalfd_dequeue(struct signalfd_ctx *ctx, kernel_siginfo_t *info, int nonblock) { + enum pid_type type; ssize_t ret; DECLARE_WAITQUEUE(wait, current); spin_lock_irq(¤t->sighand->siglock); - ret = dequeue_signal(current, &ctx->sigmask, info); + ret = dequeue_signal(current, &ctx->sigmask, info, &type); switch (ret) { case 0: if (!nonblock) @@ -184,7 +185,7 @@ static ssize_t signalfd_dequeue(struct signalfd_ctx *ctx, kernel_siginfo_t *info add_wait_queue(¤t->sighand->signalfd_wqh, &wait); for (;;) { set_current_state(TASK_INTERRUPTIBLE); - ret = dequeue_signal(current, &ctx->sigmask, info); + ret = dequeue_signal(current, &ctx->sigmask, info, &type); if (ret != 0) break; if (signal_pending(current)) { diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index 23505394ef70..167995d471da 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h @@ -286,17 +286,18 @@ static inline int signal_group_exit(const struct signal_struct *sig) extern void flush_signals(struct task_struct *); extern void ignore_signals(struct task_struct *); extern void flush_signal_handlers(struct task_struct *, int force_default); -extern int dequeue_signal(struct task_struct *task, - sigset_t *mask, kernel_siginfo_t *info); +extern int dequeue_signal(struct task_struct *task, sigset_t *mask, + kernel_siginfo_t *info, enum pid_type *type); static inline int kernel_dequeue_signal(void) { struct task_struct *task = current; kernel_siginfo_t __info; + enum pid_type __type; int ret; spin_lock_irq(&task->sighand->siglock); - ret = dequeue_signal(task, &task->blocked, &__info); + ret = dequeue_signal(task, &task->blocked, &__info, &__type); spin_unlock_irq(&task->sighand->siglock); return ret; diff --git a/kernel/signal.c b/kernel/signal.c index 986fa69c15c5..43e8b7e362b0 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -626,7 +626,8 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, * * All callers have to hold the siglock. */ -int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info) +int dequeue_signal(struct task_struct *tsk, sigset_t *mask, + kernel_siginfo_t *info, enum pid_type *type) { bool resched_timer = false; int signr; @@ -634,8 +635,10 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *in /* We only dequeue private signals from ourselves, we don't let * signalfd steal them */ + *type = PIDTYPE_PID; signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer); if (!signr) { + *type = PIDTYPE_TGID; signr = __dequeue_signal(&tsk->signal->shared_pending, mask, info, &resched_timer); #ifdef CONFIG_POSIX_TIMERS @@ -2522,7 +2525,7 @@ static void do_freezer_trap(void) freezable_schedule(); } -static int ptrace_signal(int signr, kernel_siginfo_t *info) +static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type) { /* * We do not check sig_kernel_stop(signr) but set this marker @@ -2563,7 +2566,7 @@ static int ptrace_signal(int signr, kernel_siginfo_t *info) /* If the (new) signal is now blocked, requeue it. 
*/ if (sigismember(&current->blocked, signr)) { - send_signal(signr, info, current, PIDTYPE_PID); + send_signal(signr, info, current, type); signr = 0; } @@ -2664,6 +2667,7 @@ relock: for (;;) { struct k_sigaction *ka; + enum pid_type type; /* Has this task already been marked for death? */ if (signal_group_exit(signal)) { @@ -2706,16 +2710,18 @@ relock: * so that the instruction pointer in the signal stack * frame points to the faulting instruction. */ + type = PIDTYPE_PID; signr = dequeue_synchronous_signal(&ksig->info); if (!signr) - signr = dequeue_signal(current, &current->blocked, &ksig->info); + signr = dequeue_signal(current, &current->blocked, + &ksig->info, &type); if (!signr) break; /* will return 0 */ if (unlikely(current->ptrace) && (signr != SIGKILL) && !(sighand->action[signr -1].sa.sa_flags & SA_IMMUTABLE)) { - signr = ptrace_signal(signr, &ksig->info); + signr = ptrace_signal(signr, &ksig->info, type); if (!signr) continue; } @@ -3540,6 +3546,7 @@ static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info, ktime_t *to = NULL, timeout = KTIME_MAX; struct task_struct *tsk = current; sigset_t mask = *which; + enum pid_type type; int sig, ret = 0; if (ts) { @@ -3556,7 +3563,7 @@ static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info, signotset(&mask); spin_lock_irq(&tsk->sighand->siglock); - sig = dequeue_signal(tsk, &mask, info); + sig = dequeue_signal(tsk, &mask, info, &type); if (!sig && timeout) { /* * None ready, temporarily unblock those we're interested @@ -3575,7 +3582,7 @@ static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info, spin_lock_irq(&tsk->sighand->siglock); __set_task_blocked(tsk, &tsk->real_blocked); sigemptyset(&tsk->real_blocked); - sig = dequeue_signal(tsk, &mask, info); + sig = dequeue_signal(tsk, &mask, info, &type); } spin_unlock_irq(&tsk->sighand->siglock); From b171f667f3787946a8ba9644305339e93ae799c9 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Mon, 15 Nov 2021 13:49:45 -0600 Subject: [PATCH 03/44] signal: Requeue ptrace signals MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Kyle Huey writes: > rr, a userspace record and replay debugger[0], uses the recorded register > state at PTRACE_EVENT_EXIT to find the point in time at which to cease > executing the program during replay. > > If a SIGKILL races with processing another signal in get_signal, it is > possible for the kernel to decline to notify the tracer of the original > signal. But if the original signal had a handler, the kernel proceeds > with setting up a signal handler frame as if the tracer had chosen to > deliver the signal unmodified to the tracee. When the kernel goes to > execute the signal handler that it has now modified the stack and registers > for, it will discover the pending SIGKILL, and terminate the tracee > without executing the handler. When PTRACE_EVENT_EXIT is delivered to > the tracer, however, the effects of handler setup will be visible to > the tracer. > > Because rr (the tracer) was never notified of the signal, it is not aware > that a signal handler frame was set up and expects the state of the program > at PTRACE_EVENT_EXIT to be a state that will be reconstructed naturally > by allowing the program to execute from the last event. When that fails > to happen during replay, rr will assert and die. > > The following patches add an explicit check for a newly pending SIGKILL > after the ptracer has been notified and the siglock has been reacquired. 
> If this happens, we stop processing the current signal and proceed > immediately to handling the SIGKILL. This makes the state reported at > PTRACE_EVENT_EXIT the unmodified state of the program, and also avoids the > work to set up a signal handler frame that will never be used. > > [0] https://rr-project.org/ The problem is that while the traced process makes it into ptrace_stop, the tracee is killed before the tracer manages to wait for the tracee and discover which signal was about to be delivered. More generally the problem is that while siglock was dropped a signal with process wide effect is short circuit delivered to the entire process killing it, but the process continues to try and deliver another signal. In general it is impossible to avoid all cases where work is performed after the process has been killed. In particular if the process is killed after get_signal returns the code will simply not know it has been killed until after delivering the signal frame to userspace. On the other hand when the code has already discovered the process has been killed and taken user space visible action that shows the kernel knows the process has been killed, it is just silly to then write the signal frame to the user space stack. Instead of being silly detect the process has been killed in ptrace_signal and requeue the signal so the code can pretend it was simply never dequeued for delivery. To test the process has been killed I use fatal_signal_pending rather than signal_group_exit to match the test in signal_pending_state which is used in schedule which is where ptrace_stop detects the process has been killed. Requeuing the signal so the code can pretend it was simply never dequeued improves the user space visible behavior that has been present since ebf5ebe31d2c ("[PATCH] signal-fixes-2.5.59-A4"). Kyle Huey verified that this change in behavior makes rr happy. Reported-by: Kyle Huey Reported-by: Marko Mäkelä History Tree: https://git.kernel.org/pub/scm/linux/kernel/git/tglx/history.git Reviewed-by: Kees Cook Link: https://lkml.kernel.org/r/87tugcd5p2.fsf_-_@email.froward.int.ebiederm.org Signed-off-by: "Eric W. Biederman" --- kernel/signal.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/kernel/signal.c b/kernel/signal.c index 43e8b7e362b0..621401550f0f 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -2565,7 +2565,8 @@ static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type) } /* If the (new) signal is now blocked, requeue it. */ - if (sigismember(&current->blocked, signr)) { + if (sigismember(&current->blocked, signr) || + fatal_signal_pending(current)) { send_signal(signr, info, current, type); signr = 0; } From 5e354747b2c91f64544b97760d38e2d3280307b2 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Tue, 7 Dec 2021 16:42:35 -0600 Subject: [PATCH 04/44] exit/s390: Remove dead reference to do_exit from copy_thread My s390 assembly is not particularly good so I have read the history of the reference to do_exit in copy_thread and have been able to verify that do_exit is not used. The general argument is that s390 has been changed to use the generic kernel_thread and kernel_execve and the generic versions do not call do_exit. So it is strange to see a do_exit reference sitting there. The history of the do_exit reference in s390's version of copy_thread seems conclusive that the do_exit reference is something that lingers and should have been removed several years ago. 
Up through 8d19f15a60be ("[PATCH] s390 update (1/27): arch.") the s390 code made a call to the exit(2) system call when a kernel thread finished. Then kernel_thread_starter was added which branched directly to the value in register 11 when the kernel thread finished. The value in register 11 was set in kernel_thread to "regs.gprs[11] = (unsigned long) do_exit". In commit 37fe5d41f640 ("s390: fold kernel_thread_helper() into ret_from_fork()") kernel_thread_starter was moved into entry.S and entry64.S unchanged (except for the syntax differences between inline assembly and the assembly file). In commit f9a7e025dfc2 ("s390: switch to generic kernel_thread()") the assignment to "gprs[11]" was moved into copy_thread from the old kernel_thread. The helper kernel_thread_starter was still being used and was still branching to "%r11" at the end. In commit 30dcb0996e40 ("s390: switch to saner kernel_execve() semantics") kernel_thread_starter was changed to unconditionally branch to sysc_tracenogo instead of to %r11 which held the value of do_exit. Unfortunately copy_thread was not updated to stop passing do_exit in "gprs[11]". In commit 56e62a737028 ("s390: convert to generic entry") kernel_thread_starter was replaced by __ret_from_fork. And the code still continued to pass do_exit in "gprs[11]" despite __ret_from_fork not caring in the slightest. Remove this dead reference to do_exit to make it clear that s390 is not doing anything with do_exit in copy_thread. Cc: Heiko Carstens Cc: Vasily Gorbik Cc: Christian Borntraeger Cc: Alexander Gordeev Cc: Martin Schwidefsky Cc: Al Viro Fixes: 30dcb0996e40 ("s390: switch to saner kernel_execve() semantics") History Tree: https://git.kernel.org/pub/scm/linux/kernel/git/tglx/history.git Acked-by: Heiko Carstens Signed-off-by: "Eric W. Biederman" --- arch/s390/kernel/process.c | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index e8858b2de24b..71d86f73b02c 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -139,7 +139,6 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp, (unsigned long)__ret_from_fork; frame->childregs.gprs[9] = new_stackp; /* function */ frame->childregs.gprs[10] = arg; - frame->childregs.gprs[11] = (unsigned long)do_exit; frame->childregs.orig_gpr2 = -1; frame->childregs.last_break = 1; return 0; From 0e25498f8cd43c1b5aa327f373dd094e9a006da7 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Mon, 28 Jun 2021 14:52:01 -0500 Subject: [PATCH 05/44] exit: Add and use make_task_dead. There are two big uses of do_exit. The first is its designed use as the guts of the exit(2) system call. The second use is to terminate a task after something catastrophic has happened like a NULL pointer dereference in kernel code. Add a function make_task_dead that is initially exactly the same as do_exit to cover the cases where do_exit is called to handle catastrophic failure. In time this can probably be reduced to just a light wrapper around do_task_dead. For now keep it exactly the same so that there will be no behavioral differences when introducing this new concept. Replace all of the uses of do_exit that use it for catastrophic task cleanup with make_task_dead to make it clear what the code is doing. As part of this, rename rewind_stack_do_exit to rewind_stack_and_make_dead. Signed-off-by: "Eric W. 
Biederman" --- arch/alpha/kernel/traps.c | 6 +++--- arch/alpha/mm/fault.c | 2 +- arch/arm/kernel/traps.c | 2 +- arch/arm/mm/fault.c | 2 +- arch/arm64/kernel/traps.c | 2 +- arch/arm64/mm/fault.c | 2 +- arch/csky/abiv1/alignment.c | 2 +- arch/csky/kernel/traps.c | 2 +- arch/csky/mm/fault.c | 2 +- arch/h8300/kernel/traps.c | 2 +- arch/h8300/mm/fault.c | 2 +- arch/hexagon/kernel/traps.c | 2 +- arch/ia64/kernel/mca_drv.c | 2 +- arch/ia64/kernel/traps.c | 2 +- arch/ia64/mm/fault.c | 2 +- arch/m68k/kernel/traps.c | 2 +- arch/m68k/mm/fault.c | 2 +- arch/microblaze/kernel/exceptions.c | 4 ++-- arch/mips/kernel/traps.c | 2 +- arch/nds32/kernel/fpu.c | 2 +- arch/nds32/kernel/traps.c | 8 ++++---- arch/nios2/kernel/traps.c | 4 ++-- arch/openrisc/kernel/traps.c | 2 +- arch/parisc/kernel/traps.c | 2 +- arch/powerpc/kernel/traps.c | 8 ++++---- arch/riscv/kernel/traps.c | 2 +- arch/riscv/mm/fault.c | 2 +- arch/s390/kernel/dumpstack.c | 2 +- arch/s390/kernel/nmi.c | 2 +- arch/sh/kernel/traps.c | 2 +- arch/sparc/kernel/traps_32.c | 4 +--- arch/sparc/kernel/traps_64.c | 4 +--- arch/x86/entry/entry_32.S | 6 +++--- arch/x86/entry/entry_64.S | 6 +++--- arch/x86/kernel/dumpstack.c | 4 ++-- arch/xtensa/kernel/traps.c | 2 +- include/linux/sched/task.h | 1 + kernel/exit.c | 9 +++++++++ tools/objtool/check.c | 3 ++- 39 files changed, 63 insertions(+), 56 deletions(-) diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c index 2ae34702456c..8a66fe544c69 100644 --- a/arch/alpha/kernel/traps.c +++ b/arch/alpha/kernel/traps.c @@ -190,7 +190,7 @@ die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15) local_irq_enable(); while (1); } - do_exit(SIGSEGV); + make_task_dead(SIGSEGV); } #ifndef CONFIG_MATHEMU @@ -575,7 +575,7 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg, printk("Bad unaligned kernel access at %016lx: %p %lx %lu\n", pc, va, opcode, reg); - do_exit(SIGSEGV); + make_task_dead(SIGSEGV); got_exception: /* Ok, we caught the exception, but we don't want it. Is there @@ -630,7 +630,7 @@ got_exception: local_irq_enable(); while (1); } - do_exit(SIGSEGV); + make_task_dead(SIGSEGV); } /* diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c index eee5102c3d88..e9193d52222e 100644 --- a/arch/alpha/mm/fault.c +++ b/arch/alpha/mm/fault.c @@ -204,7 +204,7 @@ retry: printk(KERN_ALERT "Unable to handle kernel paging request at " "virtual address %016lx\n", address); die_if_kernel("Oops", regs, cause, (unsigned long*)regs - 16); - do_exit(SIGKILL); + make_task_dead(SIGKILL); /* We ran out of memory, or some other thing happened to us that made us unable to handle the page fault gracefully. 
*/ diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 195dff58bafc..b4bd2e5f17c1 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -333,7 +333,7 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr) if (panic_on_oops) panic("Fatal exception"); if (signr) - do_exit(signr); + make_task_dead(signr); } /* diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index bc8779d54a64..bf1a0c618c49 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -111,7 +111,7 @@ static void die_kernel_fault(const char *msg, struct mm_struct *mm, show_pte(KERN_ALERT, mm, addr); die("Oops", regs, fsr); bust_spinlocks(0); - do_exit(SIGKILL); + make_task_dead(SIGKILL); } /* diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 7b21213a570f..bdd456e4e7f4 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -235,7 +235,7 @@ void die(const char *str, struct pt_regs *regs, int err) raw_spin_unlock_irqrestore(&die_lock, flags); if (ret != NOTIFY_STOP) - do_exit(SIGSEGV); + make_task_dead(SIGSEGV); } static void arm64_show_signal(int signo, const char *str) diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 9ae24e3b72be..11a28cace2d2 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -302,7 +302,7 @@ static void die_kernel_fault(const char *msg, unsigned long addr, show_pte(addr); die("Oops", regs, esr); bust_spinlocks(0); - do_exit(SIGKILL); + make_task_dead(SIGKILL); } #ifdef CONFIG_KASAN_HW_TAGS diff --git a/arch/csky/abiv1/alignment.c b/arch/csky/abiv1/alignment.c index cb2a0d94a144..5e2fb45d605c 100644 --- a/arch/csky/abiv1/alignment.c +++ b/arch/csky/abiv1/alignment.c @@ -294,7 +294,7 @@ bad_area: __func__, opcode, rz, rx, imm, addr); show_regs(regs); bust_spinlocks(0); - do_exit(SIGKILL); + make_task_dead(SIGKILL); } force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)addr); diff --git a/arch/csky/kernel/traps.c b/arch/csky/kernel/traps.c index e5fbf8653a21..88a47035b925 100644 --- a/arch/csky/kernel/traps.c +++ b/arch/csky/kernel/traps.c @@ -109,7 +109,7 @@ void die(struct pt_regs *regs, const char *str) if (panic_on_oops) panic("Fatal exception"); if (ret != NOTIFY_STOP) - do_exit(SIGSEGV); + make_task_dead(SIGSEGV); } void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr) diff --git a/arch/csky/mm/fault.c b/arch/csky/mm/fault.c index 466ad949818a..7215a46b6b8e 100644 --- a/arch/csky/mm/fault.c +++ b/arch/csky/mm/fault.c @@ -67,7 +67,7 @@ static inline void no_context(struct pt_regs *regs, unsigned long addr) pr_alert("Unable to handle kernel paging request at virtual " "addr 0x%08lx, pc: 0x%08lx\n", addr, regs->pc); die(regs, "Oops"); - do_exit(SIGKILL); + make_task_dead(SIGKILL); } static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault) diff --git a/arch/h8300/kernel/traps.c b/arch/h8300/kernel/traps.c index bdbe988d8dbc..3d4e0bde37ae 100644 --- a/arch/h8300/kernel/traps.c +++ b/arch/h8300/kernel/traps.c @@ -106,7 +106,7 @@ void die(const char *str, struct pt_regs *fp, unsigned long err) dump(fp); spin_unlock_irq(&die_lock); - do_exit(SIGSEGV); + make_task_dead(SIGSEGV); } static int kstack_depth_to_print = 24; diff --git a/arch/h8300/mm/fault.c b/arch/h8300/mm/fault.c index d4bc9c16f2df..0223528565dd 100644 --- a/arch/h8300/mm/fault.c +++ b/arch/h8300/mm/fault.c @@ -51,7 +51,7 @@ asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address, printk(" at virtual address %08lx\n", address); if 
(!user_mode(regs)) die("Oops", regs, error_code); - do_exit(SIGKILL); + make_dead_task(SIGKILL); return 1; } diff --git a/arch/hexagon/kernel/traps.c b/arch/hexagon/kernel/traps.c index edfc35dafeb1..6dd6cf0ab711 100644 --- a/arch/hexagon/kernel/traps.c +++ b/arch/hexagon/kernel/traps.c @@ -214,7 +214,7 @@ int die(const char *str, struct pt_regs *regs, long err) panic("Fatal exception"); oops_exit(); - do_exit(err); + make_dead_task(err); return 0; } diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c index 5bfc79be4cef..23c203639a96 100644 --- a/arch/ia64/kernel/mca_drv.c +++ b/arch/ia64/kernel/mca_drv.c @@ -176,7 +176,7 @@ mca_handler_bh(unsigned long paddr, void *iip, unsigned long ipsr) spin_unlock(&mca_bh_lock); /* This process is about to be killed itself */ - do_exit(SIGKILL); + make_task_dead(SIGKILL); } /** diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c index e13cb905930f..753642366e12 100644 --- a/arch/ia64/kernel/traps.c +++ b/arch/ia64/kernel/traps.c @@ -85,7 +85,7 @@ die (const char *str, struct pt_regs *regs, long err) if (panic_on_oops) panic("Fatal exception"); - do_exit(SIGSEGV); + make_task_dead(SIGSEGV); return 0; } diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c index 02de2e70c587..4796cccbf74f 100644 --- a/arch/ia64/mm/fault.c +++ b/arch/ia64/mm/fault.c @@ -259,7 +259,7 @@ retry: regs = NULL; bust_spinlocks(0); if (regs) - do_exit(SIGKILL); + make_task_dead(SIGKILL); return; out_of_memory: diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c index 34d6458340b0..59fc63feb0dc 100644 --- a/arch/m68k/kernel/traps.c +++ b/arch/m68k/kernel/traps.c @@ -1131,7 +1131,7 @@ void die_if_kernel (char *str, struct pt_regs *fp, int nr) pr_crit("%s: %08x\n", str, nr); show_registers(fp); add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); - do_exit(SIGSEGV); + make_task_dead(SIGSEGV); } asmlinkage void set_esp0(unsigned long ssp) diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c index ef46e77e97a5..fcb3a0d8421c 100644 --- a/arch/m68k/mm/fault.c +++ b/arch/m68k/mm/fault.c @@ -48,7 +48,7 @@ int send_fault_sig(struct pt_regs *regs) pr_alert("Unable to handle kernel access"); pr_cont(" at virtual address %p\n", addr); die_if_kernel("Oops", regs, 0 /*error_code*/); - do_exit(SIGKILL); + make_task_dead(SIGKILL); } return 1; diff --git a/arch/microblaze/kernel/exceptions.c b/arch/microblaze/kernel/exceptions.c index 908788497b28..fd153d5fab98 100644 --- a/arch/microblaze/kernel/exceptions.c +++ b/arch/microblaze/kernel/exceptions.c @@ -44,10 +44,10 @@ void die(const char *str, struct pt_regs *fp, long err) pr_warn("Oops: %s, sig: %ld\n", str, err); show_regs(fp); spin_unlock_irq(&die_lock); - /* do_exit() should take care of panic'ing from an interrupt + /* make_task_dead() should take care of panic'ing from an interrupt * context so we don't handle it here */ - do_exit(err); + make_task_dead(err); } /* for user application debugging */ diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index d26b0fb8ea06..a486486b2355 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -422,7 +422,7 @@ void __noreturn die(const char *str, struct pt_regs *regs) if (regs && kexec_should_crash(current)) crash_kexec(regs); - do_exit(sig); + make_task_dead(sig); } extern struct exception_table_entry __start___dbe_table[]; diff --git a/arch/nds32/kernel/fpu.c b/arch/nds32/kernel/fpu.c index 9edd7ed7d7bf..701c09a668de 100644 --- a/arch/nds32/kernel/fpu.c +++ b/arch/nds32/kernel/fpu.c @@ -223,7 +223,7 @@ inline void 
handle_fpu_exception(struct pt_regs *regs) } } else if (fpcsr & FPCSR_mskRIT) { if (!user_mode(regs)) - do_exit(SIGILL); + make_task_dead(SIGILL); si_signo = SIGILL; } diff --git a/arch/nds32/kernel/traps.c b/arch/nds32/kernel/traps.c index ca75d475eda4..c0a8f3344fb9 100644 --- a/arch/nds32/kernel/traps.c +++ b/arch/nds32/kernel/traps.c @@ -141,7 +141,7 @@ void __noreturn die(const char *str, struct pt_regs *regs, int err) bust_spinlocks(0); spin_unlock_irq(&die_lock); - do_exit(SIGSEGV); + make_task_dead(SIGSEGV); } EXPORT_SYMBOL(die); @@ -240,7 +240,7 @@ void unhandled_interruption(struct pt_regs *regs) pr_emerg("unhandled_interruption\n"); show_regs(regs); if (!user_mode(regs)) - do_exit(SIGKILL); + make_task_dead(SIGKILL); force_sig(SIGKILL); } @@ -251,7 +251,7 @@ void unhandled_exceptions(unsigned long entry, unsigned long addr, addr, type); show_regs(regs); if (!user_mode(regs)) - do_exit(SIGKILL); + make_task_dead(SIGKILL); force_sig(SIGKILL); } @@ -278,7 +278,7 @@ void do_revinsn(struct pt_regs *regs) pr_emerg("Reserved Instruction\n"); show_regs(regs); if (!user_mode(regs)) - do_exit(SIGILL); + make_task_dead(SIGILL); force_sig(SIGILL); } diff --git a/arch/nios2/kernel/traps.c b/arch/nios2/kernel/traps.c index 596986a74a26..85ac49d64cf7 100644 --- a/arch/nios2/kernel/traps.c +++ b/arch/nios2/kernel/traps.c @@ -37,10 +37,10 @@ void die(const char *str, struct pt_regs *regs, long err) show_regs(regs); spin_unlock_irq(&die_lock); /* - * do_exit() should take care of panic'ing from an interrupt + * make_task_dead() should take care of panic'ing from an interrupt * context so we don't handle it here */ - do_exit(err); + make_task_dead(err); } void _exception(int signo, struct pt_regs *regs, int code, unsigned long addr) diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c index 0898cb159fac..0446a3c34372 100644 --- a/arch/openrisc/kernel/traps.c +++ b/arch/openrisc/kernel/traps.c @@ -212,7 +212,7 @@ void __noreturn die(const char *str, struct pt_regs *regs, long err) __asm__ __volatile__("l.nop 1"); do {} while (1); #endif - do_exit(SIGSEGV); + make_task_dead(SIGSEGV); } /* This is normally the 'Oops' routine */ diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c index b11fb26ce299..df2122c50d78 100644 --- a/arch/parisc/kernel/traps.c +++ b/arch/parisc/kernel/traps.c @@ -269,7 +269,7 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err) panic("Fatal exception"); oops_exit(); - do_exit(SIGSEGV); + make_task_dead(SIGSEGV); } /* gdb uses break 4,8 */ diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 11741703d26e..a08bb7cefdc5 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -245,7 +245,7 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, if (panic_on_oops) panic("Fatal exception"); - do_exit(signr); + make_task_dead(signr); } NOKPROBE_SYMBOL(oops_end); @@ -792,9 +792,9 @@ int machine_check_generic(struct pt_regs *regs) void die_mce(const char *str, struct pt_regs *regs, long err) { /* - * The machine check wants to kill the interrupted context, but - * do_exit() checks for in_interrupt() and panics in that case, so - * exit the irq/nmi before calling die. + * The machine check wants to kill the interrupted context, + * but make_task_dead() checks for in_interrupt() and panics + * in that case, so exit the irq/nmi before calling die. 
*/ if (in_nmi()) nmi_exit(); diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c index 0daaa3e4630d..fe92e119e6a3 100644 --- a/arch/riscv/kernel/traps.c +++ b/arch/riscv/kernel/traps.c @@ -54,7 +54,7 @@ void die(struct pt_regs *regs, const char *str) if (panic_on_oops) panic("Fatal exception"); if (ret != NOTIFY_STOP) - do_exit(SIGSEGV); + make_task_dead(SIGSEGV); } void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr) diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c index aa08dd2f8fae..42118bc728f9 100644 --- a/arch/riscv/mm/fault.c +++ b/arch/riscv/mm/fault.c @@ -31,7 +31,7 @@ static void die_kernel_fault(const char *msg, unsigned long addr, bust_spinlocks(0); die(regs, "Oops"); - do_exit(SIGKILL); + make_task_dead(SIGKILL); } static inline void no_context(struct pt_regs *regs, unsigned long addr) diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c index 0681c55e831d..1e3233eb510a 100644 --- a/arch/s390/kernel/dumpstack.c +++ b/arch/s390/kernel/dumpstack.c @@ -224,5 +224,5 @@ void __noreturn die(struct pt_regs *regs, const char *str) if (panic_on_oops) panic("Fatal exception: panic_on_oops"); oops_exit(); - do_exit(SIGSEGV); + make_task_dead(SIGSEGV); } diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c index 20f8e1868853..a4d8c058dd27 100644 --- a/arch/s390/kernel/nmi.c +++ b/arch/s390/kernel/nmi.c @@ -175,7 +175,7 @@ void __s390_handle_mcck(void) "malfunction (code 0x%016lx).\n", mcck.mcck_code); printk(KERN_EMERG "mcck: task: %s, pid: %d.\n", current->comm, current->pid); - do_exit(SIGSEGV); + make_task_dead(SIGSEGV); } } diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c index cbe3201d4f21..01884054aeb2 100644 --- a/arch/sh/kernel/traps.c +++ b/arch/sh/kernel/traps.c @@ -57,7 +57,7 @@ void __noreturn die(const char *str, struct pt_regs *regs, long err) if (panic_on_oops) panic("Fatal exception"); - do_exit(SIGSEGV); + make_task_dead(SIGSEGV); } void die_if_kernel(const char *str, struct pt_regs *regs, long err) diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c index 5630e5a395e0..179aabfa712e 100644 --- a/arch/sparc/kernel/traps_32.c +++ b/arch/sparc/kernel/traps_32.c @@ -86,9 +86,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs) } printk("Instruction DUMP:"); instruction_dump ((unsigned long *) regs->pc); - if(regs->psr & PSR_PS) - do_exit(SIGKILL); - do_exit(SIGSEGV); + make_task_dead((regs->psr & PSR_PS) ? SIGKILL : SIGSEGV); } void do_hw_interrupt(struct pt_regs *regs, unsigned long type) diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c index 6863025ed56d..21077821f427 100644 --- a/arch/sparc/kernel/traps_64.c +++ b/arch/sparc/kernel/traps_64.c @@ -2559,9 +2559,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs) } if (panic_on_oops) panic("Fatal exception"); - if (regs->tstate & TSTATE_PRIV) - do_exit(SIGKILL); - do_exit(SIGSEGV); + make_task_dead((regs->tstate & TSTATE_PRIV)? SIGKILL : SIGSEGV); } EXPORT_SYMBOL(die_if_kernel); diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index ccb9d32768f3..7738fad6a85e 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -1248,14 +1248,14 @@ SYM_CODE_START(asm_exc_nmi) SYM_CODE_END(asm_exc_nmi) .pushsection .text, "ax" -SYM_CODE_START(rewind_stack_do_exit) +SYM_CODE_START(rewind_stack_and_make_dead) /* Prevent any naive code from trying to unwind to our caller. 
*/ xorl %ebp, %ebp movl PER_CPU_VAR(cpu_current_top_of_stack), %esi leal -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp - call do_exit + call make_task_dead 1: jmp 1b -SYM_CODE_END(rewind_stack_do_exit) +SYM_CODE_END(rewind_stack_and_make_dead) .popsection diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index e38a4cf795d9..f09276457942 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -1429,7 +1429,7 @@ SYM_CODE_END(ignore_sysret) #endif .pushsection .text, "ax" -SYM_CODE_START(rewind_stack_do_exit) +SYM_CODE_START(rewind_stack_and_make_dead) UNWIND_HINT_FUNC /* Prevent any naive code from trying to unwind to our caller. */ xorl %ebp, %ebp @@ -1438,6 +1438,6 @@ SYM_CODE_START(rewind_stack_do_exit) leaq -PTREGS_SIZE(%rax), %rsp UNWIND_HINT_REGS - call do_exit -SYM_CODE_END(rewind_stack_do_exit) + call make_task_dead +SYM_CODE_END(rewind_stack_and_make_dead) .popsection diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index ea4fe192189d..53de044e5654 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -351,7 +351,7 @@ unsigned long oops_begin(void) } NOKPROBE_SYMBOL(oops_begin); -void __noreturn rewind_stack_do_exit(int signr); +void __noreturn rewind_stack_and_make_dead(int signr); void oops_end(unsigned long flags, struct pt_regs *regs, int signr) { @@ -386,7 +386,7 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr) * reuse the task stack and that existing poisons are invalid. */ kasan_unpoison_task_stack(current); - rewind_stack_do_exit(signr); + rewind_stack_and_make_dead(signr); } NOKPROBE_SYMBOL(oops_end); diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c index 4b4dbeb2d612..9345007d474d 100644 --- a/arch/xtensa/kernel/traps.c +++ b/arch/xtensa/kernel/traps.c @@ -552,5 +552,5 @@ void __noreturn die(const char * str, struct pt_regs * regs, long err) if (panic_on_oops) panic("Fatal exception"); - do_exit(err); + make_task_dead(err); } diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index ba88a6987400..2d4bbd9c3278 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -59,6 +59,7 @@ extern void sched_post_fork(struct task_struct *p, extern void sched_dead(struct task_struct *p); void __noreturn do_task_dead(void); +void __noreturn make_task_dead(int signr); extern void proc_caches_init(void); diff --git a/kernel/exit.c b/kernel/exit.c index f702a6a63686..bfa513c5b227 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -884,6 +884,15 @@ void __noreturn do_exit(long code) } EXPORT_SYMBOL_GPL(do_exit); +void __noreturn make_task_dead(int signr) +{ + /* + * Take the task off the cpu after something catastrophic has + * happened. 
+ */ + do_exit(signr); +} + void complete_and_exit(struct completion *comp, long code) { if (comp) diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 21735829b860..e6ab5687770b 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -168,6 +168,7 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func, "panic", "do_exit", "do_task_dead", + "make_task_dead", "__module_put_and_exit", "complete_and_exit", "__reiserfs_panic", @@ -175,7 +176,7 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func, "fortify_panic", "usercopy_abort", "machine_real_restart", - "rewind_stack_do_exit", + "rewind_stack_and_make_dead", "kunit_try_catch_throw", "xen_start_kernel", "cpu_bringup_and_idle", From 05ea0424f0e21c0ef9b47c89826e7c22ae137975 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Mon, 22 Nov 2021 09:33:00 -0600 Subject: [PATCH 06/44] exit: Move oops specific logic from do_exit into make_task_dead The beginning of do_exit has become cluttered and difficult to read as it is filled with checks to handle things that can only happen when the kernel is operating improperly. Now that we have a dedicated function for cleaning up a task when the kernel is operating improperly, move the checks there. Signed-off-by: "Eric W. Biederman" --- kernel/exit.c | 78 ++++++++++++++++++++++----------------------- kernel/futex/core.c | 2 +- kernel/kexec_core.c | 2 +- 3 files changed, 41 insertions(+), 41 deletions(-) diff --git a/kernel/exit.c b/kernel/exit.c index bfa513c5b227..d0ec6f6b41cb 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -735,36 +735,8 @@ void __noreturn do_exit(long code) struct task_struct *tsk = current; int group_dead; - /* - * We can get here from a kernel oops, sometimes with preemption off. - * Start by checking for critical errors. - * Then fix up important state like USER_DS and preemption. - * Then do everything else. - */ - WARN_ON(blk_needs_flush_plug(tsk)); - if (unlikely(in_interrupt())) - panic("Aiee, killing interrupt handler!"); - if (unlikely(!tsk->pid)) - panic("Attempted to kill the idle task!"); - - /* - * If do_exit is called because this processes oopsed, it's possible - * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before - * continuing. Amongst other possible reasons, this is to prevent - * mm_release()->clear_child_tid() from writing to a user-controlled - * kernel address. - */ - force_uaccess_begin(); - - if (unlikely(in_atomic())) { - pr_info("note: %s[%d] exited with preempt_count %d\n", - current->comm, task_pid_nr(current), - preempt_count()); - preempt_count_set(PREEMPT_ENABLED); - } - profile_task_exit(tsk); kcov_task_exit(tsk); @@ -773,17 +745,6 @@ void __noreturn do_exit(long code) validate_creds_for_do_exit(tsk); - /* - * We're taking recursive faults here in do_exit. Safest is to just - * leave this task alone and wait for reboot. - */ - if (unlikely(tsk->flags & PF_EXITING)) { - pr_alert("Fixing recursive fault but reboot is needed!\n"); - futex_exit_recursive(tsk); - set_current_state(TASK_UNINTERRUPTIBLE); - schedule(); - } - io_uring_files_cancel(); exit_signals(tsk); /* sets PF_EXITING */ @@ -889,7 +850,46 @@ void __noreturn make_task_dead(int signr) { /* * Take the task off the cpu after something catastrophic has * happened. + * + * We can get here from a kernel oops, sometimes with preemption off. + * Start by checking for critical errors. + * Then fix up important state like USER_DS and preemption. + * Then do everything else. 
*/ + struct task_struct *tsk = current; + + if (unlikely(in_interrupt())) + panic("Aiee, killing interrupt handler!"); + if (unlikely(!tsk->pid)) + panic("Attempted to kill the idle task!"); + + /* + * If make_task_dead is called because this process oopsed, it's possible + * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before + * continuing. Amongst other possible reasons, this is to prevent + * mm_release()->clear_child_tid() from writing to a user-controlled + * kernel address. + */ + force_uaccess_begin(); + + if (unlikely(in_atomic())) { + pr_info("note: %s[%d] exited with preempt_count %d\n", + current->comm, task_pid_nr(current), + preempt_count()); + preempt_count_set(PREEMPT_ENABLED); + } + + /* + * We're taking recursive faults here in make_task_dead. Safest is to just + * leave this task alone and wait for reboot. + */ + if (unlikely(tsk->flags & PF_EXITING)) { + pr_alert("Fixing recursive fault but reboot is needed!\n"); + futex_exit_recursive(tsk); + set_current_state(TASK_UNINTERRUPTIBLE); + schedule(); + } + do_exit(signr); } diff --git a/kernel/futex/core.c b/kernel/futex/core.c index 25d8a88b32e5..39a1522865b5 100644 --- a/kernel/futex/core.c +++ b/kernel/futex/core.c @@ -1044,7 +1044,7 @@ static void futex_cleanup(struct task_struct *tsk) * actually finished the futex cleanup. The worst case for this is that the * waiter runs through the wait loop until the state becomes visible. * - * This is called from the recursive fault handling path in do_exit(). + * This is called from the recursive fault handling path in make_task_dead(). * * This is best effort. Either the futex exit code has run already or * not. If the OWNER_DIED bit has been set on the futex then the waiter can diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c index 5a5d192a89ac..68480f731192 100644 --- a/kernel/kexec_core.c +++ b/kernel/kexec_core.c @@ -81,7 +81,7 @@ int kexec_should_crash(struct task_struct *p) if (crash_kexec_post_notifiers) return 0; /* - * There are 4 panic() calls in do_exit() path, each of which + * There are 4 panic() calls in make_task_dead() path, each of which * corresponds to each of these 4 conditions. */ if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops) From 7f80a2fd7db9a55894fd841915236aca611291b5 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Mon, 22 Nov 2021 09:51:03 -0600 Subject: [PATCH 07/44] exit: Stop poorly open coding do_task_dead in make_task_dead When the kernel detects it is oopsing or otherwise force killing a task while it exits, the code poorly attempts to permanently stop the task from scheduling. I say poorly because it is possible for a task in TASK_UNINTERRUPTIBLE to be woken up. As it makes no sense for the task to continue, call do_task_dead instead, which actually does the work and permanently removes the task from the scheduler, guaranteeing the task will never be woken up again. Signed-off-by: "Eric W. Biederman" --- kernel/exit.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/kernel/exit.c b/kernel/exit.c index d0ec6f6b41cb..f975cd8a2ed8 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -886,8 +886,7 @@ void __noreturn make_task_dead(int signr) if (unlikely(tsk->flags & PF_EXITING)) { pr_alert("Fixing recursive fault but reboot is needed!\n"); futex_exit_recursive(tsk); - set_current_state(TASK_UNINTERRUPTIBLE); - schedule(); + do_task_dead(); } do_exit(signr); From eb55e716ac1aa0de13ef5abbf1479d995582d967 Mon Sep 17 00:00:00 2001 From: "Eric W. 
Biederman" Date: Mon, 22 Nov 2021 10:01:59 -0600 Subject: [PATCH 08/44] exit: Stop exporting do_exit Now that there are no more modular uses of do_exit remove the EXPORT_SYMBOL. Suggested-by: Christoph Hellwig Signed-off-by: "Eric W. Biederman" --- kernel/exit.c | 1 - 1 file changed, 1 deletion(-) diff --git a/kernel/exit.c b/kernel/exit.c index f975cd8a2ed8..57afac845a0a 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -843,7 +843,6 @@ void __noreturn do_exit(long code) lockdep_free_task(tsk); do_task_dead(); } -EXPORT_SYMBOL_GPL(do_exit); void __noreturn make_task_dead(int signr) { From bbda86e988d4c124e4cfa816291cbd583ae8bfb1 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Mon, 22 Nov 2021 10:27:36 -0600 Subject: [PATCH 09/44] exit: Implement kthread_exit The way the per task_struct exit_code is used by kernel threads is not quite compatible how it is used by userspace applications. The low byte of the userspace exit_code value encodes the exit signal. While kthreads just use the value as an int holding ordinary kernel function exit status like -EPERM. Add kthread_exit to clearly separate the two kinds of uses. Signed-off-by: "Eric W. Biederman" --- include/linux/kthread.h | 1 + kernel/kthread.c | 23 +++++++++++++++++++---- tools/objtool/check.c | 1 + 3 files changed, 21 insertions(+), 4 deletions(-) diff --git a/include/linux/kthread.h b/include/linux/kthread.h index 346b0f269161..22c43d419687 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h @@ -70,6 +70,7 @@ void *kthread_probe_data(struct task_struct *k); int kthread_park(struct task_struct *k); void kthread_unpark(struct task_struct *k); void kthread_parkme(void); +void kthread_exit(long result) __noreturn; int kthreadd(void *unused); extern struct task_struct *kthreadd_task; diff --git a/kernel/kthread.c b/kernel/kthread.c index 7113003fab63..77b7c3f23f18 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -268,6 +268,21 @@ void kthread_parkme(void) } EXPORT_SYMBOL_GPL(kthread_parkme); +/** + * kthread_exit - Cause the current kthread return @result to kthread_stop(). + * @result: The integer value to return to kthread_stop(). + * + * While kthread_exit can be called directly, it exists so that + * functions which do some additional work in non-modular code such as + * module_put_and_kthread_exit can be implemented. + * + * Does not return. + */ +void __noreturn kthread_exit(long result) +{ + do_exit(result); +} + static int kthread(void *_create) { static const struct sched_param param = { .sched_priority = 0 }; @@ -286,13 +301,13 @@ static int kthread(void *_create) done = xchg(&create->done, NULL); if (!done) { kfree(create); - do_exit(-EINTR); + kthread_exit(-EINTR); } if (!self) { create->result = ERR_PTR(-ENOMEM); complete(done); - do_exit(-ENOMEM); + kthread_exit(-ENOMEM); } self->threadfn = threadfn; @@ -326,7 +341,7 @@ static int kthread(void *_create) __kthread_parkme(self); ret = threadfn(data); } - do_exit(ret); + kthread_exit(ret); } /* called from kernel_clone() to get node information for about to be created task */ @@ -627,7 +642,7 @@ EXPORT_SYMBOL_GPL(kthread_park); * instead of calling wake_up_process(): the thread will exit without * calling threadfn(). * - * If threadfn() may call do_exit() itself, the caller must ensure + * If threadfn() may call kthread_exit() itself, the caller must ensure * task_struct can't go away. 
* * Returns the result of threadfn(), or %-EINTR if wake_up_process() diff --git a/tools/objtool/check.c b/tools/objtool/check.c index e6ab5687770b..90108fe5610d 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -168,6 +168,7 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func, "panic", "do_exit", "do_task_dead", + "kthread_exit", "make_task_dead", "__module_put_and_exit", "complete_and_exit", From ca3574bd653aba234a4b31955f2778947403be16 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Fri, 3 Dec 2021 11:00:19 -0600 Subject: [PATCH 10/44] exit: Rename module_put_and_exit to module_put_and_kthread_exit Update module_put_and_exit to call kthread_exit instead of do_exit. Change the name to reflect this change in functionality. All of the users of module_put_and_exit are causing the current kthread to exit so this change makes it clear what is happening. There is no functional change. Signed-off-by: "Eric W. Biederman" --- crypto/algboss.c | 4 ++-- fs/cifs/connect.c | 2 +- fs/nfs/callback.c | 4 ++-- fs/nfs/nfs4state.c | 2 +- fs/nfsd/nfssvc.c | 2 +- include/linux/module.h | 6 +++--- kernel/module.c | 6 +++--- net/bluetooth/bnep/core.c | 2 +- net/bluetooth/cmtp/core.c | 2 +- net/bluetooth/hidp/core.c | 2 +- tools/objtool/check.c | 2 +- 11 files changed, 17 insertions(+), 17 deletions(-) diff --git a/crypto/algboss.c b/crypto/algboss.c index 1814d2c5188a..eb5fe84efb83 100644 --- a/crypto/algboss.c +++ b/crypto/algboss.c @@ -67,7 +67,7 @@ out: complete_all(&param->larval->completion); crypto_alg_put(&param->larval->alg); kfree(param); - module_put_and_exit(0); + module_put_and_kthread_exit(0); } static int cryptomgr_schedule_probe(struct crypto_larval *larval) @@ -190,7 +190,7 @@ skiptest: crypto_alg_tested(param->driver, err); kfree(param); - module_put_and_exit(0); + module_put_and_kthread_exit(0); } static int cryptomgr_schedule_test(struct crypto_alg *alg) diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 82577a7a5bb1..39fbe9acbf51 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -1139,7 +1139,7 @@ next_pdu: } memalloc_noreclaim_restore(noreclaim_flag); - module_put_and_exit(0); + module_put_and_kthread_exit(0); } /* diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index 86d856de1389..3c86a559a321 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c @@ -93,7 +93,7 @@ nfs4_callback_svc(void *vrqstp) svc_process(rqstp); } svc_exit_thread(rqstp); - module_put_and_exit(0); + module_put_and_kthread_exit(0); return 0; } @@ -137,7 +137,7 @@ nfs41_callback_svc(void *vrqstp) } } svc_exit_thread(rqstp); - module_put_and_exit(0); + module_put_and_kthread_exit(0); return 0; } diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index ecc4594299d6..ea41af731978 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -2689,6 +2689,6 @@ static int nfs4_run_state_manager(void *ptr) allow_signal(SIGKILL); nfs4_state_manager(clp); nfs_put_client(clp); - module_put_and_exit(0); + module_put_and_kthread_exit(0); return 0; } diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c index 80431921e5d7..5ce9f14318c4 100644 --- a/fs/nfsd/nfssvc.c +++ b/fs/nfsd/nfssvc.c @@ -986,7 +986,7 @@ out: /* Release module */ mutex_unlock(&nfsd_mutex); - module_put_and_exit(0); + module_put_and_kthread_exit(0); return 0; } diff --git a/include/linux/module.h b/include/linux/module.h index c9f1200b2312..f03be97e9ec1 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -595,9 +595,9 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char 
*type, /* Look for this name: can be of form module:name. */ unsigned long module_kallsyms_lookup_name(const char *name); -extern void __noreturn __module_put_and_exit(struct module *mod, +extern void __noreturn __module_put_and_kthread_exit(struct module *mod, long code); -#define module_put_and_exit(code) __module_put_and_exit(THIS_MODULE, code) +#define module_put_and_kthread_exit(code) __module_put_and_kthread_exit(THIS_MODULE, code) #ifdef CONFIG_MODULE_UNLOAD int module_refcount(struct module *mod); @@ -790,7 +790,7 @@ static inline int unregister_module_notifier(struct notifier_block *nb) return 0; } -#define module_put_and_exit(code) do_exit(code) +#define module_put_and_kthread_exit(code) kthread_exit(code) static inline void print_modules(void) { diff --git a/kernel/module.c b/kernel/module.c index 84a9141a5e15..a3aa00bf270d 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -337,12 +337,12 @@ static inline void add_taint_module(struct module *mod, unsigned flag, * A thread that wants to hold a reference to a module only while it * is running can call this to safely exit. nfsd and lockd use this. */ -void __noreturn __module_put_and_exit(struct module *mod, long code) +void __noreturn __module_put_and_kthread_exit(struct module *mod, long code) { module_put(mod); - do_exit(code); + kthread_exit(code); } -EXPORT_SYMBOL(__module_put_and_exit); +EXPORT_SYMBOL(__module_put_and_kthread_exit); /* Find a module section: 0 means not found. */ static unsigned int find_sec(const struct load_info *info, const char *name) diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c index c9add7753b9f..40baa6b7321a 100644 --- a/net/bluetooth/bnep/core.c +++ b/net/bluetooth/bnep/core.c @@ -535,7 +535,7 @@ static int bnep_session(void *arg) up_write(&bnep_session_sem); free_netdev(dev); - module_put_and_exit(0); + module_put_and_kthread_exit(0); return 0; } diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c index 0a2d78e811cf..9bfded6b74b3 100644 --- a/net/bluetooth/cmtp/core.c +++ b/net/bluetooth/cmtp/core.c @@ -323,7 +323,7 @@ static int cmtp_session(void *arg) up_write(&cmtp_session_sem); kfree(session); - module_put_and_exit(0); + module_put_and_kthread_exit(0); return 0; } diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index 80848dfc01db..5940744a8cd8 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c @@ -1305,7 +1305,7 @@ static int hidp_session_thread(void *arg) l2cap_unregister_user(session->conn, &session->user); hidp_session_put(session); - module_put_and_exit(0); + module_put_and_kthread_exit(0); return 0; } diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 90108fe5610d..120e9598c11a 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -170,7 +170,7 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func, "do_task_dead", "kthread_exit", "make_task_dead", - "__module_put_and_exit", + "__module_put_and_kthread_exit", "complete_and_exit", "__reiserfs_panic", "lbug_with_loc", From cead18552660702a4a46f58e65188fe5f36e9dfe Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Mon, 22 Nov 2021 11:15:19 -0600 Subject: [PATCH 11/44] exit: Rename complete_and_exit to kthread_complete_and_exit Update complete_and_exit to call kthread_exit instead of do_exit. Change the name to reflect this change in functionality. All of the users of complete_and_exit are causing the current kthread to exit so this change makes it clear what is happening. 
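As an illustration of the pattern being renamed, a minimal sketch follows. It is not code taken from this series: worker_done, worker_fn, and the one-second sleep are made-up names standing in for a driver's own kthread that lets its module owner wait for the thread to finish before unloading.

#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/kthread.h>

static DECLARE_COMPLETION(worker_done);		/* hypothetical completion */

static int worker_fn(void *data)
{
	while (!kthread_should_stop())
		msleep_interruptible(1000);	/* stand-in for real work */

	/* Before this series this was complete_and_exit(&worker_done, 0);
	 * the renamed call behaves the same, but the name makes the
	 * kthread-only nature of the helper explicit. */
	kthread_complete_and_exit(&worker_done, 0);
}
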
Move the implementation of kthread_complete_and_exit from kernel/exit.c to kernel/kthread.c. As this function is kthread specific it makes the most sense for it to live with the kthread functions. There is no functional change. Signed-off-by: "Eric W. Biederman" --- drivers/net/wireless/rsi/rsi_91x_coex.c | 2 +- drivers/net/wireless/rsi/rsi_91x_main.c | 2 +- drivers/net/wireless/rsi/rsi_91x_sdio_ops.c | 2 +- drivers/net/wireless/rsi/rsi_91x_usb_ops.c | 2 +- drivers/pnp/pnpbios/core.c | 6 +++--- drivers/staging/rts5208/rtsx.c | 16 +++++++-------- drivers/usb/atm/usbatm.c | 2 +- drivers/usb/gadget/function/f_mass_storage.c | 2 +- fs/jffs2/background.c | 2 +- include/linux/kernel.h | 1 - include/linux/kthread.h | 1 + kernel/exit.c | 9 --------- kernel/kthread.c | 21 ++++++++++++++++++++ lib/kunit/try-catch.c | 4 ++-- tools/objtool/check.c | 2 +- 15 files changed, 43 insertions(+), 31 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_coex.c b/drivers/net/wireless/rsi/rsi_91x_coex.c index a0c5d02ae88c..8a3d86897ea8 100644 --- a/drivers/net/wireless/rsi/rsi_91x_coex.c +++ b/drivers/net/wireless/rsi/rsi_91x_coex.c @@ -63,7 +63,7 @@ static void rsi_coex_scheduler_thread(struct rsi_common *common) rsi_coex_sched_tx_pkts(coex_cb); } while (atomic_read(&coex_cb->coex_tx_thread.thread_done) == 0); - complete_and_exit(&coex_cb->coex_tx_thread.completion, 0); + kthread_complete_and_exit(&coex_cb->coex_tx_thread.completion, 0); } int rsi_coex_recv_pkt(struct rsi_common *common, u8 *msg) diff --git a/drivers/net/wireless/rsi/rsi_91x_main.c b/drivers/net/wireless/rsi/rsi_91x_main.c index f1bf71e6c608..c7f5cec5e446 100644 --- a/drivers/net/wireless/rsi/rsi_91x_main.c +++ b/drivers/net/wireless/rsi/rsi_91x_main.c @@ -260,7 +260,7 @@ static void rsi_tx_scheduler_thread(struct rsi_common *common) if (common->init_done) rsi_core_qos_processor(common); } while (atomic_read(&common->tx_thread.thread_done) == 0); - complete_and_exit(&common->tx_thread.completion, 0); + kthread_complete_and_exit(&common->tx_thread.completion, 0); } #ifdef CONFIG_RSI_COEX diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c index 8ace1874e5cb..b2b47a0abcbf 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c @@ -75,7 +75,7 @@ void rsi_sdio_rx_thread(struct rsi_common *common) rsi_dbg(INFO_ZONE, "%s: Terminated SDIO RX thread\n", __func__); atomic_inc(&sdev->rx_thread.thread_done); - complete_and_exit(&sdev->rx_thread.completion, 0); + kthread_complete_and_exit(&sdev->rx_thread.completion, 0); } /** diff --git a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c index 4ffcdde1acb1..5130b0e72adc 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c @@ -56,6 +56,6 @@ void rsi_usb_rx_thread(struct rsi_common *common) out: rsi_dbg(INFO_ZONE, "%s: Terminated thread\n", __func__); skb_queue_purge(&dev->rx_q); - complete_and_exit(&dev->rx_thread.completion, 0); + kthread_complete_and_exit(&dev->rx_thread.completion, 0); } diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c index 669ef4700c1a..f7e86ae9f72f 100644 --- a/drivers/pnp/pnpbios/core.c +++ b/drivers/pnp/pnpbios/core.c @@ -160,7 +160,7 @@ static int pnp_dock_thread(void *unused) * No dock to manage */ case PNP_FUNCTION_NOT_SUPPORTED: - complete_and_exit(&unload_sem, 0); + kthread_complete_and_exit(&unload_sem, 0); case PNP_SYSTEM_NOT_DOCKED: d = 0; break; @@ -170,7 +170,7 @@ 
static int pnp_dock_thread(void *unused) default: pnpbios_print_status("pnp_dock_thread", status); printk(KERN_WARNING "PnPBIOS: disabling dock monitoring.\n"); - complete_and_exit(&unload_sem, 0); + kthread_complete_and_exit(&unload_sem, 0); } if (d != docked) { if (pnp_dock_event(d, &now) == 0) { @@ -183,7 +183,7 @@ static int pnp_dock_thread(void *unused) } } } - complete_and_exit(&unload_sem, 0); + kthread_complete_and_exit(&unload_sem, 0); } static int pnpbios_get_resources(struct pnp_dev *dev) diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c index 91fcf85e150a..5a58dac76c88 100644 --- a/drivers/staging/rts5208/rtsx.c +++ b/drivers/staging/rts5208/rtsx.c @@ -450,13 +450,13 @@ skip_for_abort: * after the down() -- that's necessary for the thread-shutdown * case. * - * complete_and_exit() goes even further than this -- it is safe in - * the case that the thread of the caller is going away (not just - * the structure) -- this is necessary for the module-remove case. - * This is important in preemption kernels, which transfer the flow - * of execution immediately upon a complete(). + * kthread_complete_and_exit() goes even further than this -- + * it is safe in the case that the thread of the caller is going away + * (not just the structure) -- this is necessary for the module-remove + * case. This is important in preemption kernels, which transfer the + * flow of execution immediately upon a complete(). */ - complete_and_exit(&dev->control_exit, 0); + kthread_complete_and_exit(&dev->control_exit, 0); } static int rtsx_polling_thread(void *__dev) @@ -501,7 +501,7 @@ static int rtsx_polling_thread(void *__dev) mutex_unlock(&dev->dev_mutex); } - complete_and_exit(&dev->polling_exit, 0); + kthread_complete_and_exit(&dev->polling_exit, 0); } /* @@ -682,7 +682,7 @@ static int rtsx_scan_thread(void *__dev) /* Should we unbind if no devices were detected? 
*/ } - complete_and_exit(&dev->scanning_done, 0); + kthread_complete_and_exit(&dev->scanning_done, 0); } static void rtsx_init_options(struct rtsx_chip *chip) diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c index da17be1ef64e..e3a49d837609 100644 --- a/drivers/usb/atm/usbatm.c +++ b/drivers/usb/atm/usbatm.c @@ -969,7 +969,7 @@ static int usbatm_do_heavy_init(void *arg) instance->thread = NULL; mutex_unlock(&instance->serialize); - complete_and_exit(&instance->thread_exited, ret); + kthread_complete_and_exit(&instance->thread_exited, ret); } static int usbatm_heavy_init(struct usbatm_data *instance) diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c index 752439690fda..46dd11dcb3a8 100644 --- a/drivers/usb/gadget/function/f_mass_storage.c +++ b/drivers/usb/gadget/function/f_mass_storage.c @@ -2547,7 +2547,7 @@ static int fsg_main_thread(void *common_) up_write(&common->filesem); /* Let fsg_unbind() know the thread has exited */ - complete_and_exit(&common->thread_notifier, 0); + kthread_complete_and_exit(&common->thread_notifier, 0); } diff --git a/fs/jffs2/background.c b/fs/jffs2/background.c index 2b4d5013dc5d..6da92ecaf66d 100644 --- a/fs/jffs2/background.c +++ b/fs/jffs2/background.c @@ -161,5 +161,5 @@ static int jffs2_garbage_collect_thread(void *_c) spin_lock(&c->erase_completion_lock); c->gc_task = NULL; spin_unlock(&c->erase_completion_lock); - complete_and_exit(&c->gc_thread_exit, 0); + kthread_complete_and_exit(&c->gc_thread_exit, 0); } diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 77755ac3e189..055eb203c00e 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -187,7 +187,6 @@ static inline void might_fault(void) { } #endif void do_exit(long error_code) __noreturn; -void complete_and_exit(struct completion *, long) __noreturn; extern int num_to_str(char *buf, int size, unsigned long long num, unsigned int width); diff --git a/include/linux/kthread.h b/include/linux/kthread.h index 22c43d419687..d86a7e3b9a52 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h @@ -71,6 +71,7 @@ int kthread_park(struct task_struct *k); void kthread_unpark(struct task_struct *k); void kthread_parkme(void); void kthread_exit(long result) __noreturn; +void kthread_complete_and_exit(struct completion *, long) __noreturn; int kthreadd(void *unused); extern struct task_struct *kthreadd_task; diff --git a/kernel/exit.c b/kernel/exit.c index 57afac845a0a..6c4b04531f17 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -891,15 +891,6 @@ void __noreturn make_task_dead(int signr) do_exit(signr); } -void complete_and_exit(struct completion *comp, long code) -{ - if (comp) - complete(comp); - - do_exit(code); -} -EXPORT_SYMBOL(complete_and_exit); - SYSCALL_DEFINE1(exit, int, error_code) { do_exit((error_code&0xff)<<8); diff --git a/kernel/kthread.c b/kernel/kthread.c index 77b7c3f23f18..4388d6694a7f 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -283,6 +283,27 @@ void __noreturn kthread_exit(long result) do_exit(result); } +/** + * kthread_complete_and exit - Exit the current kthread. + * @comp: Completion to complete + * @code: The integer value to return to kthread_stop(). + * + * If present complete @comp and the reuturn code to kthread_stop(). + * + * A kernel thread whose module may be removed after the completion of + * @comp can use this function exit safely. + * + * Does not return. 
+ */ +void __noreturn kthread_complete_and_exit(struct completion *comp, long code) +{ + if (comp) + complete(comp); + + kthread_exit(code); +} +EXPORT_SYMBOL(kthread_complete_and_exit); + static int kthread(void *_create) { static const struct sched_param param = { .sched_priority = 0 }; diff --git a/lib/kunit/try-catch.c b/lib/kunit/try-catch.c index 0dd434e40487..be38a2c5ecc2 100644 --- a/lib/kunit/try-catch.c +++ b/lib/kunit/try-catch.c @@ -17,7 +17,7 @@ void __noreturn kunit_try_catch_throw(struct kunit_try_catch *try_catch) { try_catch->try_result = -EFAULT; - complete_and_exit(try_catch->try_completion, -EFAULT); + kthread_complete_and_exit(try_catch->try_completion, -EFAULT); } EXPORT_SYMBOL_GPL(kunit_try_catch_throw); @@ -27,7 +27,7 @@ static int kunit_generic_run_threadfn_adapter(void *data) try_catch->try(try_catch->context); - complete_and_exit(try_catch->try_completion, 0); + kthread_complete_and_exit(try_catch->try_completion, 0); } static unsigned long kunit_test_timeout(void) diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 120e9598c11a..282273a1ffa5 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -171,7 +171,7 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func, "kthread_exit", "make_task_dead", "__module_put_and_kthread_exit", - "complete_and_exit", + "kthread_complete_and_exit", "__reiserfs_panic", "lbug_with_loc", "fortify_panic", From 40966e316f86b8cfd83abd31ccb4df729309d3e7 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Thu, 2 Dec 2021 09:56:14 -0600 Subject: [PATCH 12/44] kthread: Ensure struct kthread is present for all kthreads Today the rules are a bit iffy and arbitrary about which kernel threads have struct kthread present. Both idle threads and threads started with create_kthread want struct kthread present, so that is effectively all kernel threads. Make the rule that if PF_KTHREAD is set and the task is running then struct kthread is present. This will allow the kernel thread code to use tsk->exit_code with different semantics from ordinary processes. To ensure that struct kthread is present for all kernel threads move its allocation into copy_process. Add a deallocation of struct kthread in exec for processes that were kernel threads. Move the allocation of struct kthread for the initial thread earlier so that it is not repeated for each additional idle thread. Move the initialization of struct kthread into set_kthread_struct so that the structure is always and reliably initialized. Clear set_child_tid in free_kthread_struct to ensure the kthread struct is reliably freed during exec. The function free_kthread_struct does not need to clear vfork_done during exec as exec_mm_release called from exec_mmap has already cleared vfork_done. Signed-off-by: "Eric W.
Biederman" --- fs/exec.c | 2 ++ include/linux/kthread.h | 2 +- kernel/fork.c | 4 ++++ kernel/kthread.c | 31 ++++++++++++++----------------- kernel/sched/core.c | 16 ++++++++-------- 5 files changed, 29 insertions(+), 26 deletions(-) diff --git a/fs/exec.c b/fs/exec.c index 537d92c41105..59cac7c18178 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1307,6 +1307,8 @@ int begin_new_exec(struct linux_binprm * bprm) */ force_uaccess_begin(); + if (me->flags & PF_KTHREAD) + free_kthread_struct(me); me->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD | PF_NOFREEZE | PF_NO_SETAFFINITY); flush_thread(); diff --git a/include/linux/kthread.h b/include/linux/kthread.h index d86a7e3b9a52..4f3433afb54b 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h @@ -33,7 +33,7 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data), unsigned int cpu, const char *namefmt); -void set_kthread_struct(struct task_struct *p); +bool set_kthread_struct(struct task_struct *p); void kthread_set_per_cpu(struct task_struct *k, int cpu); bool kthread_is_per_cpu(struct task_struct *k); diff --git a/kernel/fork.c b/kernel/fork.c index 3244cc56b697..04fa3e5d97af 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -2118,6 +2118,10 @@ static __latent_entropy struct task_struct *copy_process( p->io_context = NULL; audit_set_context(p, NULL); cgroup_fork(p); + if (p->flags & PF_KTHREAD) { + if (!set_kthread_struct(p)) + goto bad_fork_cleanup_threadgroup_lock; + } #ifdef CONFIG_NUMA p->mempolicy = mpol_dup(p->mempolicy); if (IS_ERR(p->mempolicy)) { diff --git a/kernel/kthread.c b/kernel/kthread.c index 4388d6694a7f..8e5f44bed027 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -93,20 +93,27 @@ static inline struct kthread *__to_kthread(struct task_struct *p) return kthread; } -void set_kthread_struct(struct task_struct *p) +bool set_kthread_struct(struct task_struct *p) { struct kthread *kthread; - if (__to_kthread(p)) - return; + if (WARN_ON_ONCE(to_kthread(p))) + return false; kthread = kzalloc(sizeof(*kthread), GFP_KERNEL); + if (!kthread) + return false; + + init_completion(&kthread->exited); + init_completion(&kthread->parked); + p->vfork_done = &kthread->exited; + /* * We abuse ->set_child_tid to avoid the new member and because it - * can't be wrongly copied by copy_process(). We also rely on fact - * that the caller can't exec, so PF_KTHREAD can't be cleared. + * can't be wrongly copied by copy_process(). */ p->set_child_tid = (__force void __user *)kthread; + return true; } void free_kthread_struct(struct task_struct *k) @@ -114,13 +121,13 @@ void free_kthread_struct(struct task_struct *k) struct kthread *kthread; /* - * Can be NULL if this kthread was created by kernel_thread() - * or if kmalloc() in kthread() failed. + * Can be NULL if kmalloc() in set_kthread_struct() failed. */ kthread = to_kthread(k); #ifdef CONFIG_BLK_CGROUP WARN_ON_ONCE(kthread && kthread->blkcg_css); #endif + k->set_child_tid = (__force void __user *)NULL; kfree(kthread); } @@ -315,7 +322,6 @@ static int kthread(void *_create) struct kthread *self; int ret; - set_kthread_struct(current); self = to_kthread(current); /* If user was SIGKILLed, I release the structure. 
*/ @@ -325,17 +331,8 @@ static int kthread(void *_create) kthread_exit(-EINTR); } - if (!self) { - create->result = ERR_PTR(-ENOMEM); - complete(done); - kthread_exit(-ENOMEM); - } - self->threadfn = threadfn; self->data = data; - init_completion(&self->exited); - init_completion(&self->parked); - current->vfork_done = &self->exited; /* * The new thread inherited kthreadd's priority and CPU mask. Reset diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 3c9b0fda64ac..0404a8c572a1 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -8599,14 +8599,6 @@ void __init init_idle(struct task_struct *idle, int cpu) __sched_fork(0, idle); - /* - * The idle task doesn't need the kthread struct to function, but it - * is dressed up as a per-CPU kthread and thus needs to play the part - * if we want to avoid special-casing it in code that deals with per-CPU - * kthreads. - */ - set_kthread_struct(idle); - raw_spin_lock_irqsave(&idle->pi_lock, flags); raw_spin_rq_lock(rq); @@ -9427,6 +9419,14 @@ void __init sched_init(void) mmgrab(&init_mm); enter_lazy_tlb(&init_mm, current); + /* + * The idle task doesn't need the kthread struct to function, but it + * is dressed up as a per-CPU kthread and thus needs to play the part + * if we want to avoid special-casing it in code that deals with per-CPU + * kthreads. + */ + WARN_ON(set_kthread_struct(current)); + /* * Make us the idle thread. Technically, schedule() should not be * called from this thread, however somewhere below it might be, From 6b1248798eb6f6d5285db214299996ecc5dc1e6b Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Fri, 3 Dec 2021 11:42:49 -0600 Subject: [PATCH 13/44] exit/kthread: Move the exit code for kernel threads into struct kthread The exit code of kernel threads has different semantics than the exit_code of userspace tasks. To avoid confusion and allow the userspace implementation to change as needed move the kernel thread exit code into struct kthread. Signed-off-by: "Eric W. Biederman" --- kernel/kthread.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/kernel/kthread.c b/kernel/kthread.c index 8e5f44bed027..9c6c532047c4 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -52,6 +52,7 @@ struct kthread_create_info struct kthread { unsigned long flags; unsigned int cpu; + int result; int (*threadfn)(void *); void *data; mm_segment_t oldfs; @@ -287,7 +288,9 @@ EXPORT_SYMBOL_GPL(kthread_parkme); */ void __noreturn kthread_exit(long result) { - do_exit(result); + struct kthread *kthread = to_kthread(current); + kthread->result = result; + do_exit(0); } /** @@ -679,7 +682,7 @@ int kthread_stop(struct task_struct *k) kthread_unpark(k); wake_up_process(k); wait_for_completion(&kthread->exited); - ret = k->exit_code; + ret = kthread->result; put_task_struct(k); trace_sched_kthread_stop_ret(ret); From 5eb6f22823e023ee13391978c329c4bd18f82552 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Tue, 14 Dec 2021 11:25:01 -0600 Subject: [PATCH 14/44] exit/kthread: Fix the kerneldoc comment for kthread_complete_and_exit I misspelled kthread_complete_and_exit in the kernel doc comment fix it. Link: https://lkml.kernel.org/r/202112141329.KBkyJ5ql-lkp@intel.com Link: https://lkml.kernel.org/r/202112141422.Cykr6YUS-lkp@intel.com Reported-by: kernel test robot Fixes: cead18552660 ("exit: Rename complete_and_exit to kthread_complete_and_exit") Signed-off-by: "Eric W. 
Biederman" --- kernel/kthread.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/kthread.c b/kernel/kthread.c index 9c6c532047c4..c14707d15341 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -294,7 +294,7 @@ void __noreturn kthread_exit(long result) } /** - * kthread_complete_and exit - Exit the current kthread. + * kthread_complete_and_exit - Exit the current kthread. * @comp: Completion to complete * @code: The integer value to return to kthread_stop(). * From 1fb466dff904e4a72282af336f2c355f011eec61 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Wed, 15 Dec 2021 11:24:50 -0600 Subject: [PATCH 15/44] objtool: Add a missing comma to avoid string concatenation Recently the kbuild robot reported two new errors: >> lib/kunit/kunit-example-test.o: warning: objtool: .text.unlikely: unexpected end of section >> arch/x86/kernel/dumpstack.o: warning: objtool: oops_end() falls through to next function show_opcodes() I don't know why they did not occur in my test setup but after digging it I realized I had accidentally dropped a comma in tools/objtool/check.c when I renamed rewind_stack_do_exit to rewind_stack_and_make_dead. Add that comma back to fix objtool errors. Link: https://lkml.kernel.org/r/202112140949.Uq5sFKR1-lkp@intel.com Fixes: 0e25498f8cd4 ("exit: Add and use make_task_dead.") Reported-by: kernel test robot Signed-off-by: "Eric W. Biederman" --- tools/objtool/check.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 282273a1ffa5..3fc2c57f6124 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -177,7 +177,7 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func, "fortify_panic", "usercopy_abort", "machine_real_restart", - "rewind_stack_and_make_dead" + "rewind_stack_and_make_dead", "kunit_try_catch_throw", "xen_start_kernel", "cpu_bringup_and_idle", From 6692c98c7df53502adb8b8b73ab9bcbd399f7a06 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Mon, 20 Dec 2021 10:20:14 -0600 Subject: [PATCH 16/44] fork: Stop protecting back_fork_cleanup_cgroup_lock with CONFIG_NUMA Mark Brown reported: > This is also causing further build errors including but not limited to: > > /tmp/next/build/kernel/fork.c: In function 'copy_process': > /tmp/next/build/kernel/fork.c:2106:4: error: label 'bad_fork_cleanup_threadgroup_lock' used but not defined > 2106 | goto bad_fork_cleanup_threadgroup_lock; > | ^~~~ It turns out that I messed up and was depending upon a label protected by an ifdef. Move the label out of the ifdef as the ifdef around the label no longer makes sense (if it ever did). Link: https://lkml.kernel.org/r/YbugCP144uxXvRsk@sirena.org.uk Fixes: 40966e316f86 ("kthread: Ensure struct kthread is present for all kthreads") Signed-off-by: "Eric W. Biederman" --- kernel/fork.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/fork.c b/kernel/fork.c index 04fa3e5d97af..23ad62965fbf 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -2464,8 +2464,8 @@ bad_fork_cleanup_policy: lockdep_free_task(p); #ifdef CONFIG_NUMA mpol_put(p->mempolicy); -bad_fork_cleanup_threadgroup_lock: #endif +bad_fork_cleanup_threadgroup_lock: delayacct_tsk_free(p); bad_fork_cleanup_count: dec_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1); From ff8288ff475e47544569359772f88f2b39fd2cf9 Mon Sep 17 00:00:00 2001 From: "Eric W. 
Biederman" Date: Mon, 20 Dec 2021 10:42:18 -0600 Subject: [PATCH 17/44] fork: Rename bad_fork_cleanup_threadgroup_lock to bad_fork_cleanup_delayacct I just fixed a bug in copy_process when using the label bad_fork_cleanup_threadgroup_lock. While fixing the bug I looked closer at the label and realized it has been misnamed since 568ac888215c ("cgroup: reduce read locked section of cgroup_threadgroup_rwsem during fork"). Fix the name so that fork is easier to understand. Signed-off-by: "Eric W. Biederman" --- kernel/fork.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/kernel/fork.c b/kernel/fork.c index 23ad62965fbf..0816be1bb044 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -2120,14 +2120,14 @@ static __latent_entropy struct task_struct *copy_process( cgroup_fork(p); if (p->flags & PF_KTHREAD) { if (!set_kthread_struct(p)) - goto bad_fork_cleanup_threadgroup_lock; + goto bad_fork_cleanup_delayacct; } #ifdef CONFIG_NUMA p->mempolicy = mpol_dup(p->mempolicy); if (IS_ERR(p->mempolicy)) { retval = PTR_ERR(p->mempolicy); p->mempolicy = NULL; - goto bad_fork_cleanup_threadgroup_lock; + goto bad_fork_cleanup_delayacct; } #endif #ifdef CONFIG_CPUSETS @@ -2465,7 +2465,7 @@ bad_fork_cleanup_policy: #ifdef CONFIG_NUMA mpol_put(p->mempolicy); #endif -bad_fork_cleanup_threadgroup_lock: +bad_fork_cleanup_delayacct: delayacct_tsk_free(p); bad_fork_cleanup_count: dec_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1); From dd621ee0cf8eb32445c8f5f26d3b7555953071d8 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Tue, 21 Dec 2021 11:41:14 -0600 Subject: [PATCH 18/44] kthread: Warn about failed allocations for the init kthread Failed allocates are not expected when setting up the initial task and it is not really possible to handle them either. So I added a warning to report if such an allocation failure ever happens. Correct the sense of the warning so it warns when an allocation failure happens not when the allocation succeeded. Oops. Reported-by: kernel test robot Reported-by: Stephen Rothwell Reported-by: Naresh Kamboju Link: https://lkml.kernel.org/r/20211221231611.785b74cf@canb.auug.org.au Link: https://lkml.kernel.org/r/CA+G9fYvLaR5CF777CKeWTO+qJFTN6vAvm95gtzN+7fw3Wi5hkA@mail.gmail.com Link: https://lkml.kernel.org/r/20211216102956.GC10708@xsang-OptiPlex-9020 Fixes: 40966e316f86 ("kthread: Ensure struct kthread is present for all kthreads") Signed-off-by: "Eric W. Biederman" --- kernel/sched/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 0404a8c572a1..ee222b89c692 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -9425,7 +9425,7 @@ void __init sched_init(void) * if we want to avoid special-casing it in code that deals with per-CPU * kthreads. */ - WARN_ON(set_kthread_struct(current)); + WARN_ON(!set_kthread_struct(current)); /* * Make us the idle thread. Technically, schedule() should not be From 00580f03af5eb2a527875b4a80a5effd95bda2fa Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Wed, 22 Dec 2021 16:57:50 -0600 Subject: [PATCH 19/44] kthread: Never put_user the set_child_tid address Kernel threads abuse set_child_tid. Historically that has been fine as set_child_tid was initialized after the kernel thread had been forked. Unfortunately storing struct kthread in set_child_tid after the thread is running makes struct kthread being unusable for storing result codes of the thread. 
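To make that failure mode concrete, here is a minimal userspace sketch, not kernel code: the struct layout, the values, and names such as fake_kthread are invented for illustration. It shows how storing a thread id through a pointer that actually aliases a struct kthread silently clobbers the first member of that struct, which is roughly what the put_user() in schedule_tail amounted to once set_child_tid held the kthread pointer from fork time.

#include <stdio.h>
#include <string.h>

struct fake_kthread {
	unsigned long flags;	/* first member, overlaps the bytes the tid lands in */
	int result;
};

int main(void)
{
	struct fake_kthread kt = { .flags = 0xdeadbeefUL, .result = 42 };
	int tid = 12345;

	/* Stand-in for put_user(task_pid_vnr(current), current->set_child_tid)
	 * when set_child_tid secretly points at a struct kthread. */
	memcpy(&kt, &tid, sizeof(tid));

	printf("flags after tid store: %#lx, result: %d\n", kt.flags, kt.result);
	return 0;
}

On a typical little-endian 64-bit build the low four bytes of flags are silently overwritten, which is the kind of corruption being described here.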
When set_child_tid is set to struct kthread during fork that results in schedule_tail writing the thread id to the beggining of struct kthread (if put_user does not realize it is a kernel address). Solve this by skipping the put_user for all kthreads. Reported-by: Nathan Chancellor Link: https://lkml.kernel.org/r/YcNsG0Lp94V13whH@archlinux-ax161 Signed-off-by: "Eric W. Biederman" --- kernel/sched/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index ee222b89c692..d8adbea77be1 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4908,7 +4908,7 @@ asmlinkage __visible void schedule_tail(struct task_struct *prev) finish_task_switch(prev); preempt_enable(); - if (current->set_child_tid) + if (!(current->flags & PF_KTHREAD) && current->set_child_tid) put_user(task_pid_vnr(current), current->set_child_tid); calculate_sigpending(); From e32cf5dfbe227b355776948b2c9b5691b84d1cbd Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Wed, 22 Dec 2021 22:10:09 -0600 Subject: [PATCH 20/44] kthread: Generalize pf_io_worker so it can point to struct kthread The point of using set_child_tid to hold the kthread pointer was that it already did what is necessary. There are now restrictions on when set_child_tid can be initialized and when set_child_tid can be used in schedule_tail. Which indicates that continuing to use set_child_tid to hold the kthread pointer is a bad idea. Instead of continuing to use the set_child_tid field of task_struct generalize the pf_io_worker field of task_struct and use it to hold the kthread pointer. Rename pf_io_worker (which is a void * pointer) to worker_private so it can be used to store kthreads struct kthread pointer. Update the kthread code to store the kthread pointer in the worker_private field. Remove the places where set_child_tid had to be dealt with carefully because kthreads also used it. Link: https://lkml.kernel.org/r/CAHk-=wgtFAA9SbVYg0gR1tqPMC17-NYcs0GQkaYg1bGhh1uJQQ@mail.gmail.com Link: https://lkml.kernel.org/r/87a6grvqy8.fsf_-_@email.froward.int.ebiederm.org Suggested-by: Linus Torvalds Signed-off-by: "Eric W. 
Biederman" --- fs/io-wq.c | 6 +++--- fs/io-wq.h | 2 +- include/linux/sched.h | 4 ++-- kernel/fork.c | 8 +------- kernel/kthread.c | 14 +++++--------- kernel/sched/core.c | 2 +- 6 files changed, 13 insertions(+), 23 deletions(-) diff --git a/fs/io-wq.c b/fs/io-wq.c index 88202de519f6..e4fc7384b40c 100644 --- a/fs/io-wq.c +++ b/fs/io-wq.c @@ -657,7 +657,7 @@ loop: */ void io_wq_worker_running(struct task_struct *tsk) { - struct io_worker *worker = tsk->pf_io_worker; + struct io_worker *worker = tsk->worker_private; if (!worker) return; @@ -675,7 +675,7 @@ void io_wq_worker_running(struct task_struct *tsk) */ void io_wq_worker_sleeping(struct task_struct *tsk) { - struct io_worker *worker = tsk->pf_io_worker; + struct io_worker *worker = tsk->worker_private; if (!worker) return; @@ -694,7 +694,7 @@ void io_wq_worker_sleeping(struct task_struct *tsk) static void io_init_new_worker(struct io_wqe *wqe, struct io_worker *worker, struct task_struct *tsk) { - tsk->pf_io_worker = worker; + tsk->worker_private = worker; worker->task = tsk; set_cpus_allowed_ptr(tsk, wqe->cpu_mask); tsk->flags |= PF_NO_SETAFFINITY; diff --git a/fs/io-wq.h b/fs/io-wq.h index 41bf37674a49..c7c23947cbcd 100644 --- a/fs/io-wq.h +++ b/fs/io-wq.h @@ -200,6 +200,6 @@ static inline void io_wq_worker_running(struct task_struct *tsk) static inline bool io_wq_current_is_worker(void) { return in_task() && (current->flags & PF_IO_WORKER) && - current->pf_io_worker; + current->worker_private; } #endif diff --git a/include/linux/sched.h b/include/linux/sched.h index 78c351e35fec..52f2fdffa3ab 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -987,8 +987,8 @@ struct task_struct { /* CLONE_CHILD_CLEARTID: */ int __user *clear_child_tid; - /* PF_IO_WORKER */ - void *pf_io_worker; + /* PF_KTHREAD | PF_IO_WORKER */ + void *worker_private; u64 utime; u64 stime; diff --git a/kernel/fork.c b/kernel/fork.c index 0816be1bb044..6f0293cb29c9 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -950,7 +950,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) tsk->splice_pipe = NULL; tsk->task_frag.page = NULL; tsk->wake_q.next = NULL; - tsk->pf_io_worker = NULL; + tsk->worker_private = NULL; account_kernel_stack(tsk, 1); @@ -2032,12 +2032,6 @@ static __latent_entropy struct task_struct *copy_process( siginitsetinv(&p->blocked, sigmask(SIGKILL)|sigmask(SIGSTOP)); } - /* - * This _must_ happen before we call free_task(), i.e. before we jump - * to any of the bad_fork_* labels. This is to avoid freeing - * p->set_child_tid which is (ab)used as a kthread's data pointer for - * kernel threads (PF_KTHREAD). - */ p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? args->child_tid : NULL; /* * Clear TID on mm_release()? diff --git a/kernel/kthread.c b/kernel/kthread.c index c14707d15341..261a3c3b9c6c 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -72,7 +72,7 @@ enum KTHREAD_BITS { static inline struct kthread *to_kthread(struct task_struct *k) { WARN_ON(!(k->flags & PF_KTHREAD)); - return (__force void *)k->set_child_tid; + return k->worker_private; } /* @@ -80,7 +80,7 @@ static inline struct kthread *to_kthread(struct task_struct *k) * * Per construction; when: * - * (p->flags & PF_KTHREAD) && p->set_child_tid + * (p->flags & PF_KTHREAD) && p->worker_private * * the task is both a kthread and struct kthread is persistent. 
However * PF_KTHREAD on it's own is not, kernel_thread() can exec() (See umh.c and @@ -88,7 +88,7 @@ static inline struct kthread *to_kthread(struct task_struct *k) */ static inline struct kthread *__to_kthread(struct task_struct *p) { - void *kthread = (__force void *)p->set_child_tid; + void *kthread = p->worker_private; if (kthread && !(p->flags & PF_KTHREAD)) kthread = NULL; return kthread; @@ -109,11 +109,7 @@ bool set_kthread_struct(struct task_struct *p) init_completion(&kthread->parked); p->vfork_done = &kthread->exited; - /* - * We abuse ->set_child_tid to avoid the new member and because it - * can't be wrongly copied by copy_process(). - */ - p->set_child_tid = (__force void __user *)kthread; + p->worker_private = kthread; return true; } @@ -128,7 +124,7 @@ void free_kthread_struct(struct task_struct *k) #ifdef CONFIG_BLK_CGROUP WARN_ON_ONCE(kthread && kthread->blkcg_css); #endif - k->set_child_tid = (__force void __user *)NULL; + k->worker_private = NULL; kfree(kthread); } diff --git a/kernel/sched/core.c b/kernel/sched/core.c index d8adbea77be1..ee222b89c692 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4908,7 +4908,7 @@ asmlinkage __visible void schedule_tail(struct task_struct *prev) finish_task_switch(prev); preempt_enable(); - if (!(current->flags & PF_KTHREAD) && current->set_child_tid) + if (current->set_child_tid) put_user(task_pid_vnr(current), current->set_child_tid); calculate_sigpending(); From 4f0712ccec09c071e221242a2db9a6779a55a949 Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Mon, 27 Dec 2021 11:48:49 -0700 Subject: [PATCH 21/44] hexagon: Fix function name in die() When building ARCH=hexagon defconfig: arch/hexagon/kernel/traps.c:217:2: error: implicit declaration of function 'make_dead_task' [-Werror,-Wimplicit-function-declaration] make_dead_task(err); ^ The function's name is make_task_dead(), change it so there is no more build error. Fixes: 0e25498f8cd4 ("exit: Add and use make_task_dead.") Signed-off-by: Nathan Chancellor Link: https://lkml.kernel.org/r/20211227184851.2297759-2-nathan@kernel.org Signed-off-by: Eric W. Biederman --- arch/hexagon/kernel/traps.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/hexagon/kernel/traps.c b/arch/hexagon/kernel/traps.c index 6dd6cf0ab711..1240f038cce0 100644 --- a/arch/hexagon/kernel/traps.c +++ b/arch/hexagon/kernel/traps.c @@ -214,7 +214,7 @@ int die(const char *str, struct pt_regs *regs, long err) panic("Fatal exception"); oops_exit(); - make_dead_task(err); + make_task_dead(err); return 0; } From ab4ababdf77ccc56c7301c751dff49c79709c51c Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Mon, 27 Dec 2021 11:48:50 -0700 Subject: [PATCH 22/44] h8300: Fix build errors from do_exit() to make_task_dead() transition When building ARCH=h8300 defconfig: arch/h8300/kernel/traps.c: In function 'die': arch/h8300/kernel/traps.c:109:2: error: implicit declaration of function 'make_dead_task' [-Werror=implicit-function-declaration] 109 | make_dead_task(SIGSEGV); | ^~~~~~~~~~~~~~ arch/h8300/mm/fault.c: In function 'do_page_fault': arch/h8300/mm/fault.c:54:2: error: implicit declaration of function 'make_dead_task' [-Werror=implicit-function-declaration] 54 | make_dead_task(SIGKILL); | ^~~~~~~~~~~~~~ The function's name is make_task_dead(), change it so there is no more build error. 
Additionally, include linux/sched/task.h in arch/h8300/kernel/traps.c to avoid the same error because do_exit()'s declaration is in kernel.h but make_task_dead()'s is in task.h, which is not included in traps.c. Fixes: 0e25498f8cd4 ("exit: Add and use make_task_dead.") Signed-off-by: Nathan Chancellor Link: https://lkml.kernel.org/r/20211227184851.2297759-3-nathan@kernel.org Signed-off-by: Eric W. Biederman --- arch/h8300/kernel/traps.c | 3 ++- arch/h8300/mm/fault.c | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/h8300/kernel/traps.c b/arch/h8300/kernel/traps.c index 3d4e0bde37ae..a92c39e03802 100644 --- a/arch/h8300/kernel/traps.c +++ b/arch/h8300/kernel/traps.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -106,7 +107,7 @@ void die(const char *str, struct pt_regs *fp, unsigned long err) dump(fp); spin_unlock_irq(&die_lock); - make_dead_task(SIGSEGV); + make_task_dead(SIGSEGV); } static int kstack_depth_to_print = 24; diff --git a/arch/h8300/mm/fault.c b/arch/h8300/mm/fault.c index 0223528565dd..b465441f490d 100644 --- a/arch/h8300/mm/fault.c +++ b/arch/h8300/mm/fault.c @@ -51,7 +51,7 @@ asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address, printk(" at virtual address %08lx\n", address); if (!user_mode(regs)) die("Oops", regs, error_code); - make_dead_task(SIGKILL); + make_task_dead(SIGKILL); return 1; } From 751971af2e3615dc5bd12674080bc795505fefeb Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Mon, 27 Dec 2021 11:48:51 -0700 Subject: [PATCH 23/44] csky: Fix function name in csky_alignment() and die() When building ARCH=csky defconfig: arch/csky/kernel/traps.c: In function 'die': arch/csky/kernel/traps.c:112:17: error: implicit declaration of function 'make_dead_task' [-Werror=implicit-function-declaration] 112 | make_dead_task(SIGSEGV); | ^~~~~~~~~~~~~~ The function's name is make_task_dead(), change it so there is no more build error. Fixes: 0e25498f8cd4 ("exit: Add and use make_task_dead.") Signed-off-by: Nathan Chancellor Reviewed-by: Guo Ren Link: https://lkml.kernel.org/r/20211227184851.2297759-4-nathan@kernel.org Signed-off-by: Eric W. Biederman --- arch/csky/abiv1/alignment.c | 2 +- arch/csky/kernel/traps.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/csky/abiv1/alignment.c b/arch/csky/abiv1/alignment.c index 5e2fb45d605c..2df115d0e210 100644 --- a/arch/csky/abiv1/alignment.c +++ b/arch/csky/abiv1/alignment.c @@ -294,7 +294,7 @@ bad_area: __func__, opcode, rz, rx, imm, addr); show_regs(regs); bust_spinlocks(0); - make_dead_task(SIGKILL); + make_task_dead(SIGKILL); } force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)addr); diff --git a/arch/csky/kernel/traps.c b/arch/csky/kernel/traps.c index 88a47035b925..50481d12d236 100644 --- a/arch/csky/kernel/traps.c +++ b/arch/csky/kernel/traps.c @@ -109,7 +109,7 @@ void die(struct pt_regs *regs, const char *str) if (panic_on_oops) panic("Fatal exception"); if (ret != NOTIFY_STOP) - make_dead_task(SIGSEGV); + make_task_dead(SIGSEGV); } void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr) From 85be9ae7b63092895b6e7ac87a3ef383c679866c Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Wed, 5 Jan 2022 14:59:48 -0600 Subject: [PATCH 24/44] exit/xtensa: In arch/xtensa/entry.S:Linvalid_mask call make_task_dead There have historically been two big uses of do_exit. The first is it's design use to be the guts of the exit(2) system call. 
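As a brief aside on that first use: the sys_exit() wrapper visible as context in the kernel/exit.c hunk earlier in this series passes do_exit((error_code & 0xff) << 8), and that shifted value is what userspace reads back through the standard wait status macros. A small userspace check, assuming nothing beyond fork(2), waitpid(2) and WEXITSTATUS():

#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();

	if (pid == 0)
		_exit(42);	/* kernel side ends up in do_exit((42 & 0xff) << 8) */

	int status;
	if (pid < 0 || waitpid(pid, &status, 0) < 0) {
		perror("fork/waitpid");
		return 1;
	}

	/* For a normal exit the raw status is the same shifted value, 0x2a00. */
	printf("raw status %#x, WEXITSTATUS %d\n", status, WEXITSTATUS(status));
	return 0;
}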
The second use is to terminate a task after something catastrophic has happened like a NULL pointer in kernel code. The function make_task_dead has been added to accomidate the second use. The call to do_exit in Linvalidmask is clearly not a normal userspace exit. As failure handling there are two possible ways to go. If userspace can trigger the issue force_exit_sig should be called. Otherwise make_task_dead probably from the implementation of die is appropriate. Replace the call of do_exit in Linvalidmask with make_task_dead as I don't know xtensa and especially xtensa assembly language well enough to do anything else. Link: https://lkml.kernel.org/r/YdUmN7n4W5YETUhW@zeniv-ca.linux.org.uk Suggested-by: Al Viro Signed-off-by: "Eric W. Biederman" --- arch/xtensa/kernel/entry.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S index 99ab3c1a3387..a1029a5b6a1d 100644 --- a/arch/xtensa/kernel/entry.S +++ b/arch/xtensa/kernel/entry.S @@ -1433,7 +1433,7 @@ ENTRY(fast_syscall_spill_registers) rsync movi abi_arg0, SIGSEGV - abi_call do_exit + abi_call make_task_dead /* shouldn't return, so panic */ From 912616f142bfeb1dc41f40dbe7ce38331886a94a Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Wed, 5 Jan 2022 16:30:21 -0600 Subject: [PATCH 25/44] exit: Guarantee make_task_dead leaks the tsk when calling do_task_exit Change the task state to EXIT_DEAD and take an extra rcu_refernce to guarantee the task will not be reaped and that it will not be freed. Link: https://lkml.kernel.org/r/YdUzjrLAlRiNLQp2@zeniv-ca.linux.org.uk Pointed-out-by: Al Viro Fixes: 7f80a2fd7db9 ("exit: Stop poorly open coding do_task_dead in make_task_dead") Signed-off-by: "Eric W. Biederman" --- kernel/exit.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/kernel/exit.c b/kernel/exit.c index 6c4b04531f17..db4eeb7fc680 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -885,6 +885,8 @@ void __noreturn make_task_dead(int signr) if (unlikely(tsk->flags & PF_EXITING)) { pr_alert("Fixing recursive fault but reboot is needed!\n"); futex_exit_recursive(tsk); + tsk->exit_state = EXIT_DEAD; + refcount_inc(&tsk->rcu_users); do_task_dead(); } From de77c3a5b95c95a4915142071643d94e3e1ada35 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Fri, 7 Jan 2022 12:18:12 -0600 Subject: [PATCH 26/44] exit: Move force_uaccess back into do_exit With kernel threads on architectures that still have set_fs/get_fs running as KERNEL_DS moving force_uaccess_begin does not appear safe. Calling force_uaccess_begin is a noop on anything people care about. Update the comment to explain why this code while looking like an obvious candidate for moving to make_task_dead probably needs to remain in do_exit until set_fs/get_fs are entirely removed from the kernel. Fixes: 05ea0424f0e2 ("exit: Move oops specific logic from do_exit into make_task_dead") Suggested-by: Al Viro Link: https://lkml.kernel.org/r/YdUxGKRcSiDy8jGg@zeniv-ca.linux.org.uk Signed-off-by: "Eric W. Biederman" --- kernel/exit.c | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/kernel/exit.c b/kernel/exit.c index db4eeb7fc680..fc0726cb22db 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -737,6 +737,20 @@ void __noreturn do_exit(long code) WARN_ON(blk_needs_flush_plug(tsk)); + /* + * If do_dead is called because this processes oopsed, it's possible + * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before + * continuing. 
Amongst other possible reasons, this is to prevent + * mm_release()->clear_child_tid() from writing to a user-controlled + * kernel address. + * + * On uptodate architectures force_uaccess_begin is a noop. On + * architectures that still have set_fs/get_fs in addition to handling + * oopses handles kernel threads that run as set_fs(KERNEL_DS) by + * default. + */ + force_uaccess_begin(); + profile_task_exit(tsk); kcov_task_exit(tsk); @@ -862,15 +876,6 @@ void __noreturn make_task_dead(int signr) if (unlikely(!tsk->pid)) panic("Attempted to kill the idle task!"); - /* - * If make_task_dead is called because this processes oopsed, it's possible - * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before - * continuing. Amongst other possible reasons, this is to prevent - * mm_release()->clear_child_tid() from writing to a user-controlled - * kernel address. - */ - force_uaccess_begin(); - if (unlikely(in_atomic())) { pr_info("note: %s[%d] exited with preempt_count %d\n", current->comm, task_pid_nr(current), From 98b24b16b2aebffabf5b8670f44f19666c1e029f Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Fri, 19 Nov 2021 11:29:48 -0600 Subject: [PATCH 27/44] signal: Have the oom killer detect coredumps using signal->core_state In preparation for removing the flag SIGNAL_GROUP_COREDUMP, change __task_will_free_mem to test signal->core_state instead of the flag SIGNAL_GROUP_COREDUMP. Both fields are protected by siglock and both live in signal_struct so there are no real tradeoffs here, just a change to which field is being tested. Link: https://lkml.kernel.org/r/20211213225350.27481-3-ebiederm@xmission.com Signed-off-by: "Eric W. Biederman" --- mm/oom_kill.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 1ddabefcfb5a..5c92aad8ca1a 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -793,7 +793,7 @@ static inline bool __task_will_free_mem(struct task_struct *task) * coredump_task_exit(), so the oom killer cannot assume that * the process will promptly exit and release memory. */ - if (sig->flags & SIGNAL_GROUP_COREDUMP) + if (sig->core_state) return false; if (sig->flags & SIGNAL_GROUP_EXIT) From a0287db0f1d6918919203ba31fd7cda59bf889e8 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Sat, 8 Jan 2022 09:34:50 -0600 Subject: [PATCH 28/44] signal: Have prepare_signal detect coredumps using signal->core_state In preparation for removing the flag SIGNAL_GROUP_COREDUMP, change prepare_signal to test signal->core_state instead of the flag SIGNAL_GROUP_COREDUMP. Both fields are protected by siglock and both live in signal_struct so there are no real tradeoffs here, just a change to which field is being tested. Link: https://lkml.kernel.org/r/20211213225350.27481-1-ebiederm@xmission.com Link: https://lkml.kernel.org/r/875yqu14co.fsf_-_@email.froward.int.ebiederm.org Signed-off-by: "Eric W. Biederman" --- kernel/signal.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/signal.c b/kernel/signal.c index 8272cac5f429..f95a4423519d 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -906,8 +906,8 @@ static bool prepare_signal(int sig, struct task_struct *p, bool force) struct task_struct *t; sigset_t flush; - if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) { - if (!(signal->flags & SIGNAL_GROUP_EXIT)) + if ((signal->flags & SIGNAL_GROUP_EXIT) || signal->core_state) { + if (signal->core_state) return sig == SIGKILL; /* * The process is in the middle of dying, nothing to do. 
From 7ba03471ac4ad2432e5ccf67d9d4ab03c177578a Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Sat, 8 Jan 2022 11:01:12 -0600 Subject: [PATCH 29/44] signal: Make coredump handling explicit in complete_signal Ever since commit 6cd8f0acae34 ("coredump: ensure that SIGKILL always kills the dumping thread") it has been possible for a SIGKILL received during a coredump to set SIGNAL_GROUP_EXIT and trigger a process shutdown (for a second time). Update the logic to explicitly allow coredumps so that coredumps can set SIGNAL_GROUP_EXIT and shutdown like an ordinary process. Link: https://lkml.kernel.org/r/87zgo6ytyf.fsf_-_@email.froward.int.ebiederm.org Signed-off-by: "Eric W. Biederman" --- kernel/signal.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/signal.c b/kernel/signal.c index f95a4423519d..0706c1345a71 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -1032,7 +1032,7 @@ static void complete_signal(int sig, struct task_struct *p, enum pid_type type) * then start taking the whole group down immediately. */ if (sig_fatal(p, sig) && - !(signal->flags & SIGNAL_GROUP_EXIT) && + (signal->core_state || !(signal->flags & SIGNAL_GROUP_EXIT)) && !sigismember(&t->real_blocked, sig) && (sig == SIGKILL || !p->ptrace)) { /* From 752dc9707567f39ed7850e21796cf2b467d71ad5 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Sat, 8 Jan 2022 09:44:58 -0600 Subject: [PATCH 30/44] signal: During coredumps set SIGNAL_GROUP_EXIT in zap_process There are only a few places that test SIGNAL_GROUP_EXIT and are not also already testing SIGNAL_GROUP_COREDUMP. This will not affect the callers of signal_group_exit as zap_process also sets group_exit_task so signal_group_exit will continue to return true at the same times. This does not affect wait_task_zombie as the none of the threads wind up in EXIT_ZOMBIE state during a coredump. This does not affect oom_kill.c:__task_will_free_mem as sig->core_state is tested and handled before SIGNAL_GROUP_EXIT is tested for. This does not affect complete_signal as signal->core_state is tested for to ensure the coredump case is handled appropriately. Link: https://lkml.kernel.org/r/20211213225350.27481-4-ebiederm@xmission.com Signed-off-by: "Eric W. Biederman" --- fs/coredump.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/fs/coredump.c b/fs/coredump.c index a6b3c196cdef..0864941a879b 100644 --- a/fs/coredump.c +++ b/fs/coredump.c @@ -347,13 +347,13 @@ out: return ispipe; } -static int zap_process(struct task_struct *start, int exit_code, int flags) +static int zap_process(struct task_struct *start, int exit_code) { struct task_struct *t; int nr = 0; /* ignore all signals except SIGKILL, see prepare_signal() */ - start->signal->flags = SIGNAL_GROUP_COREDUMP | flags; + start->signal->flags = SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP; start->signal->group_exit_code = exit_code; start->signal->group_stop_count = 0; @@ -378,7 +378,7 @@ static int zap_threads(struct task_struct *tsk, if (!signal_group_exit(tsk->signal)) { tsk->signal->core_state = core_state; tsk->signal->group_exit_task = tsk; - nr = zap_process(tsk, exit_code, 0); + nr = zap_process(tsk, exit_code); clear_tsk_thread_flag(tsk, TIF_SIGPENDING); tsk->flags |= PF_DUMPCORE; atomic_set(&core_state->nr_threads, nr); From 2f824d4d197e02275562359a2ae5274177ce500c Mon Sep 17 00:00:00 2001 From: "Eric W. 
Biederman" Date: Sat, 8 Jan 2022 09:48:31 -0600 Subject: [PATCH 31/44] signal: Remove SIGNAL_GROUP_COREDUMP After the previous cleanups "signal->core_state" is set whenever SIGNAL_GROUP_COREDUMP is set and "signal->core_state" is tested whenver the code wants to know if a coredump is in progress. The remaining tests of SIGNAL_GROUP_COREDUMP also test to see if SIGNAL_GROUP_EXIT is set. Similarly the only place that sets SIGNAL_GROUP_COREDUMP also sets SIGNAL_GROUP_EXIT. Which makes SIGNAL_GROUP_COREDUMP unecessary and redundant. So stop setting SIGNAL_GROUP_COREDUMP, stop testing SIGNAL_GROUP_COREDUMP, and remove it's definition. With the setting of SIGNAL_GROUP_COREDUMP gone, coredump_finish no longer needs to clear SIGNAL_GROUP_COREDUMP out of signal->flags by setting SIGNAL_GROUP_EXIT. Link: https://lkml.kernel.org/r/20211213225350.27481-5-ebiederm@xmission.com Signed-off-by: "Eric W. Biederman" --- fs/coredump.c | 3 +-- include/linux/sched/signal.h | 3 +-- kernel/signal.c | 2 +- 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/fs/coredump.c b/fs/coredump.c index 0864941a879b..fee1c57aee89 100644 --- a/fs/coredump.c +++ b/fs/coredump.c @@ -353,7 +353,7 @@ static int zap_process(struct task_struct *start, int exit_code) int nr = 0; /* ignore all signals except SIGKILL, see prepare_signal() */ - start->signal->flags = SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP; + start->signal->flags = SIGNAL_GROUP_EXIT; start->signal->group_exit_code = exit_code; start->signal->group_stop_count = 0; @@ -427,7 +427,6 @@ static void coredump_finish(bool core_dumped) if (core_dumped && !__fatal_signal_pending(current)) current->signal->group_exit_code |= 0x80; current->signal->group_exit_task = NULL; - current->signal->flags = SIGNAL_GROUP_EXIT; next = current->signal->core_state->dumper.next; current->signal->core_state = NULL; spin_unlock_irq(¤t->sighand->siglock); diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index fa26d2a58413..ecc10e148799 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h @@ -256,7 +256,6 @@ struct signal_struct { #define SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */ #define SIGNAL_STOP_CONTINUED 0x00000002 /* SIGCONT since WCONTINUED reap */ #define SIGNAL_GROUP_EXIT 0x00000004 /* group exit in progress */ -#define SIGNAL_GROUP_COREDUMP 0x00000008 /* coredump in progress */ /* * Pending notifications to parent. */ @@ -272,7 +271,7 @@ struct signal_struct { static inline void signal_set_stop_flags(struct signal_struct *sig, unsigned int flags) { - WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP)); + WARN_ON(sig->flags & SIGNAL_GROUP_EXIT); sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags; } diff --git a/kernel/signal.c b/kernel/signal.c index 0706c1345a71..bae231bc2f4a 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -906,7 +906,7 @@ static bool prepare_signal(int sig, struct task_struct *p, bool force) struct task_struct *t; sigset_t flush; - if ((signal->flags & SIGNAL_GROUP_EXIT) || signal->core_state) { + if (signal->flags & SIGNAL_GROUP_EXIT) { if (signal->core_state) return sig == SIGKILL; /* From 6ac79ec5378b675f91021c8073cde0eea59f81ad Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Fri, 19 Nov 2021 12:11:54 -0600 Subject: [PATCH 32/44] coredump: Stop setting signal->group_exit_task Currently the coredump code sets group_exit_task so that signal_group_exit() will return true during a coredump. 
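A side note on the coredump_finish() context shown above: the 0x80 ORed into group_exit_code is the "core dumped" bit that userspace reads back as WCOREDUMP(). A minimal userspace sketch, with the caveat that whether a core file is actually written, and hence whether the bit is set, depends on RLIMIT_CORE and the system's core_pattern:

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();

	if (pid == 0)
		abort();	/* die by SIGABRT, possibly dumping core */

	int status;
	if (pid < 0 || waitpid(pid, &status, 0) < 0) {
		perror("fork/waitpid");
		return 1;
	}

	if (WIFSIGNALED(status))
		printf("killed by signal %d, core dumped: %s, raw status %#x\n",
		       WTERMSIG(status), WCOREDUMP(status) ? "yes" : "no", status);
	return 0;
}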
Now that the coredump code always sets SIGNAL_GROUP_EXIT there is no longer a need to set signal->group_exit_task. Link: https://lkml.kernel.org/r/20211213225350.27481-6-ebiederm@xmission.com Signed-off-by: "Eric W. Biederman" --- fs/coredump.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/fs/coredump.c b/fs/coredump.c index fee1c57aee89..c92ffc0bf2c2 100644 --- a/fs/coredump.c +++ b/fs/coredump.c @@ -377,7 +377,6 @@ static int zap_threads(struct task_struct *tsk, spin_lock_irq(&tsk->sighand->siglock); if (!signal_group_exit(tsk->signal)) { tsk->signal->core_state = core_state; - tsk->signal->group_exit_task = tsk; nr = zap_process(tsk, exit_code); clear_tsk_thread_flag(tsk, TIF_SIGPENDING); tsk->flags |= PF_DUMPCORE; @@ -426,7 +425,6 @@ static void coredump_finish(bool core_dumped) spin_lock_irq(¤t->sighand->siglock); if (core_dumped && !__fatal_signal_pending(current)) current->signal->group_exit_code |= 0x80; - current->signal->group_exit_task = NULL; next = current->signal->core_state->dumper.next; current->signal->core_state = NULL; spin_unlock_irq(¤t->sighand->siglock); From 60700e38fb68e800607ca7a027060d5419fc5798 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Sun, 6 Jun 2021 13:47:53 -0500 Subject: [PATCH 33/44] signal: Rename group_exit_task group_exec_task The only remaining user of group_exit_task is exec. Rename the field so that it is clear which part of the code uses it. Update the comment above the definition of group_exec_task to document how it is currently used. Link: https://lkml.kernel.org/r/20211213225350.27481-7-ebiederm@xmission.com Signed-off-by: "Eric W. Biederman" --- fs/exec.c | 8 ++++---- include/linux/sched/signal.h | 12 ++++-------- kernel/exit.c | 4 ++-- 3 files changed, 10 insertions(+), 14 deletions(-) diff --git a/fs/exec.c b/fs/exec.c index 59cac7c18178..9d2925811011 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1054,7 +1054,7 @@ static int de_thread(struct task_struct *tsk) return -EAGAIN; } - sig->group_exit_task = tsk; + sig->group_exec_task = tsk; sig->notify_count = zap_other_threads(tsk); if (!thread_group_leader(tsk)) sig->notify_count--; @@ -1082,7 +1082,7 @@ static int de_thread(struct task_struct *tsk) write_lock_irq(&tasklist_lock); /* * Do this under tasklist_lock to ensure that - * exit_notify() can't miss ->group_exit_task + * exit_notify() can't miss ->group_exec_task */ sig->notify_count = -1; if (likely(leader->exit_state)) @@ -1149,7 +1149,7 @@ static int de_thread(struct task_struct *tsk) release_task(leader); } - sig->group_exit_task = NULL; + sig->group_exec_task = NULL; sig->notify_count = 0; no_thread_group: @@ -1162,7 +1162,7 @@ no_thread_group: killed: /* protects against exit_notify() and __exit_signal() */ read_lock(&tasklist_lock); - sig->group_exit_task = NULL; + sig->group_exec_task = NULL; sig->notify_count = 0; read_unlock(&tasklist_lock); return -EAGAIN; diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index ecc10e148799..d3248aba5183 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h @@ -109,13 +109,9 @@ struct signal_struct { /* thread group exit support */ int group_exit_code; - /* overloaded: - * - notify group_exit_task when ->count is equal to notify_count - * - everyone except group_exit_task is stopped during signal delivery - * of fatal signals, group_exit_task processes the signal. 
- */ + /* notify group_exec_task when notify_count is less or equal to 0 */ int notify_count; - struct task_struct *group_exit_task; + struct task_struct *group_exec_task; /* thread group stop support, overloads group_exit_code too */ int group_stop_count; @@ -275,11 +275,11 @@ static inline void signal_set_stop_flags(struct signal_struct *sig, sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags; } -/* If true, all threads except ->group_exit_task have pending SIGKILL */ +/* If true, all threads except ->group_exec_task have pending SIGKILL */ static inline int signal_group_exit(const struct signal_struct *sig) { return (sig->flags & SIGNAL_GROUP_EXIT) || - (sig->group_exit_task != NULL); + (sig->group_exec_task != NULL); } extern void flush_signals(struct task_struct *); diff --git a/kernel/exit.c b/kernel/exit.c index fc0726cb22db..b05578abbf26 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -116,7 +116,7 @@ static void __exit_signal(struct task_struct *tsk) * then notify it: */ if (sig->notify_count > 0 && !--sig->notify_count) - wake_up_process(sig->group_exit_task); + wake_up_process(sig->group_exec_task); if (tsk == sig->curr_target) sig->curr_target = next_thread(tsk); @@ -697,7 +697,7 @@ static void exit_notify(struct task_struct *tsk, int group_dead) /* mt-exec, de_thread() is waiting for group leader */ if (unlikely(tsk->signal->notify_count < 0)) - wake_up_process(tsk->signal->group_exit_task); + wake_up_process(tsk->signal->group_exec_task); write_unlock_irq(&tasklist_lock); list_for_each_entry_safe(p, n, &dead, ptrace_entry) { From 49697335e0b441b0553598c1b48ee9ebb053d2f1 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Thu, 24 Jun 2021 02:14:30 -0500 Subject: [PATCH 34/44] signal: Remove the helper signal_group_exit This helper is misleading. It tests for an ongoing exec as well as the process having received a fatal signal. Sometimes it is appropriate to treat an on-going exec differently than a process that is shutting down due to a fatal signal. In particular taking the fast path out of exit_signals instead of retargeting signals is not appropriate during exec, and not changing the exit code in do_group_exit during exec. Removing the helper makes it more obvious what is going on as both cases must be coded for explicitly. While removing the helper, fix the two cases where I have observed that using signal_group_exit produced the wrong result. In exit_signals only test for SIGNAL_GROUP_EXIT so that signals are retargeted during an exec. In do_group_exit use 0 as the exit code during an exec as de_thread does not set group_exit_code. As best I can determine, group_exit_code is set to 0 most of the time during de_thread. During a thread group stop group_exit_code is set to the stop signal and when the thread group receives SIGCONT group_exit_code is reset to 0. Link: https://lkml.kernel.org/r/20211213225350.27481-8-ebiederm@xmission.com Signed-off-by: "Eric W.
Biederman" --- fs/coredump.c | 5 +++-- fs/exec.c | 2 +- include/linux/sched/signal.h | 7 ------- kernel/exit.c | 8 ++++++-- kernel/signal.c | 8 +++++--- 5 files changed, 15 insertions(+), 15 deletions(-) diff --git a/fs/coredump.c b/fs/coredump.c index c92ffc0bf2c2..7dece20b162b 100644 --- a/fs/coredump.c +++ b/fs/coredump.c @@ -372,11 +372,12 @@ static int zap_process(struct task_struct *start, int exit_code) static int zap_threads(struct task_struct *tsk, struct core_state *core_state, int exit_code) { + struct signal_struct *signal = tsk->signal; int nr = -EAGAIN; spin_lock_irq(&tsk->sighand->siglock); - if (!signal_group_exit(tsk->signal)) { - tsk->signal->core_state = core_state; + if (!(signal->flags & SIGNAL_GROUP_EXIT) && !signal->group_exec_task) { + signal->core_state = core_state; nr = zap_process(tsk, exit_code); clear_tsk_thread_flag(tsk, TIF_SIGPENDING); tsk->flags |= PF_DUMPCORE; diff --git a/fs/exec.c b/fs/exec.c index 9d2925811011..82db656ca709 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1045,7 +1045,7 @@ static int de_thread(struct task_struct *tsk) * Kill all other threads in the thread group. */ spin_lock_irq(lock); - if (signal_group_exit(sig)) { + if ((sig->flags & SIGNAL_GROUP_EXIT) || sig->group_exec_task) { /* * Another group action in progress, just * return so that the signal is processed. diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index d3248aba5183..b6ecb9fc4cd2 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h @@ -271,13 +271,6 @@ static inline void signal_set_stop_flags(struct signal_struct *sig, sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags; } -/* If true, all threads except ->group_exec_task have pending SIGKILL */ -static inline int signal_group_exit(const struct signal_struct *sig) -{ - return (sig->flags & SIGNAL_GROUP_EXIT) || - (sig->group_exec_task != NULL); -} - extern void flush_signals(struct task_struct *); extern void ignore_signals(struct task_struct *); extern void flush_signal_handlers(struct task_struct *, int force_default); diff --git a/kernel/exit.c b/kernel/exit.c index b05578abbf26..861cfb1e2f77 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -914,15 +914,19 @@ do_group_exit(int exit_code) BUG_ON(exit_code & 0x80); /* core dumps don't get here */ - if (signal_group_exit(sig)) + if (sig->flags & SIGNAL_GROUP_EXIT) exit_code = sig->group_exit_code; + else if (sig->group_exec_task) + exit_code = 0; else if (!thread_group_empty(current)) { struct sighand_struct *const sighand = current->sighand; spin_lock_irq(&sighand->siglock); - if (signal_group_exit(sig)) + if (sig->flags & SIGNAL_GROUP_EXIT) /* Another thread got here before we took the lock. */ exit_code = sig->group_exit_code; + else if (sig->group_exec_task) + exit_code = 0; else { sig->group_exit_code = exit_code; sig->flags = SIGNAL_GROUP_EXIT; diff --git a/kernel/signal.c b/kernel/signal.c index bae231bc2f4a..167b8e196a79 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -2386,7 +2386,8 @@ static bool do_signal_stop(int signr) WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK); if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) || - unlikely(signal_group_exit(sig))) + unlikely(sig->flags & SIGNAL_GROUP_EXIT) || + unlikely(sig->group_exec_task)) return false; /* * There is no group stop already in progress. We must @@ -2693,7 +2694,8 @@ relock: enum pid_type type; /* Has this task already been marked for death? 
*/ - if (signal_group_exit(signal)) { + if ((signal->flags & SIGNAL_GROUP_EXIT) || + signal->group_exec_task) { ksig->info.si_signo = signr = SIGKILL; sigdelset(¤t->pending.signal, SIGKILL); trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO, @@ -2949,7 +2951,7 @@ void exit_signals(struct task_struct *tsk) */ cgroup_threadgroup_change_begin(tsk); - if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) { + if (thread_group_empty(tsk) || (tsk->signal->flags & SIGNAL_GROUP_EXIT)) { tsk->flags |= PF_EXITING; cgroup_threadgroup_change_end(tsk); return; From 6410349ea5e177f3e53c2006d2041eed47e986ae Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 21 Dec 2021 19:10:27 -0800 Subject: [PATCH 35/44] signal: clean up kernel-doc comments Fix kernel-doc warnings in kernel/signal.c: kernel/signal.c:1830: warning: Function parameter or member 'force_coredump' not described in 'force_sig_seccomp' kernel/signal.c:2873: warning: missing initial short description on line: * signal_delivered - Also add a closing parenthesis to the comments in signal_delivered(). Signed-off-by: Randy Dunlap Cc: Alexander Viro Cc: Richard Weinberger Cc: Andrew Morton Cc: "Eric W. Biederman" Cc: Jens Axboe Cc: Peter Zijlstra Cc: Marco Elver Cc: Thomas Gleixner Link: https://lkml.kernel.org/r/20211222031027.29694-1-rdunlap@infradead.org Signed-off-by: Eric W. Biederman --- kernel/signal.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/kernel/signal.c b/kernel/signal.c index 167b8e196a79..6324104cf244 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -1823,6 +1823,7 @@ int force_sig_perf(void __user *addr, u32 type, u64 sig_data) * force_sig_seccomp - signals the task to allow in-process syscall emulation * @syscall: syscall number to send to userland * @reason: filter-supplied reason code to send to userland (via si_errno) + * @force_coredump: true to trigger a coredump * * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info. */ @@ -2872,13 +2873,13 @@ out: } /** - * signal_delivered - + * signal_delivered - called after signal delivery to update blocked signals * @ksig: kernel signal struct * @stepping: nonzero if debugger single-step or block-step in use * * This function should be called when a signal has successfully been * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask - * is always blocked, and the signal itself is blocked unless %SA_NODEFER + * is always blocked), and the signal itself is blocked unless %SA_NODEFER * is set in @ksig->ka.sa.sa_flags. Tracing is notified. */ static void signal_delivered(struct ksignal *ksig, int stepping) From 2d4bcf886e42f0f4846a3d9bdc3a90d278903a2e Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Sat, 8 Jan 2022 11:23:02 -0600 Subject: [PATCH 36/44] exit: Remove profile_task_exit & profile_munmap When I say remove I mean remove. All profile_task_exit and profile_munmap do is call a blocking notifier chain. The helpers profile_task_register and profile_task_unregister are not called anywhere in the tree. Which means this is all dead code. So remove the dead code and make it easier to read do_exit. Reviewed-by: Christoph Hellwig Link: https://lkml.kernel.org/r/20220103213312.9144-1-ebiederm@xmission.com Signed-off-by: "Eric W. 
Biederman" --- include/linux/profile.h | 26 --------------------- kernel/exit.c | 1 - kernel/profile.c | 50 ----------------------------------------- mm/mmap.c | 1 - 4 files changed, 78 deletions(-) diff --git a/include/linux/profile.h b/include/linux/profile.h index fd18ca96f557..f7eb2b57d890 100644 --- a/include/linux/profile.h +++ b/include/linux/profile.h @@ -31,11 +31,6 @@ static inline int create_proc_profile(void) } #endif -enum profile_type { - PROFILE_TASK_EXIT, - PROFILE_MUNMAP -}; - #ifdef CONFIG_PROFILING extern int prof_on __read_mostly; @@ -66,23 +61,14 @@ static inline void profile_hit(int type, void *ip) struct task_struct; struct mm_struct; -/* task is in do_exit() */ -void profile_task_exit(struct task_struct * task); - /* task is dead, free task struct ? Returns 1 if * the task was taken, 0 if the task should be freed. */ int profile_handoff_task(struct task_struct * task); -/* sys_munmap */ -void profile_munmap(unsigned long addr); - int task_handoff_register(struct notifier_block * n); int task_handoff_unregister(struct notifier_block * n); -int profile_event_register(enum profile_type, struct notifier_block * n); -int profile_event_unregister(enum profile_type, struct notifier_block * n); - #else #define prof_on 0 @@ -117,19 +103,7 @@ static inline int task_handoff_unregister(struct notifier_block * n) return -ENOSYS; } -static inline int profile_event_register(enum profile_type t, struct notifier_block * n) -{ - return -ENOSYS; -} - -static inline int profile_event_unregister(enum profile_type t, struct notifier_block * n) -{ - return -ENOSYS; -} - -#define profile_task_exit(a) do { } while (0) #define profile_handoff_task(a) (0) -#define profile_munmap(a) do { } while (0) #endif /* CONFIG_PROFILING */ diff --git a/kernel/exit.c b/kernel/exit.c index 861cfb1e2f77..64e907bc87d5 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -751,7 +751,6 @@ void __noreturn do_exit(long code) */ force_uaccess_begin(); - profile_task_exit(tsk); kcov_task_exit(tsk); coredump_task_exit(tsk); diff --git a/kernel/profile.c b/kernel/profile.c index eb9c7f0f5ac5..9355cc934a96 100644 --- a/kernel/profile.c +++ b/kernel/profile.c @@ -135,14 +135,7 @@ int __ref profile_init(void) /* Profile event notifications */ -static BLOCKING_NOTIFIER_HEAD(task_exit_notifier); static ATOMIC_NOTIFIER_HEAD(task_free_notifier); -static BLOCKING_NOTIFIER_HEAD(munmap_notifier); - -void profile_task_exit(struct task_struct *task) -{ - blocking_notifier_call_chain(&task_exit_notifier, 0, task); -} int profile_handoff_task(struct task_struct *task) { @@ -151,11 +144,6 @@ int profile_handoff_task(struct task_struct *task) return (ret == NOTIFY_OK) ? 
1 : 0; } -void profile_munmap(unsigned long addr) -{ - blocking_notifier_call_chain(&munmap_notifier, 0, (void *)addr); -} - int task_handoff_register(struct notifier_block *n) { return atomic_notifier_chain_register(&task_free_notifier, n); @@ -168,44 +156,6 @@ int task_handoff_unregister(struct notifier_block *n) } EXPORT_SYMBOL_GPL(task_handoff_unregister); -int profile_event_register(enum profile_type type, struct notifier_block *n) -{ - int err = -EINVAL; - - switch (type) { - case PROFILE_TASK_EXIT: - err = blocking_notifier_chain_register( - &task_exit_notifier, n); - break; - case PROFILE_MUNMAP: - err = blocking_notifier_chain_register( - &munmap_notifier, n); - break; - } - - return err; -} -EXPORT_SYMBOL_GPL(profile_event_register); - -int profile_event_unregister(enum profile_type type, struct notifier_block *n) -{ - int err = -EINVAL; - - switch (type) { - case PROFILE_TASK_EXIT: - err = blocking_notifier_chain_unregister( - &task_exit_notifier, n); - break; - case PROFILE_MUNMAP: - err = blocking_notifier_chain_unregister( - &munmap_notifier, n); - break; - } - - return err; -} -EXPORT_SYMBOL_GPL(profile_event_unregister); - #if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS) /* * Each cpu has a pair of open-addressed hashtables for pending diff --git a/mm/mmap.c b/mm/mmap.c index bfb0ea164a90..70318c2a47c3 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2928,7 +2928,6 @@ EXPORT_SYMBOL(vm_munmap); SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) { addr = untagged_addr(addr); - profile_munmap(addr); return __vm_munmap(addr, len, true); } From 2873cd31a20c25b5e763b35e5fb886f0938c6dd5 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Sat, 8 Jan 2022 10:03:24 -0600 Subject: [PATCH 37/44] exit: Remove profile_handoff_task All profile_handoff_task does is notify the task_free_notifier chain. The helpers task_handoff_register and task_handoff_unregister are used to add and delete entries from that chain and are never called. So remove the dead code and make it much easier to read and reason about __put_task_struct. Suggested-by: Al Viro Link: https://lkml.kernel.org/r/87fspyw6m0.fsf@email.froward.int.ebiederm.org Signed-off-by: "Eric W. Biederman" --- include/linux/profile.h | 19 ------------------- kernel/fork.c | 4 +--- kernel/profile.c | 23 ----------------------- 3 files changed, 1 insertion(+), 45 deletions(-) diff --git a/include/linux/profile.h b/include/linux/profile.h index f7eb2b57d890..11db1ec516e2 100644 --- a/include/linux/profile.h +++ b/include/linux/profile.h @@ -61,14 +61,6 @@ static inline void profile_hit(int type, void *ip) struct task_struct; struct mm_struct; -/* task is dead, free task struct ? Returns 1 if - * the task was taken, 0 if the task should be freed. 
- */ -int profile_handoff_task(struct task_struct * task); - -int task_handoff_register(struct notifier_block * n); -int task_handoff_unregister(struct notifier_block * n); - #else #define prof_on 0 @@ -93,17 +85,6 @@ static inline void profile_hit(int type, void *ip) return; } -static inline int task_handoff_register(struct notifier_block * n) -{ - return -ENOSYS; -} - -static inline int task_handoff_unregister(struct notifier_block * n) -{ - return -ENOSYS; -} - -#define profile_handoff_task(a) (0) #endif /* CONFIG_PROFILING */ diff --git a/kernel/fork.c b/kernel/fork.c index 6f0293cb29c9..494539ecb6d3 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -754,9 +754,7 @@ void __put_task_struct(struct task_struct *tsk) delayacct_tsk_free(tsk); put_signal_struct(tsk->signal); sched_core_free(tsk); - - if (!profile_handoff_task(tsk)) - free_task(tsk); + free_task(tsk); } EXPORT_SYMBOL_GPL(__put_task_struct); diff --git a/kernel/profile.c b/kernel/profile.c index 9355cc934a96..37640a0bd8a3 100644 --- a/kernel/profile.c +++ b/kernel/profile.c @@ -133,29 +133,6 @@ int __ref profile_init(void) return -ENOMEM; } -/* Profile event notifications */ - -static ATOMIC_NOTIFIER_HEAD(task_free_notifier); - -int profile_handoff_task(struct task_struct *task) -{ - int ret; - ret = atomic_notifier_call_chain(&task_free_notifier, 0, task); - return (ret == NOTIFY_OK) ? 1 : 0; -} - -int task_handoff_register(struct notifier_block *n) -{ - return atomic_notifier_chain_register(&task_free_notifier, n); -} -EXPORT_SYMBOL_GPL(task_handoff_register); - -int task_handoff_unregister(struct notifier_block *n) -{ - return atomic_notifier_chain_unregister(&task_free_notifier, n); -} -EXPORT_SYMBOL_GPL(task_handoff_unregister); - #if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS) /* * Each cpu has a pair of open-addressed hashtables for pending From 270b6541e603a7fae0cad7af3dc3bca6adb343f3 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Thu, 23 Dec 2021 10:05:19 -0600 Subject: [PATCH 38/44] exit: Coredumps reach do_group_exit The comment about coredumps not reaching do_group_exit and the corresponding BUG_ON are bogus. What happens and has happened for years is that get_signal calls do_coredump (which sets SIGNAL_GROUP_EXIT and group_exit_code) and then do_group_exit passing the signal number. Then do_group_exit ignores the exit_code it is passed and uses signal->group_exit_code from the coredump. The comment and BUG_ON were correct when they were added during the 2.5 development cycle, but became obsolete and incorrect when get_signal was changed to fall through to do_group_exit after do_coredump in 2.6.10-rc2. So remove the stale comment and BUG_ON Fixes: 63bd6144f191 ("[PATCH] Invalid BUG_ONs in signal.c") History-Tree: https://git.kernel.org/pub/scm/linux/kernel/git/tglx/history.git Link: https://lkml.kernel.org/r/20220103213312.9144-2-ebiederm@xmission.com Signed-off-by: "Eric W. Biederman" --- kernel/exit.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/kernel/exit.c b/kernel/exit.c index 64e907bc87d5..db86307077d4 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -911,8 +911,6 @@ do_group_exit(int exit_code) { struct signal_struct *sig = current->signal; - BUG_ON(exit_code & 0x80); /* core dumps don't get here */ - if (sig->flags & SIGNAL_GROUP_EXIT) exit_code = sig->group_exit_code; else if (sig->group_exec_task) From 907c311f37ba04ccebd00a9b9f3ba718e318a1de Mon Sep 17 00:00:00 2001 From: "Eric W. 
Biederman" Date: Tue, 21 Dec 2021 10:11:01 -0600 Subject: [PATCH 39/44] exit: Fix the exit_code for wait_task_zombie The function wait_task_zombie is defined to always returns the process not thread exit status. Unfortunately when process group exit support was added to wait_task_zombie the WNOWAIT case was overlooked. Usually tsk->exit_code and tsk->signal->group_exit_code will be in sync so fixing this is bug probably has no effect in practice. But fix it anyway so that people aren't scratching their heads about why the two code paths are different. History-Tree: https://git.kernel.org/pub/scm/linux/kernel/git/tglx/history.git Fixes: 2c66151cbc2c ("[PATCH] sys_exit() threading improvements, BK-curr") Link: https://lkml.kernel.org/r/20220103213312.9144-3-ebiederm@xmission.com Signed-off-by: "Eric W. Biederman" --- kernel/exit.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/kernel/exit.c b/kernel/exit.c index db86307077d4..b00a25bb4ab9 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -1018,7 +1018,8 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p) return 0; if (unlikely(wo->wo_flags & WNOWAIT)) { - status = p->exit_code; + status = (p->signal->flags & SIGNAL_GROUP_EXIT) + ? p->signal->group_exit_code : p->exit_code; get_task_struct(p); read_unlock(&tasklist_lock); sched_annotate_sleep(); From 2d18f7f456209ed8a8fc138b8bc535dbdaf84695 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Mon, 20 Dec 2021 19:16:34 -0600 Subject: [PATCH 40/44] exit: Use the correct exit_code in /proc//stat Since do_proc_statt was modified to return process wide values instead of per task values the exit_code calculation has never been updated. Update it now to return the process wide exit_code when it is requested and available. History-Tree: https://git.kernel.org/pub/scm/linux/kernel/git/tglx/history.git Fixes: bf719d26a5c1 ("[PATCH] distinct tgid/tid CPU usage") Link: https://lkml.kernel.org/r/20220103213312.9144-4-ebiederm@xmission.com Signed-off-by: "Eric W. Biederman" --- fs/proc/array.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/fs/proc/array.c b/fs/proc/array.c index ff869a66b34e..43a7abde9e42 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -468,6 +468,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, u64 cgtime, gtime; unsigned long rsslim = 0; unsigned long flags; + int exit_code = task->exit_code; state = *get_task_state(task); vsize = eip = esp = 0; @@ -531,6 +532,9 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, maj_flt += sig->maj_flt; thread_group_cputime_adjusted(task, &utime, &stime); gtime += sig->gtime; + + if (sig->flags & (SIGNAL_GROUP_EXIT | SIGNAL_STOP_STOPPED)) + exit_code = sig->group_exit_code; } sid = task_session_nr_ns(task, ns); @@ -630,7 +634,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, seq_puts(m, " 0 0 0 0 0 0 0"); if (permitted) - seq_put_decimal_ll(m, " ", task->exit_code); + seq_put_decimal_ll(m, " ", exit_code); else seq_puts(m, " 0"); From 1b5a42d9c85f0e731f01c8d1129001fd8531a8a0 Mon Sep 17 00:00:00 2001 From: "Eric W. 
Biederman" Date: Mon, 3 Jan 2022 11:32:36 -0600 Subject: [PATCH 41/44] taskstats: Cleanup the use of task->exit_code In the function bacct_add_task the code reading task->exit_code was introduced in commit f3cef7a99469 ("[PATCH] csa: basic accounting over taskstats"), and it is not entirely clear what the taskstats interface is trying to return as only returning the exit_code of the first task in a process doesn't make a lot of sense. As best as I can figure the intent is to return task->exit_code after a task exits. The field is returned with per task fields, so the exit_code of the entire process is not wanted. Only the value of the first task is returned so this is not a useful way to get the per task ptrace stop code. The ordinary case of returning this value is returning after a task exits, which also precludes use for getting a ptrace value. It is common to for the first task of a process to also be the last task of a process so this field may have done something reasonable by accident in testing. Make ac_exitcode a reliable per task value by always returning it for every exited task. Setting ac_exitcode in a sensible mannter makes it possible to continue to provide this value going forward. Cc: Balbir Singh Fixes: f3cef7a99469 ("[PATCH] csa: basic accounting over taskstats") Link: https://lkml.kernel.org/r/20220103213312.9144-5-ebiederm@xmission.com Signed-off-by: "Eric W. Biederman" --- kernel/tsacct.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/kernel/tsacct.c b/kernel/tsacct.c index f00de83d0246..1d261fbe367b 100644 --- a/kernel/tsacct.c +++ b/kernel/tsacct.c @@ -38,11 +38,10 @@ void bacct_add_tsk(struct user_namespace *user_ns, stats->ac_btime = clamp_t(time64_t, btime, 0, U32_MAX); stats->ac_btime64 = btime; - if (thread_group_leader(tsk)) { + if (tsk->flags & PF_EXITING) stats->ac_exitcode = tsk->exit_code; - if (tsk->flags & PF_FORKNOEXEC) - stats->ac_flag |= AFORK; - } + if (thread_group_leader(tsk) && (tsk->flags & PF_FORKNOEXEC)) + stats->ac_flag |= AFORK; if (tsk->flags & PF_SUPERPRIV) stats->ac_flag |= ASU; if (tsk->flags & PF_DUMPCORE) From 6707d0fc60576fa8ef2dfa2f9009b606df35ba24 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Mon, 20 Dec 2021 17:15:09 -0600 Subject: [PATCH 42/44] ptrace: Remove second setting of PT_SEIZED in ptrace_attach The code is totally redundant remove it. Link: https://lkml.kernel.org/r/20220103213312.9144-6-ebiederm@xmission.com Signed-off-by: "Eric W. Biederman" --- kernel/ptrace.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/kernel/ptrace.c b/kernel/ptrace.c index f8589bf8d7dc..eea265082e97 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -419,8 +419,6 @@ static int ptrace_attach(struct task_struct *task, long request, if (task->ptrace) goto unlock_tasklist; - if (seize) - flags |= PT_SEIZED; task->ptrace = flags; ptrace_link(task, current); From 4264178416cd52a55a3eccbefb3973866e060280 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Mon, 20 Dec 2021 16:28:53 -0600 Subject: [PATCH 43/44] ptrace: Remove unused regs argument from ptrace_report_syscall Link: https://lkml.kernel.org/r/20220103213312.9144-7-ebiederm@xmission.com Signed-off-by: "Eric W. 
Biederman" --- include/linux/tracehook.h | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index 2564b7434b4d..88c007ab5ebc 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h @@ -54,8 +54,7 @@ struct linux_binprm; /* * ptrace report for syscall entry and exit looks identical. */ -static inline int ptrace_report_syscall(struct pt_regs *regs, - unsigned long message) +static inline int ptrace_report_syscall(unsigned long message) { int ptrace = current->ptrace; @@ -102,7 +101,7 @@ static inline int ptrace_report_syscall(struct pt_regs *regs, static inline __must_check int tracehook_report_syscall_entry( struct pt_regs *regs) { - return ptrace_report_syscall(regs, PTRACE_EVENTMSG_SYSCALL_ENTRY); + return ptrace_report_syscall(PTRACE_EVENTMSG_SYSCALL_ENTRY); } /** @@ -127,7 +126,7 @@ static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step) if (step) user_single_step_report(regs); else - ptrace_report_syscall(regs, PTRACE_EVENTMSG_SYSCALL_EXIT); + ptrace_report_syscall(PTRACE_EVENTMSG_SYSCALL_EXIT); } /** From a403df29789ba38796edb97dad9bfb47836b68c0 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Mon, 20 Dec 2021 16:29:29 -0600 Subject: [PATCH 44/44] ptrace/m68k: Stop open coding ptrace_report_syscall The generic function ptrace_report_syscall does a little more than syscall_trace on m68k. The function ptrace_report_syscall stops early if PT_TRACED is not set, it sets ptrace_message, and returns the result of fatal_signal_pending. Setting ptrace_message to a passed in value of 0 is effectively not setting ptrace_message, making that additional work a noop. Returning the result of fatal_signal_pending and letting the caller ignore the result becomes a noop in this change. When a process is ptraced, the flag PT_PTRACED is always set in current->ptrace. Testing for PT_PTRACED in ptrace_report_syscall is just an optimization to fail early if the process is not ptraced. Later on in ptrace_notify, ptrace_stop will test current->ptrace under tasklist_lock and skip performing any work if the task is not ptraced. Cc: Geert Uytterhoeven Link: https://lkml.kernel.org/r/20220103213312.9144-8-ebiederm@xmission.com Signed-off-by: "Eric W. Biederman" --- arch/m68k/kernel/ptrace.c | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/arch/m68k/kernel/ptrace.c b/arch/m68k/kernel/ptrace.c index 94b3b274186d..aa3a0b8d07e9 100644 --- a/arch/m68k/kernel/ptrace.c +++ b/arch/m68k/kernel/ptrace.c @@ -273,17 +273,7 @@ out_eio: asmlinkage void syscall_trace(void) { - ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) - ? 0x80 : 0)); - /* - * this isn't the same as continuing with a signal, but it will do - * for normal use. strace only continues with a signal if the - * stopping signal is not SIGTRAP. -brl - */ - if (current->exit_code) { - send_sig(current->exit_code, current, 1); - current->exit_code = 0; - } + ptrace_report_syscall(0); } #if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU)