coredump: Limit coredumps to a single thread group

Today, when a signal is delivered with a handler of SIG_DFL whose
default behavior is to generate a core dump, not only that process but
every process that shares the mm is killed.

In the case of vfork this looks like a real-world problem. Consider
the following well-defined sequence:

	if (vfork() == 0) {
		execve(...);
		_exit(EXIT_FAILURE);
	}

If a signal that generates a core dump is received after vfork but
before the execve changes the mm, the process that called vfork will
also be killed (as the mm is shared); a reproducer sketch follows.

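For illustration only (this reproducer is not part of the patch; the
helper program and the choice of SIGQUIT are my own):

	/* Build with core dumps enabled (ulimit -c unlimited).
	 * SIGQUIT's default action is to dump core.
	 */
	#include <signal.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/wait.h>
	#include <unistd.h>

	int main(void)
	{
		pid_t child = vfork();

		if (child == 0) {
			/* Core-dumping signal in the vfork window,
			 * before execve() installs a fresh mm.
			 */
			raise(SIGQUIT);
			execve("/bin/true",
			       (char *[]){ "/bin/true", NULL }, NULL);
			_exit(EXIT_FAILURE);
		}
		/* Previously the parent shared the mm and was zapped
		 * too; with this change only the child dumps core.
		 */
		waitpid(child, NULL, 0);
		printf("parent survived the child's core dump\n");
		return 0;
	}
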
Similarly, if the execve fails after the point of no return, the
kernel delivers SIGSEGV, which kills both the exec'ing process and,
because the mm is shared, the process that called vfork as well.

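For context, this is roughly the fragment that runs on that error path
(paraphrased from fs/exec.c of this era, not part of this patch; the
exact helper used here has varied across kernel versions):

	/* Past the point of no return there is no program left to
	 * return to, so exec's error path terminates the task instead.
	 * Before this patch, the resulting core dump took down every
	 * user of the shared mm, including a vfork parent.
	 */
	if (bprm->point_of_no_return && !fatal_signal_pending(current))
		force_sigsegv(SIGSEGV);
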
As far as I can tell this behavior violates people's reasonable
expectations as well as POSIX, and it is unnecessarily fragile when
the system is low on memory.

Solve this by making a userspace-visible change that kills only a
single process/thread group. This is possible because Jann Horn
recently modified[1] the coredump code so that the mm can safely be
modified while the coredump is happening. With LinuxThreads long gone,
I don't expect anyone to notice this behavior change in practice.

To accomplish this, move the core_state pointer from mm_struct to
signal_struct, which allows different thread groups to coredump
simultaneously.

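Schematically (field placement only, surrounding members elided; the
full hunks are below):

	struct mm_struct {
		/* ...
		 * struct core_state *core_state; -- removed: the mm is
		 * shared by every CLONE_VM/vfork user
		 */
	};

	struct signal_struct {
		/* ... */
		struct core_state *core_state;	/* per thread group */
		/* ... */
	};
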
In zap_threads, remove the work to kill anything except the current
thread group.

v2: Remove core_state from the VM_BUG_ON_MM print to fix a compile
failure when CONFIG_DEBUG_VM is enabled.
Reported-by: Stephen Rothwell <sfr@canb.auug.org.au>
[1] a07279c9a8 ("binfmt_elf, binfmt_elf_fdpic: use a VMA list snapshot")
Fixes: d89f3847def4 ("[PATCH] thread-aware coredumps, 2.5.43-C3")
History-tree: git://git.kernel.org/pub/scm/linux/kernel/git/tglx/history.git
Link: https://lkml.kernel.org/r/87y27mvnke.fsf@disp2133
Link: https://lkml.kernel.org/r/20211007144701.67592574@canb.auug.org.au
Reviewed-by: Kees Cook <keescook@chromium.org>
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>

parent 9230738308
commit 0258b5fd7c

diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1834,7 +1834,7 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
 	/*
 	 * Allocate a structure for each thread.
 	 */
-	for (ct = &dump_task->mm->core_state->dumper; ct; ct = ct->next) {
+	for (ct = &dump_task->signal->core_state->dumper; ct; ct = ct->next) {
 		t = kzalloc(offsetof(struct elf_thread_core_info,
 				     notes[info->thread_notes]),
 			    GFP_KERNEL);
@@ -2024,7 +2024,7 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
 	if (!elf_note_info_init(info))
 		return 0;
 
-	for (ct = current->mm->core_state->dumper.next;
+	for (ct = current->signal->core_state->dumper.next;
 	     ct; ct = ct->next) {
 		ets = kzalloc(sizeof(*ets), GFP_KERNEL);
 		if (!ets)
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -1494,7 +1494,7 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm)
 	if (dump_vma_snapshot(cprm, &vma_count, &vma_meta, &vma_data_size))
 		goto end_coredump;
 
-	for (ct = current->mm->core_state->dumper.next;
+	for (ct = current->signal->core_state->dumper.next;
 	     ct; ct = ct->next) {
 		tmp = elf_dump_thread_status(cprm->siginfo->si_signo,
 					     ct->task, &thread_status_size);
diff --git a/fs/coredump.c b/fs/coredump.c
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -369,99 +369,34 @@ static int zap_process(struct task_struct *start, int exit_code, int flags)
 	return nr;
 }
 
-static int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
+static int zap_threads(struct task_struct *tsk,
 			struct core_state *core_state, int exit_code)
 {
-	struct task_struct *g, *p;
-	unsigned long flags;
 	int nr = -EAGAIN;
 
 	spin_lock_irq(&tsk->sighand->siglock);
 	if (!signal_group_exit(tsk->signal)) {
-		mm->core_state = core_state;
+		tsk->signal->core_state = core_state;
 		tsk->signal->group_exit_task = tsk;
 		nr = zap_process(tsk, exit_code, 0);
 		clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
+		tsk->flags |= PF_DUMPCORE;
+		atomic_set(&core_state->nr_threads, nr);
 	}
 	spin_unlock_irq(&tsk->sighand->siglock);
-	if (unlikely(nr < 0))
-		return nr;
-
-	tsk->flags |= PF_DUMPCORE;
-	if (atomic_read(&mm->mm_users) == nr + 1)
-		goto done;
-	/*
-	 * We should find and kill all tasks which use this mm, and we should
-	 * count them correctly into ->nr_threads. We don't take tasklist
-	 * lock, but this is safe wrt:
-	 *
-	 * fork:
-	 *	None of sub-threads can fork after zap_process(leader). All
-	 *	processes which were created before this point should be
-	 *	visible to zap_threads() because copy_process() adds the new
-	 *	process to the tail of init_task.tasks list, and lock/unlock
-	 *	of ->siglock provides a memory barrier.
-	 *
-	 * do_exit:
-	 *	The caller holds mm->mmap_lock. This means that the task which
-	 *	uses this mm can't pass coredump_task_exit(), so it can't exit
-	 *	or clear its ->mm.
-	 *
-	 * de_thread:
-	 *	It does list_replace_rcu(&leader->tasks, &current->tasks),
-	 *	we must see either old or new leader, this does not matter.
-	 *	However, it can change p->sighand, so lock_task_sighand(p)
-	 *	must be used. Since p->mm != NULL and we hold ->mmap_lock
-	 *	it can't fail.
-	 *
-	 *	Note also that "g" can be the old leader with ->mm == NULL
-	 *	and already unhashed and thus removed from ->thread_group.
-	 *	This is OK, __unhash_process()->list_del_rcu() does not
-	 *	clear the ->next pointer, we will find the new leader via
-	 *	next_thread().
-	 */
-	rcu_read_lock();
-	for_each_process(g) {
-		if (g == tsk->group_leader)
-			continue;
-		if (g->flags & PF_KTHREAD)
-			continue;
-
-		for_each_thread(g, p) {
-			if (unlikely(!p->mm))
-				continue;
-			if (unlikely(p->mm == mm)) {
-				lock_task_sighand(p, &flags);
-				nr += zap_process(p, exit_code,
-							SIGNAL_GROUP_EXIT);
-				unlock_task_sighand(p, &flags);
-			}
-			break;
-		}
-	}
-	rcu_read_unlock();
-done:
-	atomic_set(&core_state->nr_threads, nr);
 	return nr;
 }
 
 static int coredump_wait(int exit_code, struct core_state *core_state)
 {
 	struct task_struct *tsk = current;
-	struct mm_struct *mm = tsk->mm;
 	int core_waiters = -EBUSY;
 
 	init_completion(&core_state->startup);
 	core_state->dumper.task = tsk;
 	core_state->dumper.next = NULL;
 
-	if (mmap_write_lock_killable(mm))
-		return -EINTR;
-
-	if (!mm->core_state)
-		core_waiters = zap_threads(tsk, mm, core_state, exit_code);
-	mmap_write_unlock(mm);
-
+	core_waiters = zap_threads(tsk, core_state, exit_code);
 	if (core_waiters > 0) {
 		struct core_thread *ptr;
 
@@ -483,7 +418,7 @@ static int coredump_wait(int exit_code, struct core_state *core_state)
 	return core_waiters;
 }
 
-static void coredump_finish(struct mm_struct *mm, bool core_dumped)
+static void coredump_finish(bool core_dumped)
 {
 	struct core_thread *curr, *next;
 	struct task_struct *task;
@@ -493,9 +428,10 @@ static void coredump_finish(struct mm_struct *mm, bool core_dumped)
 		current->signal->group_exit_code |= 0x80;
 	current->signal->group_exit_task = NULL;
 	current->signal->flags = SIGNAL_GROUP_EXIT;
+	next = current->signal->core_state->dumper.next;
+	current->signal->core_state = NULL;
 	spin_unlock_irq(&current->sighand->siglock);
 
-	next = mm->core_state->dumper.next;
 	while ((curr = next) != NULL) {
 		next = curr->next;
 		task = curr->task;
@@ -507,8 +443,6 @@ static void coredump_finish(struct mm_struct *mm, bool core_dumped)
 		curr->task = NULL;
 		wake_up_process(task);
 	}
-
-	mm->core_state = NULL;
 }
 
 static bool dump_interrupted(void)
@@ -839,7 +773,7 @@ fail_dropcount:
 fail_unlock:
 	kfree(argv);
 	kfree(cn.corename);
-	coredump_finish(mm, core_dumped);
+	coredump_finish(core_dumped);
 	revert_creds(old_cred);
 fail_creds:
 	put_cred(cred);
diff --git a/fs/proc/array.c b/fs/proc/array.c
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -408,9 +408,9 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
 		   cpumask_pr_args(&task->cpus_mask));
 }
 
-static inline void task_core_dumping(struct seq_file *m, struct mm_struct *mm)
+static inline void task_core_dumping(struct seq_file *m, struct task_struct *task)
 {
-	seq_put_decimal_ull(m, "CoreDumping:\t", !!mm->core_state);
+	seq_put_decimal_ull(m, "CoreDumping:\t", !!task->signal->core_state);
 	seq_putc(m, '\n');
 }
 
@@ -436,7 +436,7 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
 
 	if (mm) {
 		task_mem(m, mm);
-		task_core_dumping(m, mm);
+		task_core_dumping(m, task);
 		task_thp_status(m, mm);
 		mmput(mm);
 	}
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -387,17 +387,6 @@ struct vm_area_struct {
 	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
 } __randomize_layout;
 
-struct core_thread {
-	struct task_struct *task;
-	struct core_thread *next;
-};
-
-struct core_state {
-	atomic_t nr_threads;
-	struct core_thread dumper;
-	struct completion startup;
-};
-
 struct kioctx_table;
 struct mm_struct {
 	struct {
@@ -518,8 +507,6 @@ struct mm_struct {
 
 		unsigned long flags; /* Must use atomic bitops to access */
 
-		struct core_state *core_state; /* coredumping support */
-
 #ifdef CONFIG_AIO
 		spinlock_t ioctx_lock;
 		struct kioctx_table __rcu *ioctx_table;
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -72,6 +72,17 @@ struct multiprocess_signals {
 	struct hlist_node node;
 };
 
+struct core_thread {
+	struct task_struct *task;
+	struct core_thread *next;
+};
+
+struct core_state {
+	atomic_t nr_threads;
+	struct core_thread dumper;
+	struct completion startup;
+};
+
 /*
  * NOTE! "signal_struct" does not have its own
  * locking, because a shared signal_struct always
@@ -110,6 +121,8 @@ struct signal_struct {
 	int			group_stop_count;
 	unsigned int		flags; /* see SIGNAL_* flags below */
 
+	struct core_state *core_state; /* coredumping support */
+
 	/*
 	 * PR_SET_CHILD_SUBREAPER marks a process, like a service
 	 * manager, to re-parent orphan (double-forking) child processes
diff --git a/kernel/exit.c b/kernel/exit.c
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -342,23 +342,18 @@ kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
 static void coredump_task_exit(struct task_struct *tsk)
 {
 	struct core_state *core_state;
-	struct mm_struct *mm;
-
-	mm = tsk->mm;
-	if (!mm)
-		return;
 
 	/*
 	 * Serialize with any possible pending coredump.
-	 * We must hold mmap_lock around checking core_state
+	 * We must hold siglock around checking core_state
 	 * and setting PF_POSTCOREDUMP.  The core-inducing thread
 	 * will increment ->nr_threads for each thread in the
 	 * group without PF_POSTCOREDUMP set.
 	 */
-	mmap_read_lock(mm);
+	spin_lock_irq(&tsk->sighand->siglock);
 	tsk->flags |= PF_POSTCOREDUMP;
-	core_state = mm->core_state;
-	mmap_read_unlock(mm);
+	core_state = tsk->signal->core_state;
+	spin_unlock_irq(&tsk->sighand->siglock);
 	if (core_state) {
 		struct core_thread self;
 
diff --git a/kernel/fork.c b/kernel/fork.c
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1044,7 +1044,6 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
 	seqcount_init(&mm->write_protect_seq);
 	mmap_init_lock(mm);
 	INIT_LIST_HEAD(&mm->mmlist);
-	mm->core_state = NULL;
 	mm_pgtables_bytes_init(mm);
 	mm->map_count = 0;
 	mm->locked_vm = 0;
diff --git a/mm/debug.c b/mm/debug.c
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -214,7 +214,7 @@ void dump_mm(const struct mm_struct *mm)
 		"start_code %lx end_code %lx start_data %lx end_data %lx\n"
 		"start_brk %lx brk %lx start_stack %lx\n"
 		"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
-		"binfmt %px flags %lx core_state %px\n"
+		"binfmt %px flags %lx\n"
 #ifdef CONFIG_AIO
 		"ioctx_table %px\n"
 #endif
@@ -246,7 +246,7 @@ void dump_mm(const struct mm_struct *mm)
 		mm->start_code, mm->end_code, mm->start_data, mm->end_data,
 		mm->start_brk, mm->brk, mm->start_stack,
 		mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
-		mm->binfmt, mm->flags, mm->core_state,
+		mm->binfmt, mm->flags,
 #ifdef CONFIG_AIO
 		mm->ioctx_table,
 #endif