exec: simplify the new ->sighand allocation

de_thread() pre-allocates newsighand to make sure that exec() can't fail after
killing all sub-threads. Imho, this buys nothing, but complicates the code:

	- this is (mostly) needed to handle CLONE_SIGHAND-without-CLONE_THREAD
	  tasks, a very unlikely (if ever used) case

	- unless we already have some serious problems, a GFP_KERNEL allocation
	  should not fail

	- ENOMEM can still happen after de_thread(); ->sighand is not the last
	  object we have to allocate

Change the code to allocate the new ->sighand on demand.

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Roland McGrath <roland@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit b2c903b879
parent 0840a90d94
Author: Oleg Nesterov, 2007-10-16 23:27:22 -07:00
Committer: Linus Torvalds
1 changed file with 9 additions and 15 deletions


@@ -747,7 +747,7 @@ static int exec_mmap(struct mm_struct *mm)
 static int de_thread(struct task_struct *tsk)
 {
 	struct signal_struct *sig = tsk->signal;
-	struct sighand_struct *newsighand, *oldsighand = tsk->sighand;
+	struct sighand_struct *oldsighand = tsk->sighand;
 	spinlock_t *lock = &oldsighand->siglock;
 	struct task_struct *leader = NULL;
 	int count;
@@ -761,10 +761,6 @@ static int de_thread(struct task_struct *tsk)
 		return 0;
 	}
 
-	newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
-	if (!newsighand)
-		return -ENOMEM;
-
 	if (thread_group_empty(tsk))
 		goto no_thread_group;
 
@@ -781,7 +777,6 @@ static int de_thread(struct task_struct *tsk)
 		 */
 		spin_unlock_irq(lock);
 		read_unlock(&tasklist_lock);
-		kmem_cache_free(sighand_cachep, newsighand);
 		return -EAGAIN;
 	}
 
@@ -899,17 +894,16 @@ no_thread_group:
 	if (leader)
 		release_task(leader);
 
-	if (atomic_read(&oldsighand->count) == 1) {
+	if (atomic_read(&oldsighand->count) != 1) {
+		struct sighand_struct *newsighand;
 		/*
-		 * Now that we nuked the rest of the thread group,
-		 * it turns out we are not sharing sighand any more either.
-		 * So we can just keep it.
+		 * This ->sighand is shared with the CLONE_SIGHAND
+		 * but not CLONE_THREAD task, switch to the new one.
 		 */
-		kmem_cache_free(sighand_cachep, newsighand);
-	} else {
-		/*
-		 * Move our state over to newsighand and switch it in.
-		 */
+		newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
+		if (!newsighand)
+			return -ENOMEM;
+
 		atomic_set(&newsighand->count, 1);
 		memcpy(newsighand->action, oldsighand->action,
 		       sizeof(newsighand->action));
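
For reference, this is how the tail of de_thread() reads once the patch is applied, assembled from the last hunk above; the lines following the memcpy() call fall outside the hunk (unchanged by the patch and not visible in this view):

	if (leader)
		release_task(leader);

	if (atomic_read(&oldsighand->count) != 1) {
		struct sighand_struct *newsighand;
		/*
		 * This ->sighand is shared with the CLONE_SIGHAND
		 * but not CLONE_THREAD task, switch to the new one.
		 */
		newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
		if (!newsighand)
			return -ENOMEM;

		atomic_set(&newsighand->count, 1);
		memcpy(newsighand->action, oldsighand->action,
		       sizeof(newsighand->action));
		/* ... remainder of de_thread() not shown in this view ... */
	}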