[PATCH] de_thread: Use tsk not current

Ingo Oeser pointed out that because current expands to an inline function,
it is more space-efficient and somewhat faster to simply keep a cached
copy of current in another variable.  This patch implements that for the
de_thread function.

(akpm: saves nearly 100 bytes of text on x86)

Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit aafe6c2a2b (parent 66f37509fc)
Author:    Eric W. Biederman, 2006-09-27 01:51:13 -07:00
Committer: Linus Torvalds

1 file changed, 23 insertions(+), 23 deletions(-)
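For illustration only (this sketch is not part of the patch), here is the same
pattern in a self-contained user-space form: a stand-in accessor plays the role
of the kernel's current macro, and the second helper caches its result in a
local, as de_thread now does with tsk.  All names here (get_current_task,
current_task, the_task) are invented for the example.

#include <stdio.h>

/* Toy stand-in for the kernel's current macro: every use is a function
 * call the compiler cannot always fold into a single load. */
struct task {
	int pid;
	int tgid;
};

static struct task the_task = { .pid = 42, .tgid = 42 };

static struct task *get_current_task(void)
{
	return &the_task;
}

#define current_task (get_current_task())

/* Before: each use of current_task re-evaluates the accessor. */
static int is_leader_uncached(void)
{
	return current_task->pid == current_task->tgid;
}

/* After: cache the pointer once in a local, which is what the patch does
 * by reusing the tsk argument instead of current. */
static int is_leader_cached(void)
{
	struct task *tsk = current_task;

	return tsk->pid == tsk->tgid;
}

int main(void)
{
	printf("uncached: %d, cached: %d\n",
	       is_leader_uncached(), is_leader_cached());
	return 0;
}

In de_thread itself no new variable is even needed: the task pointer is already
available as the tsk parameter, so the patch simply uses it in place of current.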

@@ -595,7 +595,7 @@ static int de_thread(struct task_struct *tsk)
 	if (!newsighand)
 		return -ENOMEM;
 
-	if (thread_group_empty(current))
+	if (thread_group_empty(tsk))
 		goto no_thread_group;
 
 	/*
@@ -620,17 +620,17 @@ static int de_thread(struct task_struct *tsk)
 	 * Reparenting needs write_lock on tasklist_lock,
 	 * so it is safe to do it under read_lock.
 	 */
-	if (unlikely(current->group_leader == child_reaper))
-		child_reaper = current;
+	if (unlikely(tsk->group_leader == child_reaper))
+		child_reaper = tsk;
 
-	zap_other_threads(current);
+	zap_other_threads(tsk);
 	read_unlock(&tasklist_lock);
 
 	/*
 	 * Account for the thread group leader hanging around:
 	 */
 	count = 1;
-	if (!thread_group_leader(current)) {
+	if (!thread_group_leader(tsk)) {
 		count = 2;
 		/*
 		 * The SIGALRM timer survives the exec, but needs to point
@@ -639,14 +639,14 @@ static int de_thread(struct task_struct *tsk)
 		 * synchronize with any firing (by calling del_timer_sync)
 		 * before we can safely let the old group leader die.
 		 */
-		sig->tsk = current;
+		sig->tsk = tsk;
 		spin_unlock_irq(lock);
 		if (hrtimer_cancel(&sig->real_timer))
 			hrtimer_restart(&sig->real_timer);
 		spin_lock_irq(lock);
 	}
 	while (atomic_read(&sig->count) > count) {
-		sig->group_exit_task = current;
+		sig->group_exit_task = tsk;
 		sig->notify_count = count;
 		__set_current_state(TASK_UNINTERRUPTIBLE);
 		spin_unlock_irq(lock);
@@ -662,13 +662,13 @@ static int de_thread(struct task_struct *tsk)
 	 * do is to wait for the thread group leader to become inactive,
 	 * and to assume its PID:
 	 */
-	if (!thread_group_leader(current)) {
+	if (!thread_group_leader(tsk)) {
 		/*
 		 * Wait for the thread group leader to be a zombie.
 		 * It should already be zombie at this point, most
 		 * of the time.
 		 */
-		leader = current->group_leader;
+		leader = tsk->group_leader;
 		while (leader->exit_state != EXIT_ZOMBIE)
 			yield();
 
@@ -682,12 +682,12 @@ static int de_thread(struct task_struct *tsk)
 		 * When we take on its identity by switching to its PID, we
 		 * also take its birthdate (always earlier than our own).
 		 */
-		current->start_time = leader->start_time;
+		tsk->start_time = leader->start_time;
 
 		write_lock_irq(&tasklist_lock);
 
-		BUG_ON(leader->tgid != current->tgid);
-		BUG_ON(current->pid == current->tgid);
+		BUG_ON(leader->tgid != tsk->tgid);
+		BUG_ON(tsk->pid == tsk->tgid);
 		/*
 		 * An exec() starts a new thread group with the
 		 * TGID of the previous thread group. Rehash the
@@ -700,17 +700,17 @@ static int de_thread(struct task_struct *tsk)
 		 * Note: The old leader also uses this pid until release_task
 		 *       is called.  Odd but simple and correct.
 		 */
-		detach_pid(current, PIDTYPE_PID);
-		current->pid = leader->pid;
-		attach_pid(current, PIDTYPE_PID, current->pid);
-		transfer_pid(leader, current, PIDTYPE_PGID);
-		transfer_pid(leader, current, PIDTYPE_SID);
-		list_replace_rcu(&leader->tasks, &current->tasks);
+		detach_pid(tsk, PIDTYPE_PID);
+		tsk->pid = leader->pid;
+		attach_pid(tsk, PIDTYPE_PID, tsk->pid);
+		transfer_pid(leader, tsk, PIDTYPE_PGID);
+		transfer_pid(leader, tsk, PIDTYPE_SID);
+		list_replace_rcu(&leader->tasks, &tsk->tasks);
 
-		current->group_leader = current;
-		leader->group_leader = current;
+		tsk->group_leader = tsk;
+		leader->group_leader = tsk;
 
-		current->exit_signal = SIGCHLD;
+		tsk->exit_signal = SIGCHLD;
 
 		BUG_ON(leader->exit_state != EXIT_ZOMBIE);
 		leader->exit_state = EXIT_DEAD;
@@ -750,7 +750,7 @@ no_thread_group:
 		spin_lock(&oldsighand->siglock);
 		spin_lock_nested(&newsighand->siglock, SINGLE_DEPTH_NESTING);
 
-		rcu_assign_pointer(current->sighand, newsighand);
+		rcu_assign_pointer(tsk->sighand, newsighand);
 		recalc_sigpending();
 
 		spin_unlock(&newsighand->siglock);
@@ -761,7 +761,7 @@ no_thread_group:
 			kmem_cache_free(sighand_cachep, oldsighand);
 	}
 
-	BUG_ON(!thread_group_leader(current));
+	BUG_ON(!thread_group_leader(tsk));
 	return 0;
 }