sched/headers: Move the task_lock()/unlock() APIs to <linux/sched/task.h>
The task_lock()/task_unlock() APIs are not related to core scheduling; they are task lifetime APIs, i.e. they belong in <linux/sched/task.h>. Move them.

Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent cdc75e9f7b
commit 56cd697366
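For context, a minimal sketch (not part of this commit) of how the moved helpers are typically used: task_lock() takes p->alloc_lock to stabilize fields such as ->comm while they are read. The helper name sample_get_comm() and the fixed-size buffer are illustrative assumptions, not code from the kernel tree.

#include <linux/sched.h>
#include <linux/sched/task.h>	/* task_lock()/task_unlock() live here after this patch */
#include <linux/string.h>

/* Illustrative helper (not from the kernel tree): snapshot another
 * task's ->comm, one of the fields the comment in the hunks below says
 * task_lock() protects. The caller passes a buffer of at least
 * TASK_COMM_LEN bytes. */
static void sample_get_comm(struct task_struct *p, char buf[TASK_COMM_LEN])
{
	task_lock(p);				/* takes p->alloc_lock */
	strscpy(buf, p->comm, TASK_COMM_LEN);
	task_unlock(p);				/* releases p->alloc_lock */
}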
@@ -1526,26 +1526,6 @@ static inline unsigned long wait_task_inactive(struct task_struct *p,
 }
 #endif
 
-/*
- * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
- * subscriptions and synchronises with wait4(). Also used in procfs. Also
- * pins the final release of task.io_context. Also protects ->cpuset and
- * ->cgroup.subsys[]. And ->vfork_done.
- *
- * Nests both inside and outside of read_lock(&tasklist_lock).
- * It must not be nested with write_lock_irq(&tasklist_lock),
- * neither inside nor outside.
- */
-static inline void task_lock(struct task_struct *p)
-{
-	spin_lock(&p->alloc_lock);
-}
-
-static inline void task_unlock(struct task_struct *p)
-{
-	spin_unlock(&p->alloc_lock);
-}
-
 /* set thread flags in other task's structures
  * - see asm/thread_info.h for TIF_xxxx flags available
  */
@@ -91,4 +91,24 @@ static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
 }
 #endif
 
+/*
+ * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
+ * subscriptions and synchronises with wait4(). Also used in procfs. Also
+ * pins the final release of task.io_context. Also protects ->cpuset and
+ * ->cgroup.subsys[]. And ->vfork_done.
+ *
+ * Nests both inside and outside of read_lock(&tasklist_lock).
+ * It must not be nested with write_lock_irq(&tasklist_lock),
+ * neither inside nor outside.
+ */
+static inline void task_lock(struct task_struct *p)
+{
+	spin_lock(&p->alloc_lock);
+}
+
+static inline void task_unlock(struct task_struct *p)
+{
+	spin_unlock(&p->alloc_lock);
+}
+
 #endif /* _LINUX_SCHED_TASK_H */
@@ -6,6 +6,7 @@
 #include <linux/delay.h>
 #include <linux/mm.h>
 #include <linux/sched/signal.h>
+#include <linux/sched/task.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/delayacct.h>
@@ -1,6 +1,6 @@
 #include "cgroup-internal.h"
 
-#include <linux/sched.h>
+#include <linux/sched/task.h>
 #include <linux/slab.h>
 #include <linux/nsproxy.h>
 #include <linux/proc_ns.h>
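As a further illustration (again not part of this commit), the nesting rule in the moved comment permits task_lock() inside read_lock(&tasklist_lock) but forbids mixing it with write_lock_irq(&tasklist_lock). A hedged sketch of the allowed pattern follows; sample_walk_tasks() is a made-up name.

#include <linux/sched.h>
#include <linux/sched/signal.h>	/* for_each_process() */
#include <linux/sched/task.h>	/* task_lock()/task_unlock(), tasklist_lock */

/* Illustrative walk (not from the kernel tree): take task_lock()
 * nested inside read_lock(&tasklist_lock), which the moved comment
 * explicitly allows. */
static void sample_walk_tasks(void)
{
	struct task_struct *p;

	read_lock(&tasklist_lock);
	for_each_process(p) {
		task_lock(p);		/* stabilizes ->mm, ->comm, ->cpuset, ... */
		/* ... inspect fields protected by p->alloc_lock here ... */
		task_unlock(p);
	}
	read_unlock(&tasklist_lock);
}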