kthread: Generalize pf_io_worker so it can point to struct kthread
The point of using set_child_tid to hold the kthread pointer was that
it already did what is necessary. There are now restrictions on when
set_child_tid can be initialized and when set_child_tid can be used in
schedule_tail. This indicates that continuing to use set_child_tid to
hold the kthread pointer is a bad idea.

Instead of continuing to use the set_child_tid field of task_struct,
generalize the pf_io_worker field of task_struct and use it to hold
the kthread pointer.

Rename pf_io_worker (which is a void * pointer) to worker_private so
it can be used to store the kthread's struct kthread pointer. Update
the kthread code to store the kthread pointer in the worker_private
field. Remove the places where set_child_tid had to be dealt with
carefully because kthreads also used it.

Link: https://lkml.kernel.org/r/CAHk-=wgtFAA9SbVYg0gR1tqPMC17-NYcs0GQkaYg1bGhh1uJQQ@mail.gmail.com
Link: https://lkml.kernel.org/r/87a6grvqy8.fsf_-_@email.froward.int.ebiederm.org
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
commit e32cf5dfbe
parent 00580f03af
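For orientation, here is a minimal sketch (not part of the patch) of the pattern the rename establishes: task_struct keeps a single opaque worker_private pointer, and each user recovers its own type from it based on the task's flags. The helper names task_kthread() and task_io_worker() are invented for illustration; the real accessors are to_kthread() and the io-wq helpers changed in the diff below.

#include <linux/sched.h>	/* struct task_struct, PF_KTHREAD, PF_IO_WORKER */

struct kthread;			/* private to kernel/kthread.c */
struct io_worker;		/* private to fs/io-wq.c */

/*
 * Sketch only, not from the patch: made-up helpers showing how the single
 * worker_private pointer is interpreted depending on the task type.
 */
static inline struct kthread *task_kthread(struct task_struct *p)
{
	/* Only kthreads store a struct kthread in worker_private. */
	return (p->flags & PF_KTHREAD) ? p->worker_private : NULL;
}

static inline struct io_worker *task_io_worker(struct task_struct *p)
{
	/* Only io_uring workers store a struct io_worker there. */
	return (p->flags & PF_IO_WORKER) ? p->worker_private : NULL;
}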
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -657,7 +657,7 @@ loop:
  */
 void io_wq_worker_running(struct task_struct *tsk)
 {
-	struct io_worker *worker = tsk->pf_io_worker;
+	struct io_worker *worker = tsk->worker_private;
 
 	if (!worker)
 		return;
@@ -675,7 +675,7 @@ void io_wq_worker_running(struct task_struct *tsk)
  */
 void io_wq_worker_sleeping(struct task_struct *tsk)
 {
-	struct io_worker *worker = tsk->pf_io_worker;
+	struct io_worker *worker = tsk->worker_private;
 
 	if (!worker)
 		return;
@@ -694,7 +694,7 @@ void io_wq_worker_sleeping(struct task_struct *tsk)
 static void io_init_new_worker(struct io_wqe *wqe, struct io_worker *worker,
			       struct task_struct *tsk)
 {
-	tsk->pf_io_worker = worker;
+	tsk->worker_private = worker;
 	worker->task = tsk;
 	set_cpus_allowed_ptr(tsk, wqe->cpu_mask);
 	tsk->flags |= PF_NO_SETAFFINITY;
--- a/fs/io-wq.h
+++ b/fs/io-wq.h
@@ -200,6 +200,6 @@ static inline void io_wq_worker_running(struct task_struct *tsk)
 static inline bool io_wq_current_is_worker(void)
 {
 	return in_task() && (current->flags & PF_IO_WORKER) &&
-		current->pf_io_worker;
+		current->worker_private;
 }
 #endif
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -987,8 +987,8 @@ struct task_struct {
 	/* CLONE_CHILD_CLEARTID: */
 	int __user			*clear_child_tid;
 
-	/* PF_IO_WORKER */
-	void				*pf_io_worker;
+	/* PF_KTHREAD | PF_IO_WORKER */
+	void				*worker_private;
 
 	u64				utime;
 	u64				stime;
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -950,7 +950,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 	tsk->splice_pipe = NULL;
 	tsk->task_frag.page = NULL;
 	tsk->wake_q.next = NULL;
-	tsk->pf_io_worker = NULL;
+	tsk->worker_private = NULL;
 
 	account_kernel_stack(tsk, 1);
 
@@ -2032,12 +2032,6 @@ static __latent_entropy struct task_struct *copy_process(
 		siginitsetinv(&p->blocked, sigmask(SIGKILL)|sigmask(SIGSTOP));
 	}
 
-	/*
-	 * This _must_ happen before we call free_task(), i.e. before we jump
-	 * to any of the bad_fork_* labels. This is to avoid freeing
-	 * p->set_child_tid which is (ab)used as a kthread's data pointer for
-	 * kernel threads (PF_KTHREAD).
-	 */
 	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? args->child_tid : NULL;
 	/*
 	 * Clear TID on mm_release()?
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -72,7 +72,7 @@ enum KTHREAD_BITS {
 static inline struct kthread *to_kthread(struct task_struct *k)
 {
 	WARN_ON(!(k->flags & PF_KTHREAD));
-	return (__force void *)k->set_child_tid;
+	return k->worker_private;
 }
 
 /*
@@ -80,7 +80,7 @@ static inline struct kthread *to_kthread(struct task_struct *k)
  *
  * Per construction; when:
  *
- *	(p->flags & PF_KTHREAD) && p->set_child_tid
+ *	(p->flags & PF_KTHREAD) && p->worker_private
  *
  * the task is both a kthread and struct kthread is persistent. However
  * PF_KTHREAD on it's own is not, kernel_thread() can exec() (See umh.c and
@@ -88,7 +88,7 @@ static inline struct kthread *to_kthread(struct task_struct *k)
  */
 static inline struct kthread *__to_kthread(struct task_struct *p)
 {
-	void *kthread = (__force void *)p->set_child_tid;
+	void *kthread = p->worker_private;
 	if (kthread && !(p->flags & PF_KTHREAD))
 		kthread = NULL;
 	return kthread;
@@ -109,11 +109,7 @@ bool set_kthread_struct(struct task_struct *p)
 	init_completion(&kthread->parked);
 	p->vfork_done = &kthread->exited;
 
-	/*
-	 * We abuse ->set_child_tid to avoid the new member and because it
-	 * can't be wrongly copied by copy_process().
-	 */
-	p->set_child_tid = (__force void __user *)kthread;
+	p->worker_private = kthread;
 	return true;
 }
 
@@ -128,7 +124,7 @@ void free_kthread_struct(struct task_struct *k)
 #ifdef CONFIG_BLK_CGROUP
 	WARN_ON_ONCE(kthread && kthread->blkcg_css);
 #endif
-	k->set_child_tid = (__force void __user *)NULL;
+	k->worker_private = NULL;
 	kfree(kthread);
 }
 
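The comment block above spells out the invariant: worker_private only identifies a persistent struct kthread while PF_KTHREAD is also set, because a task created with kernel_thread() can exec() and stop being a kthread. A hypothetical caller (sketch only; task_is_percpu_kthread() is a made-up name modelled on kthread_is_per_cpu()) would therefore go through __to_kthread() rather than to_kthread():

/*
 * Illustrative sketch, not from the patch. It would have to live in
 * kernel/kthread.c, where struct kthread, __to_kthread() and
 * KTHREAD_IS_PER_CPU are visible.
 */
static bool task_is_percpu_kthread(struct task_struct *p)
{
	struct kthread *kthread = __to_kthread(p);

	/*
	 * __to_kthread() returns NULL both for plain user tasks and for
	 * tasks that started via kernel_thread() but have since exec()ed,
	 * so this is safe to call on any task_struct.
	 */
	if (!kthread)
		return false;
	return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
}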
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4908,7 +4908,7 @@ asmlinkage __visible void schedule_tail(struct task_struct *prev)
 	finish_task_switch(prev);
 	preempt_enable();
 
-	if (!(current->flags & PF_KTHREAD) && current->set_child_tid)
+	if (current->set_child_tid)
 		put_user(task_pid_vnr(current), current->set_child_tid);
 
 	calculate_sigpending();
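With kthreads no longer stashing anything in set_child_tid, the field is exclusively the CLONE_CHILD_SETTID pointer supplied by userspace, which is why the PF_KTHREAD guard above can be dropped. A rough userspace sketch of that mechanism (assuming a libc and kernel headers that expose SYS_clone3 and struct clone_args; error handling omitted):

#define _GNU_SOURCE
#include <linux/sched.h>	/* struct clone_args, CLONE_CHILD_SETTID */
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <signal.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	pid_t child_tid = 0;
	struct clone_args args;

	memset(&args, 0, sizeof(args));
	args.flags = CLONE_CHILD_SETTID;
	/* schedule_tail() writes the child's TID here, in the child's memory. */
	args.child_tid = (uintptr_t)&child_tid;
	args.exit_signal = SIGCHLD;

	long pid = syscall(SYS_clone3, &args, sizeof(args));
	if (pid == 0) {
		/* By the time the child runs, the kernel has filled child_tid in. */
		printf("child: kernel stored my TID as %d\n", (int)child_tid);
		_exit(0);
	}
	wait(NULL);
	return 0;
}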