fork: Generalize PF_IO_WORKER handling
Add fn and fn_arg members into struct kernel_clone_args and test for them in copy_thread (instead of testing for PF_KTHREAD | PF_IO_WORKER). This allows any task that wants to be a user space task that only runs in kernel mode to use this functionality.

The code on x86 is an exception and still retains a PF_KTHREAD test because x86, unlike everything else, handles kthreads slightly differently than user space tasks that start with a function.

The functions that created tasks that start with a function have been updated to set ".fn" and ".fn_arg" instead of ".stack" and ".stack_size". These functions are fork_idle(), create_io_thread(), kernel_thread(), and user_mode_thread().

Link: https://lkml.kernel.org/r/20220506141512.516114-4-ebiederm@xmission.com
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
parent 36cb0e1cda
commit 5bd2e97c86
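In outline, the patch below makes a task that starts at a kernel function describable by args->fn/args->fn_arg instead of by its PF_* flags. A minimal sketch of the shape most arch copy_thread() implementations take after this change (x86 keeps an extra PF_KTHREAD test, as noted above); arch_setup_kernel_frame() and arch_setup_user_frame() are hypothetical stand-ins for the per-architecture register bookkeeping in the hunks that follow:

int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
	if (unlikely(args->fn)) {
		/* Child starts in kernel mode at args->fn(args->fn_arg):
		 * kthreads, io_uring workers and the idle task. */
		arch_setup_kernel_frame(p, args->fn, args->fn_arg);	/* hypothetical helper */
		return 0;
	}
	/* Ordinary user-space child: duplicate current_pt_regs(), etc. */
	return arch_setup_user_frame(p, args);				/* hypothetical helper */
}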
@@ -237,7 +237,6 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
 	unsigned long clone_flags = args->flags;
 	unsigned long usp = args->stack;
-	unsigned long kthread_arg = args->stack_size;
 	unsigned long tls = args->tls;
 	extern void ret_from_fork(void);
 	extern void ret_from_kernel_thread(void);
@@ -251,13 +250,13 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 	childti->pcb.ksp = (unsigned long) childstack;
 	childti->pcb.flags = 1; /* set FEN, clear everything else */
 
-	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+	if (unlikely(args->fn)) {
 		/* kernel thread */
 		memset(childstack, 0,
 			sizeof(struct switch_stack) + sizeof(struct pt_regs));
 		childstack->r26 = (unsigned long) ret_from_kernel_thread;
-		childstack->r9 = usp; /* function */
-		childstack->r10 = kthread_arg;
+		childstack->r9 = (unsigned long) args->fn;
+		childstack->r10 = (unsigned long) args->fn_arg;
 		childregs->hae = alpha_mv.hae_cache;
 		childti->pcb.usp = 0;
 		return 0;

@@ -166,7 +166,6 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
 	unsigned long clone_flags = args->flags;
 	unsigned long usp = args->stack;
-	unsigned long kthread_arg = args->stack_size;
 	unsigned long tls = args->tls;
 	struct pt_regs *c_regs; /* child's pt_regs */
 	unsigned long *childksp; /* to unwind out of __switch_to() */
@@ -193,11 +192,11 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 	childksp[0] = 0; /* fp */
 	childksp[1] = (unsigned long)ret_from_fork; /* blink */
 
-	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+	if (unlikely(args->fn)) {
 		memset(c_regs, 0, sizeof(struct pt_regs));
 
-		c_callee->r13 = kthread_arg;
-		c_callee->r14 = usp; /* function */
+		c_callee->r13 = (unsigned long)args->fn_arg;
+		c_callee->r14 = (unsigned long)args->fn;
 
 		return 0;
 	}

@@ -242,7 +242,6 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
 	unsigned long clone_flags = args->flags;
 	unsigned long stack_start = args->stack;
-	unsigned long stk_sz = args->stack_size;
 	unsigned long tls = args->tls;
 	struct thread_info *thread = task_thread_info(p);
 	struct pt_regs *childregs = task_pt_regs(p);
@@ -259,15 +258,15 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 	thread->cpu_domain = get_domain();
 #endif
 
-	if (likely(!(p->flags & (PF_KTHREAD | PF_IO_WORKER)))) {
+	if (likely(!args->fn)) {
 		*childregs = *current_pt_regs();
 		childregs->ARM_r0 = 0;
 		if (stack_start)
 			childregs->ARM_sp = stack_start;
 	} else {
 		memset(childregs, 0, sizeof(struct pt_regs));
-		thread->cpu_context.r4 = stk_sz;
-		thread->cpu_context.r5 = stack_start;
+		thread->cpu_context.r4 = (unsigned long)args->fn_arg;
+		thread->cpu_context.r5 = (unsigned long)args->fn;
 		childregs->ARM_cpsr = SVC_MODE;
 	}
 	thread->cpu_context.pc = (unsigned long)ret_from_fork;

@@ -320,7 +320,6 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
 	unsigned long clone_flags = args->flags;
 	unsigned long stack_start = args->stack;
-	unsigned long stk_sz = args->stack_size;
 	unsigned long tls = args->tls;
 	struct pt_regs *childregs = task_pt_regs(p);
 
@@ -337,7 +336,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 
 	ptrauth_thread_init_kernel(p);
 
-	if (likely(!(p->flags & (PF_KTHREAD | PF_IO_WORKER)))) {
+	if (likely(!args->fn)) {
 		*childregs = *current_pt_regs();
 		childregs->regs[0] = 0;
 
@@ -371,8 +370,8 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 		memset(childregs, 0, sizeof(struct pt_regs));
 		childregs->pstate = PSR_MODE_EL1h | PSR_IL_BIT;
 
-		p->thread.cpu_context.x19 = stack_start;
-		p->thread.cpu_context.x20 = stk_sz;
+		p->thread.cpu_context.x19 = (unsigned long)args->fn;
+		p->thread.cpu_context.x20 = (unsigned long)args->fn_arg;
 	}
 	p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
 	p->thread.cpu_context.sp = (unsigned long)childregs;

@@ -34,7 +34,6 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
 	unsigned long clone_flags = args->flags;
 	unsigned long usp = args->stack;
-	unsigned long kthread_arg = args->stack_size;
 	unsigned long tls = args->tls;
 	struct switch_stack *childstack;
 	struct pt_regs *childregs = task_pt_regs(p);
@@ -49,11 +48,11 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 	/* setup thread.sp for switch_to !!! */
 	p->thread.sp = (unsigned long)childstack;
 
-	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+	if (unlikely(args->fn)) {
 		memset(childregs, 0, sizeof(struct pt_regs));
 		childstack->r15 = (unsigned long) ret_from_kernel_thread;
-		childstack->r10 = kthread_arg;
-		childstack->r9 = usp;
+		childstack->r10 = (unsigned long) args->fn_arg;
+		childstack->r9 = (unsigned long) args->fn;
 		childregs->sr = mfcr("psr");
 	} else {
 		*childregs = *(current_pt_regs());

@@ -108,16 +108,15 @@ void flush_thread(void)
 int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
 	unsigned long usp = args->stack;
-	unsigned long topstk = args->stack_size;
 	struct pt_regs *childregs;
 
 	childregs = (struct pt_regs *) (THREAD_SIZE + task_stack_page(p)) - 1;
 
-	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+	if (unlikely(args->fn)) {
 		memset(childregs, 0, sizeof(struct pt_regs));
 		childregs->retpc = (unsigned long) ret_from_kernel_thread;
-		childregs->er4 = topstk; /* arg */
-		childregs->er5 = usp; /* fn */
+		childregs->er4 = (unsigned long) args->fn_arg;
+		childregs->er5 = (unsigned long) args->fn;
 	} else {
 		*childregs = *current_pt_regs();
 		childregs->er0 = 0;

@@ -54,7 +54,6 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
 	unsigned long clone_flags = args->flags;
 	unsigned long usp = args->stack;
-	unsigned long arg = args->stack_size;
 	unsigned long tls = args->tls;
 	struct thread_info *ti = task_thread_info(p);
 	struct hexagon_switch_stack *ss;
@@ -76,11 +75,11 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 		    sizeof(*ss));
 	ss->lr = (unsigned long)ret_from_fork;
 	p->thread.switch_sp = ss;
-	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+	if (unlikely(args->fn)) {
 		memset(childregs, 0, sizeof(struct pt_regs));
 		/* r24 <- fn, r25 <- arg */
-		ss->r24 = usp;
-		ss->r25 = arg;
+		ss->r24 = (unsigned long)args->fn;
+		ss->r25 = (unsigned long)args->fn_arg;
 		pt_set_kmode(childregs);
 		return 0;
 	}

@@ -341,14 +341,14 @@ copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 
 	ia64_drop_fpu(p); /* don't pick up stale state from a CPU's fph */
 
-	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+	if (unlikely(args->fn)) {
 		if (unlikely(args->idle)) {
 			/* fork_idle() called us */
 			return 0;
 		}
 		memset(child_stack, 0, sizeof(*child_ptregs) + sizeof(*child_stack));
-		child_stack->r4 = user_stack_base; /* payload */
-		child_stack->r5 = user_stack_size; /* argument */
+		child_stack->r4 = (unsigned long) args->fn;
+		child_stack->r5 = (unsigned long) args->fn_arg;
 		/*
 		 * Preserve PSR bits, except for bits 32-34 and 37-45,
 		 * which we can't read.

@@ -142,7 +142,6 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
 	unsigned long clone_flags = args->flags;
 	unsigned long usp = args->stack;
-	unsigned long arg = args->stack_size;
 	unsigned long tls = args->tls;
 	struct fork_frame {
 		struct switch_stack sw;
@@ -160,12 +159,12 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 	 */
 	p->thread.fc = USER_DATA;
 
-	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+	if (unlikely(args->fn)) {
 		/* kernel thread */
 		memset(frame, 0, sizeof(struct fork_frame));
 		frame->regs.sr = PS_S;
-		frame->sw.a3 = usp; /* function */
-		frame->sw.d7 = arg;
+		frame->sw.a3 = (unsigned long)args->fn;
+		frame->sw.d7 = (unsigned long)args->fn_arg;
 		frame->sw.retpc = (unsigned long)ret_from_kernel_thread;
 		p->thread.usp = 0;
 		return 0;

@@ -56,19 +56,18 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
 	unsigned long clone_flags = args->flags;
 	unsigned long usp = args->stack;
-	unsigned long arg = args->stack_size;
 	unsigned long tls = args->tls;
 	struct pt_regs *childregs = task_pt_regs(p);
 	struct thread_info *ti = task_thread_info(p);
 
-	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+	if (unlikely(args->fn)) {
 		/* if we're creating a new kernel thread then just zeroing all
 		 * the registers. That's OK for a brand new thread.*/
 		memset(childregs, 0, sizeof(struct pt_regs));
 		memset(&ti->cpu_context, 0, sizeof(struct cpu_context));
 		ti->cpu_context.r1 = (unsigned long)childregs;
-		ti->cpu_context.r20 = (unsigned long)usp; /* fn */
-		ti->cpu_context.r19 = (unsigned long)arg;
+		ti->cpu_context.r20 = (unsigned long)args->fn;
+		ti->cpu_context.r19 = (unsigned long)args->fn_arg;
 		childregs->pt_mode = 1;
 		local_save_flags(childregs->msr);
 		ti->cpu_context.msr = childregs->msr & ~MSR_IE;

@@ -109,7 +109,6 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
 	unsigned long clone_flags = args->flags;
 	unsigned long usp = args->stack;
-	unsigned long kthread_arg = args->stack_size;
 	unsigned long tls = args->tls;
 	struct thread_info *ti = task_thread_info(p);
 	struct pt_regs *childregs, *regs = current_pt_regs();
@@ -122,12 +121,12 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 	/* Put the stack after the struct pt_regs. */
 	childksp = (unsigned long) childregs;
 	p->thread.cp0_status = (read_c0_status() & ~(ST0_CU2|ST0_CU1)) | ST0_KERNEL_CUMASK;
-	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+	if (unlikely(args->fn)) {
 		/* kernel thread */
 		unsigned long status = p->thread.cp0_status;
 		memset(childregs, 0, sizeof(struct pt_regs));
-		p->thread.reg16 = usp; /* fn */
-		p->thread.reg17 = kthread_arg;
+		p->thread.reg16 = (unsigned long)args->fn;
+		p->thread.reg17 = (unsigned long)args->fn_arg;
 		p->thread.reg29 = childksp;
 		p->thread.reg31 = (unsigned long) ret_from_kernel_thread;
 #if defined(CONFIG_CPU_R3000)

@@ -104,7 +104,6 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
 	unsigned long clone_flags = args->flags;
 	unsigned long usp = args->stack;
-	unsigned long arg = args->stack_size;
 	unsigned long tls = args->tls;
 	struct pt_regs *childregs = task_pt_regs(p);
 	struct pt_regs *regs;
@@ -112,12 +111,12 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 	struct switch_stack *childstack =
 		((struct switch_stack *)childregs) - 1;
 
-	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+	if (unlikely(args->fn)) {
 		memset(childstack, 0,
 			sizeof(struct switch_stack) + sizeof(struct pt_regs));
 
-		childstack->r16 = usp; /* fn */
-		childstack->r17 = arg;
+		childstack->r16 = (unsigned long) args->fn;
+		childstack->r17 = (unsigned long) args->fn_arg;
 		childstack->ra = (unsigned long) ret_from_kernel_thread;
 		childregs->estatus = STATUS_PIE;
 		childregs->sp = (unsigned long) childstack;

@@ -156,7 +156,6 @@ copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
 	unsigned long clone_flags = args->flags;
 	unsigned long usp = args->stack;
-	unsigned long arg = args->stack_size;
 	unsigned long tls = args->tls;
 	struct pt_regs *userregs;
 	struct pt_regs *kregs;
@@ -175,10 +174,10 @@ copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 	sp -= sizeof(struct pt_regs);
 	kregs = (struct pt_regs *)sp;
 
-	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+	if (unlikely(args->fn)) {
 		memset(kregs, 0, sizeof(struct pt_regs));
-		kregs->gpr[20] = usp; /* fn, kernel thread */
-		kregs->gpr[22] = arg;
+		kregs->gpr[20] = (unsigned long)args->fn;
+		kregs->gpr[22] = (unsigned long)args->fn_arg;
 	} else {
 		*userregs = *current_pt_regs();
 

@@ -210,7 +210,6 @@ copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
 	unsigned long clone_flags = args->flags;
 	unsigned long usp = args->stack;
-	unsigned long kthread_arg = args->stack_size;
 	unsigned long tls = args->tls;
 	struct pt_regs *cregs = &(p->thread.regs);
 	void *stack = task_stack_page(p);
@@ -221,7 +220,7 @@ copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 	extern void * const ret_from_kernel_thread;
 	extern void * const child_return;
 
-	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+	if (unlikely(args->fn)) {
 		/* kernel thread */
 		memset(cregs, 0, sizeof(struct pt_regs));
 		if (args->idle) /* idle thread */
@@ -236,12 +235,12 @@ copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 		 * ret_from_kernel_thread.
 		 */
 #ifdef CONFIG_64BIT
-		cregs->gr[27] = ((unsigned long *)usp)[3];
-		cregs->gr[26] = ((unsigned long *)usp)[2];
+		cregs->gr[27] = ((unsigned long *)args->fn)[3];
+		cregs->gr[26] = ((unsigned long *)args->fn)[2];
 #else
-		cregs->gr[26] = usp;
+		cregs->gr[26] = (unsigned long) args->fn;
 #endif
-		cregs->gr[25] = kthread_arg;
+		cregs->gr[25] = (unsigned long) args->fn_arg;
 	} else {
 		/* user thread */
 		/* usp must be word aligned. This also prevents users from

@@ -1720,7 +1720,6 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
 	unsigned long clone_flags = args->flags;
 	unsigned long usp = args->stack;
-	unsigned long kthread_arg = args->stack_size;
 	unsigned long tls = args->tls;
 	struct pt_regs *childregs, *kregs;
 	extern void ret_from_fork(void);
@@ -1738,18 +1737,18 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 	/* Copy registers */
 	sp -= sizeof(struct pt_regs);
 	childregs = (struct pt_regs *) sp;
-	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+	if (unlikely(args->fn)) {
 		/* kernel thread */
 		memset(childregs, 0, sizeof(struct pt_regs));
 		childregs->gpr[1] = sp + sizeof(struct pt_regs);
 		/* function */
-		if (usp)
-			childregs->gpr[14] = ppc_function_entry((void *)usp);
+		if (args->fn)
+			childregs->gpr[14] = ppc_function_entry((void *)args->fn);
 #ifdef CONFIG_PPC64
 		clear_tsk_thread_flag(p, TIF_32BIT);
 		childregs->softe = IRQS_ENABLED;
 #endif
-		childregs->gpr[15] = kthread_arg;
+		childregs->gpr[15] = (unsigned long)args->fn_arg;
 		p->thread.regs = NULL; /* no user register state */
 		ti->flags |= _TIF_RESTOREALL;
 		f = ret_from_kernel_thread;

@@ -124,12 +124,11 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
 	unsigned long clone_flags = args->flags;
 	unsigned long usp = args->stack;
-	unsigned long arg = args->stack_size;
 	unsigned long tls = args->tls;
 	struct pt_regs *childregs = task_pt_regs(p);
 
 	/* p->thread holds context to be restored by __switch_to() */
-	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+	if (unlikely(args->fn)) {
 		/* Kernel thread */
 		memset(childregs, 0, sizeof(struct pt_regs));
 		childregs->gp = gp_in_global;
@@ -137,8 +136,8 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 		childregs->status = SR_PP | SR_PIE;
 
 		p->thread.ra = (unsigned long)ret_from_kernel_thread;
-		p->thread.s[0] = usp; /* fn */
-		p->thread.s[1] = arg;
+		p->thread.s[0] = (unsigned long)args->fn;
+		p->thread.s[1] = (unsigned long)args->fn_arg;
 	} else {
 		*childregs = *(current_pt_regs());
 		if (usp) /* User fork */

@@ -98,7 +98,6 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
 	unsigned long clone_flags = args->flags;
 	unsigned long new_stackp = args->stack;
-	unsigned long arg = args->stack_size;
 	unsigned long tls = args->tls;
 	struct fake_frame
 	{
@@ -133,15 +132,15 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 	frame->sf.gprs[9] = (unsigned long)frame;
 
 	/* Store access registers to kernel stack of new process. */
-	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+	if (unlikely(args->fn)) {
 		/* kernel thread */
 		memset(&frame->childregs, 0, sizeof(struct pt_regs));
 		frame->childregs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT |
 				PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
 		frame->childregs.psw.addr =
 				(unsigned long)__ret_from_fork;
-		frame->childregs.gprs[9] = new_stackp; /* function */
-		frame->childregs.gprs[10] = arg;
+		frame->childregs.gprs[9] = (unsigned long)args->fn;
+		frame->childregs.gprs[10] = (unsigned long)args->fn_arg;
 		frame->childregs.orig_gpr2 = -1;
 		frame->childregs.last_break = 1;
 		return 0;

@@ -96,7 +96,6 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
 	unsigned long clone_flags = args->flags;
 	unsigned long usp = args->stack;
-	unsigned long arg = args->stack_size;
 	unsigned long tls = args->tls;
 	struct thread_info *ti = task_thread_info(p);
 	struct pt_regs *childregs;
@@ -117,11 +116,11 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 
 	childregs = task_pt_regs(p);
 	p->thread.sp = (unsigned long) childregs;
-	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+	if (unlikely(args->fn)) {
 		memset(childregs, 0, sizeof(struct pt_regs));
 		p->thread.pc = (unsigned long) ret_from_kernel_thread;
-		childregs->regs[4] = arg;
-		childregs->regs[5] = usp;
+		childregs->regs[4] = (unsigned long) args->fn_arg;
+		childregs->regs[5] = (unsigned long) args->fn;
 		childregs->sr = SR_MD;
 #if defined(CONFIG_SH_FPU)
 		childregs->sr |= SR_FD;

@@ -263,7 +263,6 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
 	unsigned long clone_flags = args->flags;
 	unsigned long sp = args->stack;
-	unsigned long arg = args->stack_size;
 	unsigned long tls = args->tls;
 	struct thread_info *ti = task_thread_info(p);
 	struct pt_regs *childregs, *regs = current_pt_regs();
@@ -299,13 +298,13 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 	ti->ksp = (unsigned long) new_stack;
 	p->thread.kregs = childregs;
 
-	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+	if (unlikely(args->fn)) {
 		extern int nwindows;
 		unsigned long psr;
 		memset(new_stack, 0, STACKFRAME_SZ + TRACEREG_SZ);
 		ti->kpc = (((unsigned long) ret_from_kernel_thread) - 0x8);
-		childregs->u_regs[UREG_G1] = sp; /* function */
-		childregs->u_regs[UREG_G2] = arg;
+		childregs->u_regs[UREG_G1] = (unsigned long) args->fn;
+		childregs->u_regs[UREG_G2] = (unsigned long) args->fn_arg;
 		psr = childregs->psr = get_psr();
 		ti->kpsr = psr | PSR_PIL;
 		ti->kwim = 1 << (((psr & PSR_CWP) + 1) % nwindows);

@@ -568,7 +568,6 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
 	unsigned long clone_flags = args->flags;
 	unsigned long sp = args->stack;
-	unsigned long arg = args->stack_size;
 	unsigned long tls = args->tls;
 	struct thread_info *t = task_thread_info(p);
 	struct pt_regs *regs = current_pt_regs();
@@ -587,12 +586,12 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 				       sizeof(struct sparc_stackf));
 	t->fpsaved[0] = 0;
 
-	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+	if (unlikely(args->fn)) {
 		memset(child_trap_frame, 0, child_stack_sz);
 		__thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP] =
 			(current_pt_regs()->tstate + 1) & TSTATE_CWP;
-		t->kregs->u_regs[UREG_G1] = sp; /* function */
-		t->kregs->u_regs[UREG_G2] = arg;
+		t->kregs->u_regs[UREG_G1] = (unsigned long) args->fn;
+		t->kregs->u_regs[UREG_G2] = (unsigned long) args->fn_arg;
 		return 0;
 	}
 

@@ -158,15 +158,13 @@ int copy_thread(struct task_struct * p, const struct kernel_clone_args *args)
 {
 	unsigned long clone_flags = args->flags;
 	unsigned long sp = args->stack;
-	unsigned long arg = args->stack_size;
 	unsigned long tls = args->tls;
 	void (*handler)(void);
-	int kthread = current->flags & (PF_KTHREAD | PF_IO_WORKER);
 	int ret = 0;
 
 	p->thread = (struct thread_struct) INIT_THREAD;
 
-	if (!kthread) {
+	if (!args->fn) {
 		memcpy(&p->thread.regs.regs, current_pt_regs(),
 		       sizeof(p->thread.regs.regs));
 		PT_REGS_SET_SYSCALL_RETURN(&p->thread.regs, 0);
@@ -178,14 +176,14 @@ int copy_thread(struct task_struct * p, const struct kernel_clone_args *args)
 		arch_copy_thread(&current->thread.arch, &p->thread.arch);
 	} else {
 		get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp);
-		p->thread.request.u.thread.proc = (int (*)(void *))sp;
-		p->thread.request.u.thread.arg = (void *)arg;
+		p->thread.request.u.thread.proc = args->fn;
+		p->thread.request.u.thread.arg = args->fn_arg;
 		handler = new_thread_handler;
 	}
 
 	new_thread(task_stack_page(p), &p->thread.switch_buf, handler);
 
-	if (!kthread) {
+	if (!args->fn) {
 		clear_flushed_tls(p);
 
 		/*

@@ -11,7 +11,7 @@
 
 extern void save_fpregs_to_fpstate(struct fpu *fpu);
 extern void fpu__drop(struct fpu *fpu);
-extern int fpu_clone(struct task_struct *dst, unsigned long clone_flags);
+extern int fpu_clone(struct task_struct *dst, unsigned long clone_flags, bool minimal);
 extern void fpu_flush_thread(void);
 
 /*

@@ -78,13 +78,13 @@ static inline void update_task_stack(struct task_struct *task)
 }
 
 static inline void kthread_frame_init(struct inactive_task_frame *frame,
-				      unsigned long fun, unsigned long arg)
+				      int (*fun)(void *), void *arg)
 {
-	frame->bx = fun;
+	frame->bx = (unsigned long)fun;
 #ifdef CONFIG_X86_32
-	frame->di = arg;
+	frame->di = (unsigned long)arg;
 #else
-	frame->r12 = arg;
+	frame->r12 = (unsigned long)arg;
 #endif
 }
 

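The typed prototype above lets the x86 copy_thread() hunks further down pass args->fn and args->fn_arg directly, so the unsigned long casts live in one place. A before/after sketch of the call site, as it appears in the x86 process hunks below:

	/* before: function and argument smuggled through unsigned longs */
	kthread_frame_init(frame, sp, arg);

	/* after: typed pointers at the call site, cast once inside kthread_frame_init() */
	kthread_frame_init(frame, args->fn, args->fn_arg);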
@@ -556,7 +556,7 @@ static inline void fpu_inherit_perms(struct fpu *dst_fpu)
 }
 
 /* Clone current's FPU state on fork */
-int fpu_clone(struct task_struct *dst, unsigned long clone_flags)
+int fpu_clone(struct task_struct *dst, unsigned long clone_flags, bool minimal)
 {
 	struct fpu *src_fpu = &current->thread.fpu;
 	struct fpu *dst_fpu = &dst->thread.fpu;
@@ -579,7 +579,7 @@ int fpu_clone(struct task_struct *dst, unsigned long clone_flags)
 	 * No FPU state inheritance for kernel threads and IO
 	 * worker threads.
 	 */
-	if (dst->flags & (PF_KTHREAD | PF_IO_WORKER)) {
+	if (minimal) {
 		/* Clear out the minimal state */
 		memcpy(&dst_fpu->fpstate->regs, &init_fpstate.regs,
 		       init_fpstate_copy_size());

@@ -134,7 +134,6 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
 	unsigned long clone_flags = args->flags;
 	unsigned long sp = args->stack;
-	unsigned long arg = args->stack_size;
 	unsigned long tls = args->tls;
 	struct inactive_task_frame *frame;
 	struct fork_frame *fork_frame;
@@ -172,13 +171,13 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 	frame->flags = X86_EFLAGS_FIXED;
 #endif
 
-	fpu_clone(p, clone_flags);
+	fpu_clone(p, clone_flags, args->fn);
 
 	/* Kernel thread ? */
 	if (unlikely(p->flags & PF_KTHREAD)) {
 		p->thread.pkru = pkru_get_init_value();
 		memset(childregs, 0, sizeof(struct pt_regs));
-		kthread_frame_init(frame, sp, arg);
+		kthread_frame_init(frame, args->fn, args->fn_arg);
 		return 0;
 	}
 
@@ -198,10 +197,10 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 	task_user_gs(p) = get_user_gs(current_pt_regs());
 #endif
 
-	if (unlikely(p->flags & PF_IO_WORKER)) {
+	if (unlikely(args->fn)) {
 		/*
-		 * An IO thread is a user space thread, but it doesn't
-		 * return to ret_after_fork().
+		 * A user space thread, but it doesn't return to
+		 * ret_after_fork().
 		 *
 		 * In order to indicate that to tools like gdb,
 		 * we reset the stack and instruction pointers.
@@ -211,7 +210,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 		 */
 		childregs->sp = 0;
 		childregs->ip = 0;
-		kthread_frame_init(frame, sp, arg);
+		kthread_frame_init(frame, args->fn, args->fn_arg);
 		return 0;
 	}
 

@@ -205,7 +205,6 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
 	unsigned long clone_flags = args->flags;
 	unsigned long usp_thread_fn = args->stack;
-	unsigned long thread_fn_arg = args->stack_size;
 	unsigned long tls = args->tls;
 	struct pt_regs *childregs = task_pt_regs(p);
 
@@ -226,7 +225,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 #error Unsupported Xtensa ABI
 #endif
 
-	if (!(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+	if (!args->fn) {
 		struct pt_regs *regs = current_pt_regs();
 		unsigned long usp = usp_thread_fn ?
 			usp_thread_fn : regs->areg[1];
@@ -278,15 +277,15 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 		 * Window underflow will load registers from the
 		 * spill slots on the stack on return from _switch_to.
 		 */
-		SPILL_SLOT(childregs, 2) = usp_thread_fn;
-		SPILL_SLOT(childregs, 3) = thread_fn_arg;
+		SPILL_SLOT(childregs, 2) = (unsigned long)args->fn;
+		SPILL_SLOT(childregs, 3) = (unsigned long)args->fn_arg;
 #elif defined(__XTENSA_CALL0_ABI__)
 		/*
 		 * a12 = thread_fn, a13 = thread_fn arg.
 		 * _switch_to epilogue will load registers from the stack.
 		 */
-		((unsigned long *)p->thread.sp)[0] = usp_thread_fn;
-		((unsigned long *)p->thread.sp)[1] = thread_fn_arg;
+		((unsigned long *)p->thread.sp)[0] = (unsigned long)args->fn;
+		((unsigned long *)p->thread.sp)[1] = (unsigned long)args->fn_arg;
 #else
 #error Unsupported Xtensa ABI
 #endif

@@ -34,6 +34,8 @@ struct kernel_clone_args {
 	int io_thread;
 	int kthread;
 	int idle;
+	int (*fn)(void *);
+	void *fn_arg;
 	struct cgroup *cgrp;
 	struct css_set *cset;
 };

@@ -2555,8 +2555,8 @@ struct task_struct * __init fork_idle(int cpu)
 	struct task_struct *task;
 	struct kernel_clone_args args = {
 		.flags		= CLONE_VM,
-		.stack		= (unsigned long)&idle_dummy,
-		.stack_size	= (unsigned long)NULL,
+		.fn		= &idle_dummy,
+		.fn_arg		= NULL,
 		.kthread	= 1,
 		.idle		= 1,
 	};
@@ -2589,8 +2589,8 @@ struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node)
 		.flags		= ((lower_32_bits(flags) | CLONE_VM |
 				    CLONE_UNTRACED) & ~CSIGNAL),
 		.exit_signal	= (lower_32_bits(flags) & CSIGNAL),
-		.stack		= (unsigned long)fn,
-		.stack_size	= (unsigned long)arg,
+		.fn		= fn,
+		.fn_arg		= arg,
 		.io_thread	= 1,
 	};
 
@@ -2694,8 +2694,8 @@ pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 		.flags		= ((lower_32_bits(flags) | CLONE_VM |
 				    CLONE_UNTRACED) & ~CSIGNAL),
 		.exit_signal	= (lower_32_bits(flags) & CSIGNAL),
-		.stack		= (unsigned long)fn,
-		.stack_size	= (unsigned long)arg,
+		.fn		= fn,
+		.fn_arg		= arg,
 		.kthread	= 1,
 	};
 
@@ -2711,8 +2711,8 @@ pid_t user_mode_thread(int (*fn)(void *), void *arg, unsigned long flags)
 		.flags		= ((lower_32_bits(flags) | CLONE_VM |
 				    CLONE_UNTRACED) & ~CSIGNAL),
 		.exit_signal	= (lower_32_bits(flags) & CSIGNAL),
-		.stack		= (unsigned long)fn,
-		.stack_size	= (unsigned long)arg,
+		.fn		= fn,
+		.fn_arg		= arg,
 	};
 
 	return kernel_clone(&args);
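Taken together, a caller now describes a kernel-mode start function with .fn/.fn_arg rather than by casting the pointers into .stack/.stack_size. A caller-side sketch mirroring the post-patch kernel_thread() above (my_worker and start_my_worker are made-up names for illustration only):

static int my_worker(void *data)
{
	/* runs in the new task, in kernel mode */
	return 0;
}

static pid_t start_my_worker(void)
{
	struct kernel_clone_args args = {
		.flags		= CLONE_VM | CLONE_UNTRACED,
		.fn		= my_worker,	/* previously .stack = (unsigned long)fn */
		.fn_arg		= NULL,		/* previously .stack_size = (unsigned long)arg */
		.kthread	= 1,
	};

	return kernel_clone(&args);
}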