OpenCloudOS-Kernel/arch/nios2/kernel/process.c

/*
 * Architecture-dependent parts of process handling.
 *
 * Copyright (C) 2013 Altera Corporation
 * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
 * Copyright (C) 2009 Wind River Systems Inc
 *   Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
 * Copyright (C) 2004 Microtronix Datacom Ltd
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/mm_types.h>
#include <linux/tick.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/traps.h>
#include <asm/cpuinfo.h>

asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);

void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);
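
/* Called from the generic idle loop with interrupts disabled. */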
void arch_cpu_idle(void)
{
        local_irq_enable();
}

/*
 * The development boards have no way to pull a board reset. Just jump to the
 * cpu reset address and let the boot loader or the code in head.S take care of
 * resetting peripherals.
 */
void machine_restart(char *__unused)
{
        pr_notice("Machine restart (%08x)...\n", cpuinfo.reset_addr);
        local_irq_disable();
        __asm__ __volatile__ (
                "jmp %0\n\t"
                :
                : "r" (cpuinfo.reset_addr)
                : "r4");
}

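/* Halting just parks the CPU: disable interrupts and spin forever. */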
void machine_halt(void)
{
        pr_notice("Machine halt...\n");
        local_irq_disable();
        for (;;)
                ;
}

/*
 * There is no way to power off the development boards. So just spin for now.
 * If we ever get a way of powering off a board, e.g. via a GPIO, we should
 * add that here.
 */
void machine_power_off(void)
{
        pr_notice("Machine power off...\n");
        local_irq_disable();
        for (;;)
                ;
}

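/* Print the contents of a trap frame (struct pt_regs) to the kernel log. */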
void show_regs(struct pt_regs *regs)
{
        pr_notice("\n");
        show_regs_print_info(KERN_DEFAULT);

        pr_notice("r1: %08lx r2: %08lx r3: %08lx r4: %08lx\n",
                regs->r1, regs->r2, regs->r3, regs->r4);
        pr_notice("r5: %08lx r6: %08lx r7: %08lx r8: %08lx\n",
                regs->r5, regs->r6, regs->r7, regs->r8);
        pr_notice("r9: %08lx r10: %08lx r11: %08lx r12: %08lx\n",
                regs->r9, regs->r10, regs->r11, regs->r12);
        pr_notice("r13: %08lx r14: %08lx r15: %08lx\n",
                regs->r13, regs->r14, regs->r15);
        pr_notice("ra: %08lx fp: %08lx sp: %08lx gp: %08lx\n",
                regs->ra, regs->fp, regs->sp, regs->gp);
        pr_notice("ea: %08lx estatus: %08lx\n",
                regs->ea, regs->estatus);
}

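/* Nothing architecture-specific needs flushing when a thread execs. */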
void flush_thread(void)
{
}

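/*
 * Set up the child's kernel stack for a new task.  Kernel threads get a
 * switch_stack that makes ret_from_kernel_thread call fn(arg); user forks
 * get a copy of the parent's trap frame with r2/r7 cleared so the child
 * observes a successful zero return from the clone/fork syscall.
 */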
int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
                struct task_struct *p, unsigned long tls)
{
        struct pt_regs *childregs = task_pt_regs(p);
        struct pt_regs *regs;
        struct switch_stack *stack;
        struct switch_stack *childstack =
                ((struct switch_stack *)childregs) - 1;

        if (unlikely(p->flags & PF_KTHREAD)) {
                memset(childstack, 0,
                        sizeof(struct switch_stack) + sizeof(struct pt_regs));

                childstack->r16 = usp; /* fn */
                childstack->r17 = arg;
                childstack->ra = (unsigned long) ret_from_kernel_thread;
                childregs->estatus = STATUS_PIE;
                childregs->sp = (unsigned long) childstack;

                p->thread.ksp = (unsigned long) childstack;
                p->thread.kregs = childregs;
                return 0;
        }

        regs = current_pt_regs();
        *childregs = *regs;
        childregs->r2 = 0; /* Set the return value for the child. */
        childregs->r7 = 0;

        stack = ((struct switch_stack *) regs) - 1;
        *childstack = *stack;
        childstack->ra = (unsigned long)ret_from_fork;
        p->thread.kregs = childregs;
        p->thread.ksp = (unsigned long) childstack;

        if (usp)
                childregs->sp = usp;

        /* Initialize tls register. */
        if (clone_flags & CLONE_SETTLS)
                childstack->r23 = tls;

        return 0;
}

/*
 * Generic dumping code. Used for panic and debug.
 */
void dump(struct pt_regs *fp)
{
        unsigned long *sp;
        unsigned char *tp;
        int i;

        pr_emerg("\nCURRENT PROCESS:\n\n");
        pr_emerg("COMM=%s PID=%d\n", current->comm, current->pid);

        if (current->mm) {
                pr_emerg("TEXT=%08x-%08x DATA=%08x-%08x BSS=%08x-%08x\n",
                        (int) current->mm->start_code,
                        (int) current->mm->end_code,
                        (int) current->mm->start_data,
                        (int) current->mm->end_data,
                        (int) current->mm->end_data,
                        (int) current->mm->brk);
                pr_emerg("USER-STACK=%08x KERNEL-STACK=%08x\n\n",
                        (int) current->mm->start_stack,
                        (int)(((unsigned long) current) + THREAD_SIZE));
        }

        pr_emerg("PC: %08lx\n", fp->ea);
        pr_emerg("SR: %08lx SP: %08lx\n",
                (long) fp->estatus, (long) fp);

        pr_emerg("r1: %08lx r2: %08lx r3: %08lx\n",
                fp->r1, fp->r2, fp->r3);
        pr_emerg("r4: %08lx r5: %08lx r6: %08lx r7: %08lx\n",
                fp->r4, fp->r5, fp->r6, fp->r7);
        pr_emerg("r8: %08lx r9: %08lx r10: %08lx r11: %08lx\n",
                fp->r8, fp->r9, fp->r10, fp->r11);
        pr_emerg("r12: %08lx r13: %08lx r14: %08lx r15: %08lx\n",
                fp->r12, fp->r13, fp->r14, fp->r15);
        pr_emerg("or2: %08lx ra: %08lx fp: %08lx sp: %08lx\n",
                fp->orig_r2, fp->ra, fp->fp, fp->sp);

        pr_emerg("\nUSP: %08x TRAPFRAME: %08x\n",
                (unsigned int) fp->sp, (unsigned int) fp);

        pr_emerg("\nCODE:");
        tp = ((unsigned char *) fp->ea) - 0x20;
        for (sp = (unsigned long *) tp, i = 0; (i < 0x40); i += 4) {
                if ((i % 0x10) == 0)
                        pr_emerg("\n%08x: ", (int) (tp + i));
                pr_emerg("%08x ", (int) *sp++);
        }
        pr_emerg("\n");

        pr_emerg("\nKERNEL STACK:");
        tp = ((unsigned char *) fp) - 0x40;
        for (sp = (unsigned long *) tp, i = 0; (i < 0xc0); i += 4) {
                if ((i % 0x10) == 0)
                        pr_emerg("\n%08x: ", (int) (tp + i));
                pr_emerg("%08x ", (int) *sp++);
        }
        pr_emerg("\n");
        pr_emerg("\n");

        pr_emerg("\nUSER STACK:");
        tp = (unsigned char *) (fp->sp - 0x10);
        for (sp = (unsigned long *) tp, i = 0; (i < 0x80); i += 4) {
                if ((i % 0x10) == 0)
                        pr_emerg("\n%08x: ", (int) (tp + i));
                pr_emerg("%08x ", (int) *sp++);
        }
        pr_emerg("\n\n");
}

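/*
 * Guess where a sleeping task is waiting: walk the saved frame-pointer
 * chain on its kernel stack until we step out of the scheduler code.
 */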
unsigned long get_wchan(struct task_struct *p)
{
        unsigned long fp, pc;
        unsigned long stack_page;
        int count = 0;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;

        stack_page = (unsigned long)p;
        fp = ((struct switch_stack *)p->thread.ksp)->fp; /* ;dgt2 */
        do {
                if (fp < stack_page+sizeof(struct task_struct) ||
                    fp >= 8184+stack_page) /* ;dgt2;tmp */
                        return 0;
                pc = ((unsigned long *)fp)[1];
                if (!in_sched_functions(pc))
                        return pc;
                fp = *(unsigned long *) fp;
        } while (count++ < 16); /* ;dgt2;tmp */
        return 0;
}

/*
 * Do necessary setup to start up a newly executed thread.
 * Will start up in user mode (status_extension = 0).
 */
void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
{
        memset((void *) regs, 0, sizeof(struct pt_regs));

        regs->estatus = ESTATUS_EPIE | ESTATUS_EU;
        regs->ea = pc;
        regs->sp = sp;
}

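/*
 * Entry point for the legacy clone() syscall: repack the register-based
 * arguments into a struct kernel_clone_args and let the generic
 * _do_fork() do the work.
 */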
asmlinkage int nios2_clone(unsigned long clone_flags, unsigned long newsp,
                           int __user *parent_tidptr, int __user *child_tidptr,
                           unsigned long tls)
{
        struct kernel_clone_args args = {
                .flags = (lower_32_bits(clone_flags) & ~CSIGNAL),
                .pidfd = parent_tidptr,
                .child_tid = child_tidptr,
                .parent_tid = parent_tidptr,
                .exit_signal = (lower_32_bits(clone_flags) & CSIGNAL),
                .stack = newsp,
                .tls = tls,
        };

        return _do_fork(&args);
}