// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * Amit Bhor, Kanika Nema: Codito Technologies 2004
 */
|
|
|
|
|
|
|
|
#include <linux/errno.h>
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/sched.h>
|
2017-02-09 01:51:36 +08:00
|
|
|
#include <linux/sched/task.h>
|
2017-02-09 01:51:37 +08:00
|
|
|
#include <linux/sched/task_stack.h>
|
2017-02-09 01:51:36 +08:00
|
|
|
|
2013-01-18 17:42:18 +08:00
|
|
|
#include <linux/mm.h>
|
|
|
|
#include <linux/fs.h>
|
|
|
|
#include <linux/unistd.h>
|
|
|
|
#include <linux/ptrace.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/syscalls.h>
|
|
|
|
#include <linux/elf.h>
|
|
|
|
#include <linux/tick.h>
|
|
|
|
|
2020-01-18 07:04:03 +08:00
|
|
|
#include <asm/fpu.h>
|
|
|
|
|
2013-01-18 17:42:18 +08:00
|
|
|
/*
 * arc_settls: stash the userspace TLS area pointer in thread_info so the
 * return path can materialize it into r25 for the task.  Always succeeds.
 */
SYSCALL_DEFINE1(arc_settls, void *, user_tls_data_ptr)
{
	struct thread_info *ti = task_thread_info(current);

	ti->thr_ptr = (unsigned int)user_tls_data_ptr;
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * We return the user space TLS data ptr as sys-call return code
 * Ideally it should be copy to user.
 * However we can cheat by the fact that some sys-calls do return
 * absurdly high values
 * Since the tls data ptr is not going to be in range of 0xFFFF_xxxx
 * it won't be considered a sys-call error
 * and it will be loads better than copy-to-user, which is a definite
 * D-TLB Miss
 */
SYSCALL_DEFINE0(arc_gettls)
{
	return task_thread_info(current)->thr_ptr;
}
|
2013-01-18 17:42:18 +08:00
|
|
|
|
2016-10-20 22:39:45 +08:00
|
|
|
/*
 * Emulated cmpxchg on a user address, for cores lacking LLOCK/SCOND:
 * if *uaddr == expected, store new.  Atomicity w.r.t. other tasks is
 * provided by disabling preemption (see comment below on why that is
 * sufficient).  Success is reported to userspace via the Z flag in
 * status32; the syscall return value is the value read from *uaddr.
 */
SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
{
	struct pt_regs *regs = current_pt_regs();
	u32 uval;
	int ret;

	/*
	 * This is only for old cores lacking LLOCK/SCOND, which by definition
	 * can't possibly be SMP. Thus doesn't need to be SMP safe.
	 * And this also helps reduce the overhead for serializing in
	 * the UP case
	 */
	WARN_ON_ONCE(IS_ENABLED(CONFIG_SMP));

	/* Z indicates to userspace if operation succeeded */
	regs->status32 &= ~STATUS_Z_MASK;

	ret = access_ok(uaddr, sizeof(*uaddr));
	if (!ret)
		goto fail;

again:
	preempt_disable();

	ret = __get_user(uval, uaddr);
	if (ret)
		goto fault;

	if (uval != expected)
		goto out;

	ret = __put_user(new, uaddr);
	if (ret)
		goto fault;

	/* compare succeeded and new value stored: tell userspace via Z */
	regs->status32 |= STATUS_Z_MASK;

out:
	preempt_enable();
	return uval;

fault:
	preempt_enable();

	if (unlikely(ret != -EFAULT))
		goto fail;

	/* -EFAULT: fault the user page in (e.g. COW) and retry the whole op */
	mmap_read_lock(current->mm);
	ret = fixup_user_fault(current->mm, (unsigned long) uaddr,
			       FAULT_FLAG_WRITE, NULL);
	mmap_read_unlock(current->mm);

	if (likely(!ret))
		goto again;

fail:
	force_sig(SIGSEGV);
	return ret;
}
|
|
|
|
|
2017-06-03 02:49:10 +08:00
|
|
|
#ifdef CONFIG_ISA_ARCV2

/* Idle entry: commit the ARCv2 SLEEP instruction with irqs re-enabled. */
void arch_cpu_idle(void)
{
	/* Re-enable interrupts <= default irq priority before committing SLEEP */
	const unsigned int arg = 0x10 | ARCV2_IRQ_DEF_PRIO;

	__asm__ __volatile__(
		"sleep %0	\n"
		:
		:"I"(arg)); /* can't be "r" has to be embedded const */
}

#else	/* ARC700 */

/* Idle entry for ARC700: SLEEP with both interrupt levels enabled. */
void arch_cpu_idle(void)
{
	/* sleep, but enable both set E1/E2 (levels of interrupts) before committing */
	__asm__ __volatile__("sleep 0x3	\n");
}

#endif
|
|
|
|
|
2013-01-18 17:42:18 +08:00
|
|
|
asmlinkage void ret_from_fork(void);
|
|
|
|
|
2015-03-14 02:04:18 +08:00
|
|
|
/*
 * Copy architecture-specific thread state
 *
 * Layout of Child kernel mode stack as setup at the end of this function is
 *
 * |     ...        |
 * |     ...        |
 * |     unused     |
 * |                |
 * ------------------
 * |     r25        |   <==== top of Stack (thread.ksp)
 * ~                ~
 * |    --to--      |   (CALLEE Regs of kernel mode)
 * |     r13        |
 * ------------------
 * |     fp         |
 * |    blink       |   @ret_from_fork
 * ------------------
 * |                |
 * ~                ~
 * ~                ~
 * |                |
 * ------------------
 * |     r12        |
 * ~                ~
 * |    --to--      |   (scratch Regs of user mode)
 * |     r0         |
 * ------------------
 * |      SP        |
 * |    orig_r0     |
 * |    event/ECR   |
 * |    user_r25    |
 * ------------------  <===== END of PAGE
 */
int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long kthread_arg, struct task_struct *p,
		unsigned long tls)
{
	struct pt_regs *c_regs;        /* child's pt_regs */
	unsigned long *childksp;       /* to unwind out of __switch_to() */
	struct callee_regs *c_callee;  /* child's callee regs */
	struct callee_regs *parent_callee;  /* parent's callee regs */
	struct pt_regs *regs = current_pt_regs();

	/* Mark the specific anchors to begin with (see pic above) */
	c_regs = task_pt_regs(p);
	childksp = (unsigned long *)c_regs - 2;  /* 2 words for FP/BLINK */
	c_callee = ((struct callee_regs *)childksp) - 1;

	/*
	 * __switch_to() uses thread.ksp to start unwinding stack
	 * For kernel threads we don't need to create callee regs, the
	 * stack layout nevertheless needs to remain the same.
	 * Also, since __switch_to anyways unwinds callee regs, we use
	 * this to populate kernel thread entry-pt/args into callee regs,
	 * so that ret_from_kernel_thread() becomes simpler.
	 */
	p->thread.ksp = (unsigned long)c_callee;	/* THREAD_KSP */

	/* __switch_to expects FP(0), BLINK(return addr) at top */
	childksp[0] = 0;			/* fp */
	childksp[1] = (unsigned long)ret_from_fork; /* blink */

	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread: no user context to copy, zeroed pt_regs */
		memset(c_regs, 0, sizeof(struct pt_regs));

		c_callee->r13 = kthread_arg;
		c_callee->r14 = usp; /* function */

		return 0;
	}

	/*--------- User Task Only --------------*/

	/* __switch_to expects FP(0), BLINK(return addr) at top of stack */
	childksp[0] = 0;				/* for POP fp */
	childksp[1] = (unsigned long)ret_from_fork;	/* for POP blink */

	/* Copy parents pt regs on child's kernel mode stack */
	*c_regs = *regs;

	if (usp)
		c_regs->sp = usp;

	c_regs->r0 = 0;		/* fork returns 0 in child */

	parent_callee = ((struct callee_regs *)regs) - 1;
	*c_callee = *parent_callee;

	if (unlikely(clone_flags & CLONE_SETTLS)) {
		/*
		 * set task's userland tls data ptr from 4th arg
		 * clone C-lib call is difft from clone sys-call
		 */
		task_thread_info(p)->thr_ptr = tls;
	} else {
		/* Normal fork case: set parent's TLS ptr in child */
		task_thread_info(p)->thr_ptr =
		task_thread_info(current)->thr_ptr;
	}

	/*
	 * setup usermode thread pointer #1:
	 * when child is picked by scheduler, __switch_to() uses @c_callee to
	 * populate usermode callee regs: this works (despite being in a kernel
	 * function) since special return path for child @ret_from_fork()
	 * ensures those regs are not clobbered all the way to RTIE to usermode
	 */
	c_callee->r25 = task_thread_info(p)->thr_ptr;

#ifdef CONFIG_ARC_CURR_IN_REG
	/*
	 * setup usermode thread pointer #2:
	 * however for this special use of r25 in kernel, __switch_to() sets
	 * r25 for kernel needs and only in the final return path is usermode
	 * r25 setup, from pt_regs->user_r25. So set that up as well
	 */
	c_regs->user_r25 = c_callee->r25;
#endif

	return 0;
}
|
|
|
|
|
2014-04-18 14:49:59 +08:00
|
|
|
/*
 * Do necessary setup to start up a new user task: seed the user stack
 * pointer and entry PC into @regs and establish the initial status32
 * (user mode, interrupts on) the task will RTIE into.
 */
void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp)
{
	regs->sp = usp;
	regs->ret = pc;

	/*
	 * [U]ser Mode bit set
	 * [L] ZOL loop inhibited to begin with - cleared by a LP insn
	 * Interrupts enabled
	 */
	regs->status32 = STATUS_U_MASK | STATUS_L_MASK | ISA_INIT_STATUS_BITS;

	/* reset any per-task FPU state before first user instruction */
	fpu_init_task(regs);

	/* bogus seed values for debugging */
	regs->lp_start = 0x10;
	regs->lp_end = 0x80;
}
|
|
|
|
|
2013-01-18 17:42:18 +08:00
|
|
|
/*
 * Some archs flush debug and FPU info here
 * ARC has no per-thread debug/FPU state needing a flush on exec,
 * so this is intentionally a no-op.
 */
void flush_thread(void)
{
}
|
|
|
|
|
|
|
|
int elf_check_arch(const struct elf32_hdr *x)
|
|
|
|
{
|
|
|
|
unsigned int eflags;
|
|
|
|
|
ARCv2: Support for ARCv2 ISA and HS38x cores
The notable features are:
- SMP configurations of upto 4 cores with coherency
- Optional L2 Cache and IO-Coherency
- Revised Interrupt Architecture (multiple priorites, reg banks,
auto stack switch, auto regfile save/restore)
- MMUv4 (PIPT dcache, Huge Pages)
- Instructions for
* 64bit load/store: LDD, STD
* Hardware assisted divide/remainder: DIV, REM
* Function prologue/epilogue: ENTER_S, LEAVE_S
* IRQ enable/disable: CLRI, SETI
* pop count: FFS, FLS
* SETcc, BMSKN, XBFU...
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
2013-05-13 21:00:41 +08:00
|
|
|
if (x->e_machine != EM_ARC_INUSE) {
|
|
|
|
pr_err("ELF not built for %s ISA\n",
|
|
|
|
is_isa_arcompact() ? "ARCompact":"ARCv2");
|
2013-01-18 17:42:18 +08:00
|
|
|
return 0;
|
ARCv2: Support for ARCv2 ISA and HS38x cores
The notable features are:
- SMP configurations of upto 4 cores with coherency
- Optional L2 Cache and IO-Coherency
- Revised Interrupt Architecture (multiple priorites, reg banks,
auto stack switch, auto regfile save/restore)
- MMUv4 (PIPT dcache, Huge Pages)
- Instructions for
* 64bit load/store: LDD, STD
* Hardware assisted divide/remainder: DIV, REM
* Function prologue/epilogue: ENTER_S, LEAVE_S
* IRQ enable/disable: CLRI, SETI
* pop count: FFS, FLS
* SETcc, BMSKN, XBFU...
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
2013-05-13 21:00:41 +08:00
|
|
|
}
|
2013-01-18 17:42:18 +08:00
|
|
|
|
|
|
|
eflags = x->e_flags;
|
2016-08-11 05:10:57 +08:00
|
|
|
if ((eflags & EF_ARC_OSABI_MSK) != EF_ARC_OSABI_CURRENT) {
|
2013-01-18 17:42:18 +08:00
|
|
|
pr_err("ABI mismatch - you need newer toolchain\n");
|
2019-05-21 23:03:48 +08:00
|
|
|
force_sigsegv(SIGSEGV);
|
2013-01-18 17:42:18 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(elf_check_arch);
|