/* linux/arch/sparc64/kernel/sys_sparc.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/sparc
 * platform.
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/utsname.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/ipc.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/export.h>
#include <linux/context_tracking.h>

#include <asm/uaccess.h>
#include <asm/utrap.h>
#include <asm/unistd.h>

#include "entry.h"
#include "kernel.h"
#include "systbls.h"

/* #define DEBUG_UNIMP_SYSCALL */

asmlinkage unsigned long sys_getpagesize(void)
{
	return PAGE_SIZE;
}

/* Does addr --> addr+len fall within 4GB of the VA-space hole or
 * overflow past the end of the 64-bit address space?
 */
static inline int invalid_64bit_range(unsigned long addr, unsigned long len)
{
	unsigned long va_exclude_start, va_exclude_end;

	va_exclude_start = VA_EXCLUDE_START;
	va_exclude_end = VA_EXCLUDE_END;

	if (unlikely(len >= va_exclude_start))
		return 1;

	if (unlikely((addr + len) < addr))
		return 1;

	if (unlikely((addr >= va_exclude_start && addr < va_exclude_end) ||
		     ((addr + len) >= va_exclude_start &&
		      (addr + len) < va_exclude_end)))
		return 1;

	return 0;
}

/* These functions differ from the default implementations in
 * mm/mmap.c in two ways:
 *
 * 1) For file backed MAP_SHARED mmap()'s we D-cache color align,
 *    for fixed such mappings we just validate what the user gave us.
 * 2) For 64-bit tasks we avoid mapping anything within 4GB of
 *    the spitfire/niagara VA-hole.
 */
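
/* Round addr up to an SHMLBA boundary, then add pgoff's byte offset
 * within an SHMLBA-sized window, so a shared mapping lands on the same
 * D-cache color as the file offset it maps.
 */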
static inline unsigned long COLOR_ALIGN(unsigned long addr,
					unsigned long pgoff)
{
	unsigned long base = (addr+SHMLBA-1)&~(SHMLBA-1);
	unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1);

	return base + off;
}

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct * vma;
	unsigned long task_size = TASK_SIZE;
	int do_color_align;
	struct vm_unmapped_area_info info;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;
	if (unlikely(len > task_size || len >= VA_EXCLUDE_START))
		return -ENOMEM;

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	if (addr) {
		if (do_color_align)
			addr = COLOR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = min(task_size, VA_EXCLUDE_START);
	info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);
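
	/* The search above was confined below the VA hole.  If it failed
	 * and this task's address space extends past the hole, retry in
	 * the region above VA_EXCLUDE_END.
	 */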
	if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
		VM_BUG_ON(addr != -ENOMEM);
		info.low_limit = VA_EXCLUDE_END;
		info.high_limit = task_size;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			       const unsigned long len, const unsigned long pgoff,
			       const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long task_size = STACK_TOP32;
	unsigned long addr = addr0;
	int do_color_align;
	struct vm_unmapped_area_info info;

	/* This should only ever run for 32-bit processes. */
	BUG_ON(!test_thread_flag(TIF_32BIT));

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > task_size))
		return -ENOMEM;

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_color_align)
			addr = COLOR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = STACK_TOP32;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

/* Try to align the mapping as much as possible. */
unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
	unsigned long align_goal, addr = -ENOMEM;
	unsigned long (*get_area)(struct file *, unsigned long,
				  unsigned long, unsigned long, unsigned long);

	get_area = current->mm->get_unmapped_area;

	if (flags & MAP_FIXED) {
		/* Ok, don't mess with it. */
		return get_area(NULL, orig_addr, len, pgoff, flags);
	}
	flags &= ~MAP_SHARED;

	align_goal = PAGE_SIZE;
	if (len >= (4UL * 1024 * 1024))
		align_goal = (4UL * 1024 * 1024);
	else if (len >= (512UL * 1024))
		align_goal = (512UL * 1024);
	else if (len >= (64UL * 1024))
		align_goal = (64UL * 1024);
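
	/* Over-allocate by (align_goal - PAGE_SIZE) so the returned range
	 * can be rounded up to align_goal; on failure step the goal down
	 * 4MB -> 512KB -> 64KB -> PAGE_SIZE and retry.
	 */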
	do {
		addr = get_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags);
		if (!(addr & ~PAGE_MASK)) {
			addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL);
			break;
		}

		if (align_goal == (4UL * 1024 * 1024))
			align_goal = (512UL * 1024);
		else if (align_goal == (512UL * 1024))
			align_goal = (64UL * 1024);
		else
			align_goal = PAGE_SIZE;
	} while ((addr & ~PAGE_MASK) && align_goal > PAGE_SIZE);

	/* The mapping is smaller than 64K, or larger aligned areas could
	 * not be obtained.
	 */
	if (addr & ~PAGE_MASK)
		addr = get_area(NULL, orig_addr, len, pgoff, flags);

	return addr;
}
EXPORT_SYMBOL(get_fb_unmapped_area);

/* Essentially the same as PowerPC. */
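/* With PF_RANDOMIZE set, this yields a page-aligned offset of up to 8MB
 * for 32-bit tasks and up to 1GB for 64-bit tasks.
 */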
static unsigned long mmap_rnd(void)
{
	unsigned long rnd = 0UL;

	if (current->flags & PF_RANDOMIZE) {
		unsigned long val = get_random_int();
		if (test_thread_flag(TIF_32BIT))
			rnd = (val % (1UL << (23UL-PAGE_SHIFT)));
		else
			rnd = (val % (1UL << (30UL-PAGE_SHIFT)));
	}
	return rnd << PAGE_SHIFT;
}

void arch_pick_mmap_layout(struct mm_struct *mm)
{
	unsigned long random_factor = mmap_rnd();
	unsigned long gap;

	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	gap = rlimit(RLIMIT_STACK);
	if (!test_thread_flag(TIF_32BIT) ||
	    (current->personality & ADDR_COMPAT_LAYOUT) ||
	    gap == RLIM_INFINITY ||
	    sysctl_legacy_va_layout) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		/* We know it's 32-bit */
		unsigned long task_size = STACK_TOP32;
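
		/* Clamp the reserved stack gap to between 128MB and 5/6 of
		 * the 32-bit address space before placing mmap_base below it.
		 */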
		if (gap < 128 * 1024 * 1024)
			gap = 128 * 1024 * 1024;
		if (gap > (task_size / 6 * 5))
			gap = (task_size / 6 * 5);

		mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way unix traditionally does this, though.
 */
SYSCALL_DEFINE1(sparc_pipe_real, struct pt_regs *, regs)
{
	int fd[2];
	int error;

	error = do_pipe_flags(fd, 0);
	if (error)
		goto out;
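	/* SPARC returns both descriptors in registers: fd[0] becomes the
	 * normal return value and fd[1] is placed in the saved register
	 * image so userland picks it up in its second output register.
	 */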
	regs->u_regs[UREG_I1] = fd[1];
	error = fd[0];
out:
	return error;
}

/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls.
 *
 * This is really horribly ugly.
 */
SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second,
		unsigned long, third, void __user *, ptr, long, fifth)
{
	long err;

	/* No need for backward compatibility. We can start fresh... */
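	/* The *ctl operations below always force IPC_64, so only the
	 * new-style SysV ipc structure layouts are ever used.
	 */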
	if (call <= SEMTIMEDOP) {
		switch (call) {
		case SEMOP:
			err = sys_semtimedop(first, ptr,
					     (unsigned)second, NULL);
			goto out;
		case SEMTIMEDOP:
			err = sys_semtimedop(first, ptr, (unsigned)second,
					     (const struct timespec __user *)
					     (unsigned long) fifth);
			goto out;
		case SEMGET:
			err = sys_semget(first, (int)second, (int)third);
			goto out;
		case SEMCTL: {
			err = sys_semctl(first, second,
					 (int)third | IPC_64,
					 (unsigned long) ptr);
			goto out;
		}
		default:
			err = -ENOSYS;
			goto out;
		}
	}
	if (call <= MSGCTL) {
		switch (call) {
		case MSGSND:
			err = sys_msgsnd(first, ptr, (size_t)second,
					 (int)third);
			goto out;
		case MSGRCV:
			err = sys_msgrcv(first, ptr, (size_t)second, fifth,
					 (int)third);
			goto out;
		case MSGGET:
			err = sys_msgget((key_t)first, (int)second);
			goto out;
		case MSGCTL:
			err = sys_msgctl(first, (int)second | IPC_64, ptr);
			goto out;
		default:
			err = -ENOSYS;
			goto out;
		}
	}
	if (call <= SHMCTL) {
		switch (call) {
		case SHMAT: {
			ulong raddr;
			err = do_shmat(first, ptr, (int)second, &raddr, SHMLBA);
			if (!err) {
				if (put_user(raddr,
					     (ulong __user *) third))
					err = -EFAULT;
			}
			goto out;
		}
		case SHMDT:
			err = sys_shmdt(ptr);
			goto out;
		case SHMGET:
			err = sys_shmget(first, (size_t)second, (int)third);
			goto out;
		case SHMCTL:
			err = sys_shmctl(first, (int)second | IPC_64, ptr);
			goto out;
		default:
			err = -ENOSYS;
			goto out;
		}
	} else {
		err = -ENOSYS;
	}
out:
	return err;
}
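
/* A PER_LINUX32 task that asks for PER_LINUX keeps its compat
 * personality; PER_LINUX32 is then stripped from the returned
 * (previous) personality so callers see plain PER_LINUX.
 */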
SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality)
{
	int ret;

	if (personality(current->personality) == PER_LINUX32 &&
	    personality(personality) == PER_LINUX)
		personality |= PER_LINUX32;
	ret = sys_personality(personality);
	if (personality(ret) == PER_LINUX32)
		ret &= ~PER_LINUX32;

	return ret;
}
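
/* arch_mmap_check() hook: reject lengths or addresses that fall outside
 * the usable address space for the task's word size (including the
 * 64-bit VA hole).
 */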
int sparc_mmap_check(unsigned long addr, unsigned long len)
{
	if (test_thread_flag(TIF_32BIT)) {
		if (len >= STACK_TOP32)
			return -EINVAL;

		if (addr > STACK_TOP32 - len)
			return -EINVAL;
	} else {
		if (len >= VA_EXCLUDE_START)
			return -EINVAL;

		if (invalid_64bit_range(addr, len))
			return -EINVAL;
	}

	return 0;
}

/* Linux version of mmap */
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags, unsigned long, fd,
		unsigned long, off)
{
	unsigned long retval = -EINVAL;
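
	/* off is a byte offset here; reject offsets that are not
	 * page-aligned or that would overflow before converting to a
	 * page offset for sys_mmap_pgoff().
	 */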
	if ((off + PAGE_ALIGN(len)) < off)
		goto out;
	if (off & ~PAGE_MASK)
		goto out;
	retval = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
out:
	return retval;
}

SYSCALL_DEFINE2(64_munmap, unsigned long, addr, size_t, len)
{
	if (invalid_64bit_range(addr, len))
		return -EINVAL;

	return vm_munmap(addr, len);
}

SYSCALL_DEFINE5(64_mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	if (test_thread_flag(TIF_32BIT))
		return -EINVAL;
	return sys_mremap(addr, old_len, new_len, flags, new_addr);
}

/* We get here via sys_nis_syscall so it can set up the regs argument. */
asmlinkage unsigned long c_sys_nis_syscall(struct pt_regs *regs)
{
	static int count;

	/* Don't make the system unusable if someone gets stuck */
	if (count++ > 5)
		return -ENOSYS;

	printk ("Unimplemented SPARC system call %ld\n",regs->u_regs[1]);
#ifdef DEBUG_UNIMP_SYSCALL
	show_regs (regs);
#endif

	return -ENOSYS;
}

/* #define DEBUG_SPARC_BREAKPOINT */
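
/* Deliver SIGTRAP/TRAP_BRKPT for the breakpoint trap.  The
 * exception_enter()/exception_exit() pair keeps the context-tracking
 * code informed that we are running in the kernel for this trap.
 */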
asmlinkage void sparc_breakpoint(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	siginfo_t info;

	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
#ifdef DEBUG_SPARC_BREAKPOINT
	printk ("TRAP: Entering kernel PC=%lx, nPC=%lx\n", regs->tpc, regs->tnpc);
#endif
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_BRKPT;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGTRAP, &info, current);
#ifdef DEBUG_SPARC_BREAKPOINT
	printk ("TRAP: Returning to space: PC=%lx nPC=%lx\n", regs->tpc, regs->tnpc);
#endif
	exception_exit(prev_state);
}

extern void check_pending(int signum);

SYSCALL_DEFINE2(getdomainname, char __user *, name, int, len)
{
	int nlen, err;

	if (len < 0)
		return -EINVAL;

	down_read(&uts_sem);
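
	/* nlen counts the trailing NUL; if the user buffer cannot hold the
	 * whole name we fail with -EINVAL rather than truncate.
	 */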
	nlen = strlen(utsname()->domainname) + 1;
	err = -EINVAL;
	if (nlen > len)
		goto out;

	err = -EFAULT;
	if (!copy_to_user(name, utsname()->domainname, nlen))
		err = 0;

out:
	up_read(&uts_sem);
	return err;
}

SYSCALL_DEFINE5(utrap_install, utrap_entry_t, type,
		utrap_handler_t, new_p, utrap_handler_t, new_d,
		utrap_handler_t __user *, old_p,
		utrap_handler_t __user *, old_d)
{
	if (type < UT_INSTRUCTION_EXCEPTION || type > UT_TRAP_INSTRUCTION_31)
		return -EINVAL;
	if (new_p == (utrap_handler_t)(long)UTH_NOCHANGE) {
		if (old_p) {
			if (!current_thread_info()->utraps) {
				if (put_user(NULL, old_p))
					return -EFAULT;
			} else {
				if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
					return -EFAULT;
			}
		}
		if (old_d) {
			if (put_user(NULL, old_d))
				return -EFAULT;
		}
		return 0;
	}
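	/* utraps[0] is a reference count on the table.  If no table exists
	 * yet, allocate one; if the table is shared (count > 1) and the
	 * handler really changes, copy it first so the other users keep
	 * their handlers.
	 */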
	if (!current_thread_info()->utraps) {
		current_thread_info()->utraps =
			kzalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL);
		if (!current_thread_info()->utraps)
			return -ENOMEM;
		current_thread_info()->utraps[0] = 1;
	} else {
		if ((utrap_handler_t)current_thread_info()->utraps[type] != new_p &&
		    current_thread_info()->utraps[0] > 1) {
			unsigned long *p = current_thread_info()->utraps;

			current_thread_info()->utraps =
				kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long),
					GFP_KERNEL);
			if (!current_thread_info()->utraps) {
				current_thread_info()->utraps = p;
				return -ENOMEM;
			}
			p[0]--;
			current_thread_info()->utraps[0] = 1;
			memcpy(current_thread_info()->utraps+1, p+1,
			       UT_TRAP_INSTRUCTION_31*sizeof(long));
		}
	}
	if (old_p) {
		if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
			return -EFAULT;
	}
	if (old_d) {
		if (put_user(NULL, old_d))
			return -EFAULT;
	}
	current_thread_info()->utraps[type] = (long)new_p;

	return 0;
}
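
/* Select the SPARC V9 memory model for this thread: 0 = TSO, 1 = PSO,
 * 2 = RMO; values of 3 and above are reserved.  The model is written
 * into the MM field of the saved %tstate, so it takes effect on return
 * to userland.
 */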
asmlinkage long sparc_memory_ordering(unsigned long model,
				      struct pt_regs *regs)
{
	if (model >= 3)
		return -EINVAL;
	regs->tstate = (regs->tstate & ~TSTATE_MM) | (model << 14);
	return 0;
}

SYSCALL_DEFINE5(rt_sigaction, int, sig, const struct sigaction __user *, act,
		struct sigaction __user *, oact, void __user *, restorer,
		size_t, sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (act) {
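		/* Unlike the generic rt_sigaction, the SPARC variant takes an
		 * explicit restorer: the userland trampoline used to return
		 * from the handler, kept in ka_restorer.
		 */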
		new_ka.ka_restorer = restorer;
		if (copy_from_user(&new_ka.sa, act, sizeof(*act)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_ka.sa, sizeof(*oact)))
			return -EFAULT;
	}

	return ret;
}
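
/* Report arch-specific feature bits to userland in one cheap call.
 * KERN_FEATURE_MIXED_MODE_STACK advertises that a 32-bit task may run
 * on a 64-bit (biased) stack, which is what lets 32-bit code use the
 * montmul/montsqr/mpmul instructions reliably.
 */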
asmlinkage long sys_kern_features(void)
{
	return KERN_FEATURE_MIXED_MODE_STACK;
}