arm64: Use generic kernel_execve() implementation

This patch enables CONFIG_GENERIC_KERNEL_EXECVE on arm64 and removes the
arm64-specific implementation of kernel_execve().

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Catalin Marinas 2012-09-10 16:11:46 +01:00
parent c34501d21b
commit 59dc67b0cc
3 changed files with 2 additions and 44 deletions
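
For context: with GENERIC_KERNEL_EXECVE selected, the core kernel supplies kernel_execve() (in fs/exec.c at the time), so the architecture only has to keep its normal return-to-user path. The sketch below is an approximation from memory of what that generic helper looked like in this era, not the exact upstream code; in particular, ret_from_kernel_execve() stands in here for the arch-provided "return to userspace through the freshly built register frame" helper that the generic version relied on.

	/* Approximate shape of the generic kernel_execve() being adopted
	 * (illustrative sketch, not the verbatim fs/exec.c code). */
	int kernel_execve(const char *filename,
			  const char *const argv[],
			  const char *const envp[])
	{
		/* pt_regs frame at the top of the current kernel stack */
		struct pt_regs *regs = current_pt_regs();
		int ret;

		ret = do_execve(filename,
				(const char __user *const __user *)argv,
				(const char __user *const __user *)envp, regs);
		if (ret < 0)
			return ret;

		/*
		 * Success: do_execve() populated *regs with the new user
		 * context, so jump straight back to userspace instead of
		 * returning to the caller.
		 */
		ret_from_kernel_execve(regs);
	}

Because the register frame is built in place at the top of the kernel stack, no architecture-specific stack copying is needed, which is what lets the arm64 code below be deleted.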

arch/arm64/Kconfig

@@ -6,6 +6,7 @@ config ARM64
 	select GENERIC_IOMAP
 	select GENERIC_IRQ_PROBE
 	select GENERIC_IRQ_SHOW
+	select GENERIC_KERNEL_EXECVE
 	select GENERIC_KERNEL_THREAD
 	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_TIME_VSYSCALL

arch/arm64/kernel/entry.S

@@ -594,7 +594,7 @@ work_resched:
 /*
  * "slow" syscall return path.
  */
-ENTRY(ret_to_user)
+ret_to_user:
 	disable_irq				// disable interrupts
 	ldr	x1, [tsk, #TI_FLAGS]
 	and	x2, x1, #_TIF_WORK_MASK
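
The only reason ret_to_user needed to be a global ENTRY() was the "b ret_to_user" in the inline assembly removed from sys.c below; with that gone, a file-local label is enough. As a reminder, ENTRY() in include/linux/linkage.h expands to roughly the following, so dropping it simply stops exporting the symbol to other objects:

	#define ENTRY(name)	\
		.globl name;	\
		ALIGN;		\
		name: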

arch/arm64/kernel/sys.c

@@ -62,49 +62,6 @@ out:
 	return error;
 }
 
-int kernel_execve(const char *filename,
-		  const char *const argv[],
-		  const char *const envp[])
-{
-	struct pt_regs regs;
-	int ret;
-
-	memset(&regs, 0, sizeof(struct pt_regs));
-	ret = do_execve(filename,
-			(const char __user *const __user *)argv,
-			(const char __user *const __user *)envp, &regs);
-	if (ret < 0)
-		goto out;
-
-	/*
-	 * Save argc to the register structure for userspace.
-	 */
-	regs.regs[0] = ret;
-
-	/*
-	 * We were successful. We won't be returning to our caller, but
-	 * instead to user space by manipulating the kernel stack.
-	 */
-	asm(	"add	x0, %0, %1\n\t"
-		"mov	x1, %2\n\t"
-		"mov	x2, %3\n\t"
-		"bl	memmove\n\t"		/* copy regs to top of stack */
-		"mov	x27, #0\n\t"		/* not a syscall */
-		"mov	x28, %0\n\t"		/* thread structure */
-		"mov	sp, x0\n\t"		/* reposition stack pointer */
-		"b	ret_to_user"
-	:
-	: "r" (current_thread_info()),
-	  "Ir" (THREAD_START_SP - sizeof(regs)),
-	  "r" (&regs),
-	  "Ir" (sizeof(regs))
-	: "x0", "x1", "x2", "x27", "x28", "x30", "memory");
-
-out:
-	return ret;
-}
-EXPORT_SYMBOL(kernel_execve);
-
 asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
 			 unsigned long prot, unsigned long flags,
 			 unsigned long fd, off_t off)
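
For readers wondering what the deleted inline assembly was doing: it copied the on-stack pt_regs to the top of the task's kernel stack, switched sp to that location, and branched to ret_to_user, which restores the registers and erets to userspace. The destination it computes, current_thread_info() + THREAD_START_SP - sizeof(regs), is simply the task's user register frame; a rough C equivalent (illustration only, not code from the tree):

	/* Illustration of the address the removed asm passed to memmove:
	 * the pt_regs slot at the top of the current kernel stack. */
	struct pt_regs *dst = (struct pt_regs *)
		((unsigned long)current_thread_info() + THREAD_START_SP) - 1;

The generic kernel_execve() makes this copy unnecessary by having do_execve() fill in that frame directly via current_pt_regs().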