Merge tag 'for-linus-4.1' of git://git.kernel.org/pub/scm/linux/kernel/git/rw/uml
Pull UML updates from Richard Weinberger:
 - hostfs saw a face lifting
 - old/broken stuff was removed (SMP, HIGHMEM, SKAS3/4)
 - random cleanups and bug fixes

* tag 'for-linus-4.1' of git://git.kernel.org/pub/scm/linux/kernel/git/rw/uml: (26 commits)
  um: Print minimum physical memory requirement
  um: Move uml_postsetup in the init_thread stack
  um: add a kmsg_dumper
  x86, UML: fix integer overflow in ELF_ET_DYN_BASE
  um: hostfs: Reduce number of syscalls in readdir
  um: Remove broken highmem support
  um: Remove broken SMP support
  um: Remove SKAS3/4 support
  um: Remove ppc cruft
  um: Remove ia64 cruft
  um: Remove dead code from stacktrace
  hostfs: No need to box and later unbox the file mode
  hostfs: Use page_offset()
  hostfs: Set page flags in hostfs_readpage() correctly
  hostfs: Remove superfluous initializations in hostfs_open()
  hostfs: hostfs_open: Reset open flags upon each retry
  hostfs: Remove superfluous test in hostfs_open()
  hostfs: Report append flag in ->show_options()
  hostfs: Use __getname() in follow_link
  hostfs: Remove open coded strcpy()
  ...
commit e44740c1a9
@@ -95,48 +95,6 @@ config MAGIC_SYSRQ
	  The keys are documented in <file:Documentation/sysrq.txt>. Don't say Y
	  unless you really know what this hack does.

config SMP
	bool "Symmetric multi-processing support"
	default n
	depends on BROKEN
	help
	  This option enables UML SMP support.
	  It is NOT related to having a real SMP box. Not directly, at least.

	  UML implements virtual SMP by allowing as many processes to run
	  simultaneously on the host as there are virtual processors configured.

	  Obviously, if the host is a uniprocessor, those processes will
	  timeshare, but, inside UML, will appear to be running simultaneously.
	  If the host is a multiprocessor, then UML processes may run
	  simultaneously, depending on the host scheduler.

	  This, however, is supported only in TT mode. So, if you use the SKAS
	  patch on your host, switching to TT mode and enabling SMP usually
	  gives you worse performances.
	  Also, since the support for SMP has been under-developed, there could
	  be some bugs being exposed by enabling SMP.

	  If you don't know what to do, say N.

config NR_CPUS
	int "Maximum number of CPUs (2-32)"
	range 2 32
	depends on SMP
	default "32"

config HIGHMEM
	bool "Highmem support"
	depends on !64BIT && BROKEN
	default n
	help
	  This was used to allow UML to run with big amounts of memory.
	  Currently it is unstable, so if unsure say N.

	  To use big amounts of memory, it is recommended enable static
	  linking (i.e. CONFIG_STATIC_LINK) - this should allow the
	  guest to use up to 2.75G of memory.

config KERNEL_STACK_ORDER
	int "Kernel stack size order"
	default 1 if 64BIT
@@ -1 +0,0 @@
START_ADDR = 0x1000000000000000
@@ -1,9 +0,0 @@
ifeq ($(CONFIG_HOST_2G_2G), y)
START_ADDR = 0x80000000
else
START_ADDR = 0xc0000000
endif
ARCH_CFLAGS = -U__powerpc__ -D__UM_PPC__

# The arch is ppc, but the elf32 name is powerpc
ELF_SUBARCH = powerpc
@@ -33,10 +33,6 @@
 * fix-mapped?
 */
enum fixed_addresses {
#ifdef CONFIG_HIGHMEM
	FIX_KMAP_BEGIN,	/* reserved pte's for temporary kernel mappings */
	FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
#endif
	__end_of_fixed_addresses
};

@@ -47,11 +47,7 @@ extern unsigned long end_iomem;
#define VMALLOC_OFFSET (__va_space)
#define VMALLOC_START ((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define PKMAP_BASE ((FIXADDR_START - LAST_PKMAP * PAGE_SIZE) & PMD_MASK)
#ifdef CONFIG_HIGHMEM
# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
#else
# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
#endif
#define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
#define MODULES_VADDR VMALLOC_START
#define MODULES_END VMALLOC_END
#define MODULES_LEN (MODULES_VADDR - MODULES_END)
@@ -98,16 +98,8 @@ struct cpuinfo_um {

extern struct cpuinfo_um boot_cpu_data;

#define my_cpu_data cpu_data[smp_processor_id()]

#ifdef CONFIG_SMP
extern struct cpuinfo_um cpu_data[];
#define current_cpu_data cpu_data[smp_processor_id()]
#else
#define cpu_data (&boot_cpu_data)
#define current_cpu_data boot_cpu_data
#endif


#define KSTK_REG(tsk, reg) get_thread_reg(reg, &tsk->thread.switch_buf)
extern unsigned long get_wchan(struct task_struct *p);
@@ -1,32 +1,6 @@
#ifndef __UM_SMP_H
#define __UM_SMP_H

#ifdef CONFIG_SMP

#include <linux/bitops.h>
#include <asm/current.h>
#include <linux/cpumask.h>

#define raw_smp_processor_id() (current_thread->cpu)

#define cpu_logical_map(n) (n)
#define cpu_number_map(n) (n)
extern int hard_smp_processor_id(void);
#define NO_PROC_ID -1

extern int ncpus;


static inline void smp_cpus_done(unsigned int maxcpus)
{
}

extern struct task_struct *idle_threads[NR_CPUS];

#else

#define hard_smp_processor_id() 0

#endif

#endif
@@ -56,6 +56,7 @@ extern unsigned long brk_start;
extern unsigned long host_task_size;

extern int linux_main(int argc, char **argv);
extern void uml_finishsetup(void);

struct siginfo;
extern void (*sig_info[])(int, struct siginfo *si, struct uml_pt_regs *);
@@ -174,7 +174,6 @@ extern unsigned long long os_makedev(unsigned major, unsigned minor);

/* start_up.c */
extern void os_early_checks(void);
extern void can_do_skas(void);
extern void os_check_bugs(void);
extern void check_host_supports_tls(int *supports_tls, int *tls_min);

@@ -187,7 +186,6 @@ extern int os_process_parent(int pid);
extern void os_stop_process(int pid);
extern void os_kill_process(int pid, int reap_child);
extern void os_kill_ptraced_process(int pid, int reap_child);
extern long os_ptrace_ldt(long pid, long addr, long data);

extern int os_getpid(void);
extern int os_getpgrp(void);
@@ -1,44 +0,0 @@
/*
 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */

#ifndef __SKAS_PROC_MM_H
#define __SKAS_PROC_MM_H

#define MM_MMAP 54
#define MM_MUNMAP 55
#define MM_MPROTECT 56
#define MM_COPY_SEGMENTS 57

struct mm_mmap {
	unsigned long addr;
	unsigned long len;
	unsigned long prot;
	unsigned long flags;
	unsigned long fd;
	unsigned long offset;
};

struct mm_munmap {
	unsigned long addr;
	unsigned long len;
};

struct mm_mprotect {
	unsigned long addr;
	unsigned long len;
	unsigned int prot;
};

struct proc_mm_op {
	int op;
	union {
		struct mm_mmap mmap;
		struct mm_munmap munmap;
		struct mm_mprotect mprotect;
		int copy_segments;
	} u;
};

#endif
@@ -9,13 +9,10 @@
#include <sysdep/ptrace.h>

extern int userspace_pid[];
extern int proc_mm, ptrace_faultinfo, ptrace_ldt;
extern int skas_needs_stub;

extern int user_thread(unsigned long stack, int flags);
extern void new_thread_handler(void);
extern void handle_syscall(struct uml_pt_regs *regs);
extern int new_mm(unsigned long stack);
extern long execute_syscall_skas(void *r);
extern unsigned long current_stub_stack(void);

@@ -1,14 +0,0 @@
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#ifndef __SKAS_PTRACE_H
#define __SKAS_PTRACE_H

#define PTRACE_FAULTINFO 52
#define PTRACE_SWITCH_MM 55

#include <sysdep/skas_ptrace.h>

#endif
@@ -12,8 +12,8 @@ clean-files :=

obj-y = config.o exec.o exitcode.o irq.o ksyms.o mem.o \
	physmem.o process.o ptrace.o reboot.o sigio.o \
	signal.o smp.o syscall.o sysrq.o time.o tlb.o trap.o \
	um_arch.o umid.o maccess.o skas/
	signal.o syscall.o sysrq.o time.o tlb.o trap.o \
	um_arch.o umid.o maccess.o kmsg_dump.o skas/

obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o
obj-$(CONFIG_GPROF) += gprof_syms.o
@@ -35,9 +35,6 @@ void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
	struct irq_fd *irq_fd;
	int n;

	if (smp_sigio_handler())
		return;

	while (1) {
		n = os_waiting_for_events(active_fds);
		if (n <= 0) {
@@ -0,0 +1,43 @@
#include <linux/kmsg_dump.h>
#include <linux/console.h>
#include <shared/init.h>
#include <shared/kern.h>
#include <os.h>

static void kmsg_dumper_stdout(struct kmsg_dumper *dumper,
				enum kmsg_dump_reason reason)
{
	static char line[1024];

	size_t len = 0;
	bool con_available = false;

	/* only dump kmsg when no console is available */
	if (!console_trylock())
		return;

	if (console_drivers != NULL)
		con_available = true;

	console_unlock();

	if (con_available == true)
		return;

	printf("kmsg_dump:\n");
	while (kmsg_dump_get_line(dumper, true, line, sizeof(line), &len)) {
		line[len] = '\0';
		printf("%s", line);
	}
}

static struct kmsg_dumper kmsg_dumper = {
	.dump = kmsg_dumper_stdout
};

int __init kmsg_dumper_stdout_init(void)
{
	return kmsg_dump_register(&kmsg_dumper);
}

__uml_postsetup(kmsg_dumper_stdout_init);
@@ -38,19 +38,6 @@ int kmalloc_ok = 0;
/* Used during early boot */
static unsigned long brk_end;

#ifdef CONFIG_HIGHMEM
static void setup_highmem(unsigned long highmem_start,
			  unsigned long highmem_len)
{
	unsigned long highmem_pfn;
	int i;

	highmem_pfn = __pa(highmem_start) >> PAGE_SHIFT;
	for (i = 0; i < highmem_len >> PAGE_SHIFT; i++)
		free_highmem_page(&mem_map[highmem_pfn + i]);
}
#endif

void __init mem_init(void)
{
	/* clear the zero-page */
@@ -67,9 +54,6 @@ void __init mem_init(void)
	/* this will put all low memory onto the freelists */
	free_all_bootmem();
	max_low_pfn = totalram_pages;
#ifdef CONFIG_HIGHMEM
	setup_highmem(end_iomem, highmem);
#endif
	max_pfn = totalram_pages;
	mem_init_print_info(NULL);
	kmalloc_ok = 1;
@@ -127,49 +111,6 @@ static void __init fixrange_init(unsigned long start, unsigned long end,
	}
}

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

#define kmap_get_fixmap_pte(vaddr) \
	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)),\
			  (vaddr)), (vaddr))

static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

	kmap_prot = PAGE_KERNEL;
}

static void __init init_highmem(void)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long vaddr;

	/*
	 * Permanent kmaps:
	 */
	vaddr = PKMAP_BASE;
	fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, swapper_pg_dir);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;

	kmap_init();
}
#endif /* CONFIG_HIGHMEM */

static void __init fixaddr_user_init( void)
{
#ifdef CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA
@@ -211,9 +152,6 @@ void __init paging_init(void)

	zones_size[ZONE_NORMAL] = (end_iomem >> PAGE_SHIFT) -
		(uml_physmem >> PAGE_SHIFT);
#ifdef CONFIG_HIGHMEM
	zones_size[ZONE_HIGHMEM] = highmem >> PAGE_SHIFT;
#endif
	free_area_init(zones_size);

	/*
@@ -224,10 +162,6 @@ void __init paging_init(void)
	fixrange_init(vaddr, FIXADDR_TOP, swapper_pg_dir);

	fixaddr_user_init();

#ifdef CONFIG_HIGHMEM
	init_highmem();
#endif
}

/*
@@ -57,22 +57,51 @@ void map_memory(unsigned long virt, unsigned long phys, unsigned long len,

extern int __syscall_stub_start;

/**
 * setup_physmem() - Setup physical memory for UML
 * @start: Start address of the physical kernel memory,
 *         i.e start address of the executable image.
 * @reserve_end: end address of the physical kernel memory.
 * @len: Length of total physical memory that should be mapped/made
 *       available, in bytes.
 * @highmem: Number of highmem bytes that should be mapped/made available.
 *
 * Creates an unlinked temporary file of size (len + highmem) and memory maps
 * it on the last executable image address (uml_reserved).
 *
 * The offset is needed as the length of the total physical memory
 * (len + highmem) includes the size of the memory used be the executable image,
 * but the mapped-to address is the last address of the executable image
 * (uml_reserved == end address of executable image).
 *
 * The memory mapped memory of the temporary file is used as backing memory
 * of all user space processes/kernel tasks.
 */
void __init setup_physmem(unsigned long start, unsigned long reserve_end,
			  unsigned long len, unsigned long long highmem)
{
	unsigned long reserve = reserve_end - start;
	int pfn = PFN_UP(__pa(reserve_end));
	int delta = (len - reserve) >> PAGE_SHIFT;
	int err, offset, bootmap_size;
	unsigned long pfn = PFN_UP(__pa(reserve_end));
	unsigned long delta = (len - reserve) >> PAGE_SHIFT;
	unsigned long offset, bootmap_size;
	long map_size;
	int err;

	offset = uml_reserved - uml_physmem;
	map_size = len - offset;
	if(map_size <= 0) {
		printf("Too few physical memory! Needed=%d, given=%d\n",
		       offset, len);
		exit(1);
	}

	physmem_fd = create_mem_file(len + highmem);

	offset = uml_reserved - uml_physmem;
	err = os_map_memory((void *) uml_reserved, physmem_fd, offset,
			    len - offset, 1, 1, 1);
			    map_size, 1, 1, 1);
	if (err < 0) {
		printf("setup_physmem - mapping %ld bytes of memory at 0x%p "
		       "failed - errno = %d\n", len - offset,
		       "failed - errno = %d\n", map_size,
		       (void *) uml_reserved, err);
		exit(1);
	}
@@ -259,17 +259,6 @@ int strlen_user_proc(char __user *str)
	return strlen_user(str);
}

int smp_sigio_handler(void)
{
#ifdef CONFIG_SMP
	int cpu = current_thread_info()->cpu;
	IPI_handler(cpu);
	if (cpu != 0)
		return 1;
#endif
	return 0;
}

int cpu(void)
{
	return current_thread_info()->cpu;
@@ -8,9 +8,6 @@
#include <linux/sched.h>
#include <linux/tracehook.h>
#include <asm/uaccess.h>
#include <skas_ptrace.h>



void user_enable_single_step(struct task_struct *child)
{
@@ -104,35 +101,6 @@ long arch_ptrace(struct task_struct *child, long request,
		ret = ptrace_set_thread_area(child, addr, vp);
		break;

	case PTRACE_FAULTINFO: {
		/*
		 * Take the info from thread->arch->faultinfo,
		 * but transfer max. sizeof(struct ptrace_faultinfo).
		 * On i386, ptrace_faultinfo is smaller!
		 */
		ret = copy_to_user(p, &child->thread.arch.faultinfo,
				   sizeof(struct ptrace_faultinfo)) ?
			-EIO : 0;
		break;
	}

#ifdef PTRACE_LDT
	case PTRACE_LDT: {
		struct ptrace_ldt ldt;

		if (copy_from_user(&ldt, p, sizeof(ldt))) {
			ret = -EIO;
			break;
		}

		/*
		 * This one is confusing, so just punt and return -EIO for
		 * now
		 */
		ret = -EIO;
		break;
	}
#endif
	default:
		ret = ptrace_request(child, request, addr, data);
		if (ret == -EIO)
@@ -15,28 +15,21 @@ void (*pm_power_off)(void);

static void kill_off_processes(void)
{
	if (proc_mm)
		/*
		 * FIXME: need to loop over userspace_pids
		 */
		os_kill_ptraced_process(userspace_pid[0], 1);
	else {
		struct task_struct *p;
		int pid;
	struct task_struct *p;
	int pid;

		read_lock(&tasklist_lock);
		for_each_process(p) {
			struct task_struct *t;
	read_lock(&tasklist_lock);
	for_each_process(p) {
		struct task_struct *t;

			t = find_lock_task_mm(p);
			if (!t)
				continue;
			pid = t->mm->context.id.u.pid;
			task_unlock(t);
			os_kill_ptraced_process(pid, 1);
		}
		read_unlock(&tasklist_lock);
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		pid = t->mm->context.id.u.pid;
		task_unlock(t);
		os_kill_ptraced_process(pid, 1);
	}
	read_unlock(&tasklist_lock);
}

void uml_cleanup(void)
@ -54,35 +54,22 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm)
|
|||
unsigned long stack = 0;
|
||||
int ret = -ENOMEM;
|
||||
|
||||
if (skas_needs_stub) {
|
||||
stack = get_zeroed_page(GFP_KERNEL);
|
||||
if (stack == 0)
|
||||
goto out;
|
||||
}
|
||||
stack = get_zeroed_page(GFP_KERNEL);
|
||||
if (stack == 0)
|
||||
goto out;
|
||||
|
||||
to_mm->id.stack = stack;
|
||||
if (current->mm != NULL && current->mm != &init_mm)
|
||||
from_mm = ¤t->mm->context;
|
||||
|
||||
if (proc_mm) {
|
||||
ret = new_mm(stack);
|
||||
if (ret < 0) {
|
||||
printk(KERN_ERR "init_new_context_skas - "
|
||||
"new_mm failed, errno = %d\n", ret);
|
||||
goto out_free;
|
||||
}
|
||||
to_mm->id.u.mm_fd = ret;
|
||||
}
|
||||
else {
|
||||
if (from_mm)
|
||||
to_mm->id.u.pid = copy_context_skas0(stack,
|
||||
from_mm->id.u.pid);
|
||||
else to_mm->id.u.pid = start_userspace(stack);
|
||||
if (from_mm)
|
||||
to_mm->id.u.pid = copy_context_skas0(stack,
|
||||
from_mm->id.u.pid);
|
||||
else to_mm->id.u.pid = start_userspace(stack);
|
||||
|
||||
if (to_mm->id.u.pid < 0) {
|
||||
ret = to_mm->id.u.pid;
|
||||
goto out_free;
|
||||
}
|
||||
if (to_mm->id.u.pid < 0) {
|
||||
ret = to_mm->id.u.pid;
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
ret = init_new_ldt(to_mm, from_mm);
|
||||
|
@ -105,9 +92,6 @@ void uml_setup_stubs(struct mm_struct *mm)
|
|||
{
|
||||
int err, ret;
|
||||
|
||||
if (!skas_needs_stub)
|
||||
return;
|
||||
|
||||
ret = init_stub_pte(mm, STUB_CODE,
|
||||
(unsigned long) &__syscall_stub_start);
|
||||
if (ret)
|
||||
|
@ -154,25 +138,19 @@ void destroy_context(struct mm_struct *mm)
|
|||
{
|
||||
struct mm_context *mmu = &mm->context;
|
||||
|
||||
if (proc_mm)
|
||||
os_close_file(mmu->id.u.mm_fd);
|
||||
else {
|
||||
/*
|
||||
* If init_new_context wasn't called, this will be
|
||||
* zero, resulting in a kill(0), which will result in the
|
||||
* whole UML suddenly dying. Also, cover negative and
|
||||
* 1 cases, since they shouldn't happen either.
|
||||
*/
|
||||
if (mmu->id.u.pid < 2) {
|
||||
printk(KERN_ERR "corrupt mm_context - pid = %d\n",
|
||||
mmu->id.u.pid);
|
||||
return;
|
||||
}
|
||||
os_kill_ptraced_process(mmu->id.u.pid, 1);
|
||||
/*
|
||||
* If init_new_context wasn't called, this will be
|
||||
* zero, resulting in a kill(0), which will result in the
|
||||
* whole UML suddenly dying. Also, cover negative and
|
||||
* 1 cases, since they shouldn't happen either.
|
||||
*/
|
||||
if (mmu->id.u.pid < 2) {
|
||||
printk(KERN_ERR "corrupt mm_context - pid = %d\n",
|
||||
mmu->id.u.pid);
|
||||
return;
|
||||
}
|
||||
os_kill_ptraced_process(mmu->id.u.pid, 1);
|
||||
|
||||
if (skas_needs_stub)
|
||||
free_page(mmu->id.stack);
|
||||
|
||||
free_page(mmu->id.stack);
|
||||
free_ldt(mmu);
|
||||
}
|
||||
|
|
|
@ -10,25 +10,6 @@
|
|||
#include <os.h>
|
||||
#include <skas.h>
|
||||
|
||||
int new_mm(unsigned long stack)
|
||||
{
|
||||
int fd, err;
|
||||
|
||||
fd = os_open_file("/proc/mm", of_cloexec(of_write(OPENFLAGS())), 0);
|
||||
if (fd < 0)
|
||||
return fd;
|
||||
|
||||
if (skas_needs_stub) {
|
||||
err = map_stub_pages(fd, STUB_CODE, STUB_DATA, stack);
|
||||
if (err) {
|
||||
os_close_file(fd);
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
return fd;
|
||||
}
|
||||
|
||||
extern void start_kernel(void);
|
||||
|
||||
static int __init start_kernel_proc(void *unused)
|
||||
|
@ -40,9 +21,7 @@ static int __init start_kernel_proc(void *unused)
|
|||
|
||||
cpu_tasks[0].pid = pid;
|
||||
cpu_tasks[0].task = current;
|
||||
#ifdef CONFIG_SMP
|
||||
init_cpu_online(get_cpu_mask(0));
|
||||
#endif
|
||||
|
||||
start_kernel();
|
||||
return 0;
|
||||
}
|
||||
|
@ -55,14 +34,6 @@ int __init start_uml(void)
|
|||
{
|
||||
stack_protections((unsigned long) &cpu0_irqstack);
|
||||
set_sigstack(cpu0_irqstack, THREAD_SIZE);
|
||||
if (proc_mm) {
|
||||
userspace_pid[0] = start_userspace(0);
|
||||
if (userspace_pid[0] < 0) {
|
||||
printf("start_uml - start_userspace returned %d\n",
|
||||
userspace_pid[0]);
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
init_new_thread_signals();
|
||||
|
||||
|
|
|
@ -1,238 +0,0 @@
|
|||
/*
|
||||
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
|
||||
* Licensed under the GPL
|
||||
*/
|
||||
|
||||
#include <linux/percpu.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/tlb.h>
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
|
||||
#include <linux/sched.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/threads.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/hardirq.h>
|
||||
#include <asm/smp.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/spinlock.h>
|
||||
#include <kern.h>
|
||||
#include <irq_user.h>
|
||||
#include <os.h>
|
||||
|
||||
/* Per CPU bogomips and other parameters
|
||||
* The only piece used here is the ipi pipe, which is set before SMP is
|
||||
* started and never changed.
|
||||
*/
|
||||
struct cpuinfo_um cpu_data[NR_CPUS];
|
||||
|
||||
/* A statistic, can be a little off */
|
||||
int num_reschedules_sent = 0;
|
||||
|
||||
/* Not changed after boot */
|
||||
struct task_struct *idle_threads[NR_CPUS];
|
||||
|
||||
void smp_send_reschedule(int cpu)
|
||||
{
|
||||
os_write_file(cpu_data[cpu].ipi_pipe[1], "R", 1);
|
||||
num_reschedules_sent++;
|
||||
}
|
||||
|
||||
void smp_send_stop(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
printk(KERN_INFO "Stopping all CPUs...");
|
||||
for (i = 0; i < num_online_cpus(); i++) {
|
||||
if (i == current_thread->cpu)
|
||||
continue;
|
||||
os_write_file(cpu_data[i].ipi_pipe[1], "S", 1);
|
||||
}
|
||||
printk(KERN_CONT "done\n");
|
||||
}
|
||||
|
||||
static cpumask_t smp_commenced_mask = CPU_MASK_NONE;
|
||||
static cpumask_t cpu_callin_map = CPU_MASK_NONE;
|
||||
|
||||
static int idle_proc(void *cpup)
|
||||
{
|
||||
int cpu = (int) cpup, err;
|
||||
|
||||
err = os_pipe(cpu_data[cpu].ipi_pipe, 1, 1);
|
||||
if (err < 0)
|
||||
panic("CPU#%d failed to create IPI pipe, err = %d", cpu, -err);
|
||||
|
||||
os_set_fd_async(cpu_data[cpu].ipi_pipe[0]);
|
||||
|
||||
wmb();
|
||||
if (cpu_test_and_set(cpu, cpu_callin_map)) {
|
||||
printk(KERN_ERR "huh, CPU#%d already present??\n", cpu);
|
||||
BUG();
|
||||
}
|
||||
|
||||
while (!cpu_isset(cpu, smp_commenced_mask))
|
||||
cpu_relax();
|
||||
|
||||
notify_cpu_starting(cpu);
|
||||
set_cpu_online(cpu, true);
|
||||
default_idle();
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct task_struct *idle_thread(int cpu)
|
||||
{
|
||||
struct task_struct *new_task;
|
||||
|
||||
current->thread.request.u.thread.proc = idle_proc;
|
||||
current->thread.request.u.thread.arg = (void *) cpu;
|
||||
new_task = fork_idle(cpu);
|
||||
if (IS_ERR(new_task))
|
||||
panic("copy_process failed in idle_thread, error = %ld",
|
||||
PTR_ERR(new_task));
|
||||
|
||||
cpu_tasks[cpu] = ((struct cpu_task)
|
||||
{ .pid = new_task->thread.mode.tt.extern_pid,
|
||||
.task = new_task } );
|
||||
idle_threads[cpu] = new_task;
|
||||
panic("skas mode doesn't support SMP");
|
||||
return new_task;
|
||||
}
|
||||
|
||||
void smp_prepare_cpus(unsigned int maxcpus)
|
||||
{
|
||||
struct task_struct *idle;
|
||||
unsigned long waittime;
|
||||
int err, cpu, me = smp_processor_id();
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ncpus; ++i)
|
||||
set_cpu_possible(i, true);
|
||||
|
||||
set_cpu_online(me, true);
|
||||
cpu_set(me, cpu_callin_map);
|
||||
|
||||
err = os_pipe(cpu_data[me].ipi_pipe, 1, 1);
|
||||
if (err < 0)
|
||||
panic("CPU#0 failed to create IPI pipe, errno = %d", -err);
|
||||
|
||||
os_set_fd_async(cpu_data[me].ipi_pipe[0]);
|
||||
|
||||
for (cpu = 1; cpu < ncpus; cpu++) {
|
||||
printk(KERN_INFO "Booting processor %d...\n", cpu);
|
||||
|
||||
idle = idle_thread(cpu);
|
||||
|
||||
init_idle(idle, cpu);
|
||||
|
||||
waittime = 200000000;
|
||||
while (waittime-- && !cpu_isset(cpu, cpu_callin_map))
|
||||
cpu_relax();
|
||||
|
||||
printk(KERN_INFO "%s\n",
|
||||
cpu_isset(cpu, cpu_calling_map) ? "done" : "failed");
|
||||
}
|
||||
}
|
||||
|
||||
void smp_prepare_boot_cpu(void)
|
||||
{
|
||||
set_cpu_online(smp_processor_id(), true);
|
||||
}
|
||||
|
||||
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
|
||||
{
|
||||
cpu_set(cpu, smp_commenced_mask);
|
||||
while (!cpu_online(cpu))
|
||||
mb();
|
||||
return 0;
|
||||
}
|
||||
|
||||
int setup_profiling_timer(unsigned int multiplier)
|
||||
{
|
||||
printk(KERN_INFO "setup_profiling_timer\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
void smp_call_function_slave(int cpu);
|
||||
|
||||
void IPI_handler(int cpu)
|
||||
{
|
||||
unsigned char c;
|
||||
int fd;
|
||||
|
||||
fd = cpu_data[cpu].ipi_pipe[0];
|
||||
while (os_read_file(fd, &c, 1) == 1) {
|
||||
switch (c) {
|
||||
case 'C':
|
||||
smp_call_function_slave(cpu);
|
||||
break;
|
||||
|
||||
case 'R':
|
||||
scheduler_ipi();
|
||||
break;
|
||||
|
||||
case 'S':
|
||||
printk(KERN_INFO "CPU#%d stopping\n", cpu);
|
||||
while (1)
|
||||
pause();
|
||||
break;
|
||||
|
||||
default:
|
||||
printk(KERN_ERR "CPU#%d received unknown IPI [%c]!\n",
|
||||
cpu, c);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
int hard_smp_processor_id(void)
|
||||
{
|
||||
return pid_to_processor_id(os_getpid());
|
||||
}
|
||||
|
||||
static DEFINE_SPINLOCK(call_lock);
|
||||
static atomic_t scf_started;
|
||||
static atomic_t scf_finished;
|
||||
static void (*func)(void *info);
|
||||
static void *info;
|
||||
|
||||
void smp_call_function_slave(int cpu)
|
||||
{
|
||||
atomic_inc(&scf_started);
|
||||
(*func)(info);
|
||||
atomic_inc(&scf_finished);
|
||||
}
|
||||
|
||||
int smp_call_function(void (*_func)(void *info), void *_info, int wait)
|
||||
{
|
||||
int cpus = num_online_cpus() - 1;
|
||||
int i;
|
||||
|
||||
if (!cpus)
|
||||
return 0;
|
||||
|
||||
/* Can deadlock when called with interrupts disabled */
|
||||
WARN_ON(irqs_disabled());
|
||||
|
||||
spin_lock_bh(&call_lock);
|
||||
atomic_set(&scf_started, 0);
|
||||
atomic_set(&scf_finished, 0);
|
||||
func = _func;
|
||||
info = _info;
|
||||
|
||||
for_each_online_cpu(i)
|
||||
os_write_file(cpu_data[i].ipi_pipe[1], "C", 1);
|
||||
|
||||
while (atomic_read(&scf_started) != cpus)
|
||||
barrier();
|
||||
|
||||
if (wait)
|
||||
while (atomic_read(&scf_finished) != cpus)
|
||||
barrier();
|
||||
|
||||
spin_unlock_bh(&call_lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif
|
|
@@ -29,7 +29,7 @@ static const struct stacktrace_ops stackops = {

void show_stack(struct task_struct *task, unsigned long *stack)
{
	unsigned long *sp = stack, bp = 0;
	unsigned long *sp = stack;
	struct pt_regs *segv_regs = current->thread.segv_regs;
	int i;

@@ -39,10 +39,6 @@ void show_stack(struct task_struct *task, unsigned long *stack)
		return;
	}

#ifdef CONFIG_FRAME_POINTER
	bp = get_frame_pointer(task, segv_regs);
#endif

	if (!stack)
		sp = get_stack_pointer(task, segv_regs);

@@ -220,7 +220,7 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user,
		panic("Segfault with no mm");
	}

	if (SEGV_IS_FIXABLE(&fi) || SEGV_MAYBE_FIXABLE(&fi))
	if (SEGV_IS_FIXABLE(&fi))
		err = handle_page_fault(address, ip, is_write, is_user,
					&si.si_code);
	else {
@@ -11,6 +11,7 @@
#include <linux/string.h>
#include <linux/utsname.h>
#include <linux/sched.h>
#include <linux/kmsg_dump.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/sections.h>
@ -66,12 +67,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
|
|||
{
|
||||
int index = 0;
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
index = (struct cpuinfo_um *) v - cpu_data;
|
||||
if (!cpu_online(index))
|
||||
return 0;
|
||||
#endif
|
||||
|
||||
seq_printf(m, "processor\t: %d\n", index);
|
||||
seq_printf(m, "vendor_id\t: User Mode Linux\n");
|
||||
seq_printf(m, "model name\t: UML\n");
|
||||
|
@ -168,23 +163,6 @@ __uml_setup("debug", no_skas_debug_setup,
|
|||
" this flag is not needed to run gdb on UML in skas mode\n\n"
|
||||
);
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
static int __init uml_ncpus_setup(char *line, int *add)
|
||||
{
|
||||
if (!sscanf(line, "%d", &ncpus)) {
|
||||
printf("Couldn't parse [%s]\n", line);
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
__uml_setup("ncpus=", uml_ncpus_setup,
|
||||
"ncpus=<# of desired CPUs>\n"
|
||||
" This tells an SMP kernel how many virtual processors to start.\n\n"
|
||||
);
|
||||
#endif
|
||||
|
||||
static int __init Usage(char *line, int *add)
|
||||
{
|
||||
const char **p;
|
||||
|
@ -234,6 +212,7 @@ static void __init uml_postsetup(void)
|
|||
static int panic_exit(struct notifier_block *self, unsigned long unused1,
|
||||
void *unused2)
|
||||
{
|
||||
kmsg_dump(KMSG_DUMP_PANIC);
|
||||
bust_spinlocks(1);
|
||||
bust_spinlocks(0);
|
||||
uml_exitcode = 1;
|
||||
|
@ -247,6 +226,16 @@ static struct notifier_block panic_exit_notifier = {
|
|||
.priority = 0
|
||||
};
|
||||
|
||||
void uml_finishsetup(void)
|
||||
{
|
||||
atomic_notifier_chain_register(&panic_notifier_list,
|
||||
&panic_exit_notifier);
|
||||
|
||||
uml_postsetup();
|
||||
|
||||
new_thread_handler();
|
||||
}
|
||||
|
||||
/* Set during early boot */
|
||||
unsigned long task_size;
|
||||
EXPORT_SYMBOL(task_size);
|
||||
|
@ -268,7 +257,6 @@ int __init linux_main(int argc, char **argv)
|
|||
unsigned long stack;
|
||||
unsigned int i;
|
||||
int add;
|
||||
char * mode;
|
||||
|
||||
for (i = 1; i < argc; i++) {
|
||||
if ((i == 1) && (argv[i][0] == ' '))
|
||||
|
@ -291,15 +279,6 @@ int __init linux_main(int argc, char **argv)
|
|||
/* OS sanity checks that need to happen before the kernel runs */
|
||||
os_early_checks();
|
||||
|
||||
can_do_skas();
|
||||
|
||||
if (proc_mm && ptrace_faultinfo)
|
||||
mode = "SKAS3";
|
||||
else
|
||||
mode = "SKAS0";
|
||||
|
||||
printf("UML running in %s mode\n", mode);
|
||||
|
||||
brk_start = (unsigned long) sbrk(0);
|
||||
|
||||
/*
|
||||
|
@ -334,11 +313,6 @@ int __init linux_main(int argc, char **argv)
|
|||
if (physmem_size + iomem_size > max_physmem) {
|
||||
highmem = physmem_size + iomem_size - max_physmem;
|
||||
physmem_size -= highmem;
|
||||
#ifndef CONFIG_HIGHMEM
|
||||
highmem = 0;
|
||||
printf("CONFIG_HIGHMEM not enabled - physical memory shrunk "
|
||||
"to %Lu bytes\n", physmem_size);
|
||||
#endif
|
||||
}
|
||||
|
||||
high_physmem = uml_physmem + physmem_size;
|
||||
|
@ -362,11 +336,6 @@ int __init linux_main(int argc, char **argv)
|
|||
printf("Kernel virtual memory size shrunk to %lu bytes\n",
|
||||
virtmem_size);
|
||||
|
||||
atomic_notifier_chain_register(&panic_notifier_list,
|
||||
&panic_exit_notifier);
|
||||
|
||||
uml_postsetup();
|
||||
|
||||
stack_protections((unsigned long) &init_thread_info);
|
||||
os_flush_stdout();
|
||||
|
||||
|
@ -390,15 +359,3 @@ void __init check_bugs(void)
|
|||
void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
|
||||
{
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
void alternatives_smp_module_add(struct module *mod, char *name,
|
||||
void *locks, void *locks_end,
|
||||
void *text, void *text_end)
|
||||
{
|
||||
}
|
||||
|
||||
void alternatives_smp_module_del(struct module *mod)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -16,7 +16,6 @@
|
|||
#include <init.h>
|
||||
#include <longjmp.h>
|
||||
#include <os.h>
|
||||
#include <skas_ptrace.h>
|
||||
|
||||
#define ARBITRARY_ADDR -1
|
||||
#define FAILURE_PID -1
|
||||
|
@ -102,21 +101,6 @@ void os_kill_process(int pid, int reap_child)
|
|||
CATCH_EINTR(waitpid(pid, NULL, __WALL));
|
||||
}
|
||||
|
||||
/* This is here uniquely to have access to the userspace errno, i.e. the one
|
||||
* used by ptrace in case of error.
|
||||
*/
|
||||
|
||||
long os_ptrace_ldt(long pid, long addr, long data)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = ptrace(PTRACE_LDT, pid, addr, data);
|
||||
|
||||
if (ret < 0)
|
||||
return -errno;
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Kill off a ptraced child by all means available. kill it normally first,
|
||||
* then PTRACE_KILL it, then PTRACE_CONT it in case it's in a run state from
|
||||
* which it can't exit directly.
|
||||
|
|
|
@ -12,7 +12,6 @@
|
|||
#include <as-layout.h>
|
||||
#include <mm_id.h>
|
||||
#include <os.h>
|
||||
#include <proc_mm.h>
|
||||
#include <ptrace_user.h>
|
||||
#include <registers.h>
|
||||
#include <skas.h>
|
||||
|
@ -46,8 +45,6 @@ static int __init init_syscall_regs(void)
|
|||
|
||||
__initcall(init_syscall_regs);
|
||||
|
||||
extern int proc_mm;
|
||||
|
||||
static inline long do_syscall_stub(struct mm_id * mm_idp, void **addr)
|
||||
{
|
||||
int n, i;
|
||||
|
@ -56,10 +53,6 @@ static inline long do_syscall_stub(struct mm_id * mm_idp, void **addr)
|
|||
unsigned long * syscall;
|
||||
int err, pid = mm_idp->u.pid;
|
||||
|
||||
if (proc_mm)
|
||||
/* FIXME: Need to look up userspace_pid by cpu */
|
||||
pid = userspace_pid[0];
|
||||
|
||||
n = ptrace_setregs(pid, syscall_regs);
|
||||
if (n < 0) {
|
||||
printk(UM_KERN_ERR "Registers - \n");
|
||||
|
@ -178,38 +171,12 @@ int map(struct mm_id * mm_idp, unsigned long virt, unsigned long len, int prot,
|
|||
int phys_fd, unsigned long long offset, int done, void **data)
|
||||
{
|
||||
int ret;
|
||||
unsigned long args[] = { virt, len, prot,
|
||||
MAP_SHARED | MAP_FIXED, phys_fd,
|
||||
MMAP_OFFSET(offset) };
|
||||
|
||||
if (proc_mm) {
|
||||
struct proc_mm_op map;
|
||||
int fd = mm_idp->u.mm_fd;
|
||||
|
||||
map = ((struct proc_mm_op) { .op = MM_MMAP,
|
||||
.u =
|
||||
{ .mmap =
|
||||
{ .addr = virt,
|
||||
.len = len,
|
||||
.prot = prot,
|
||||
.flags = MAP_SHARED |
|
||||
MAP_FIXED,
|
||||
.fd = phys_fd,
|
||||
.offset= offset
|
||||
} } } );
|
||||
CATCH_EINTR(ret = write(fd, &map, sizeof(map)));
|
||||
if (ret != sizeof(map)) {
|
||||
ret = -errno;
|
||||
printk(UM_KERN_ERR "map : /proc/mm map failed, "
|
||||
"err = %d\n", -ret);
|
||||
}
|
||||
else ret = 0;
|
||||
}
|
||||
else {
|
||||
unsigned long args[] = { virt, len, prot,
|
||||
MAP_SHARED | MAP_FIXED, phys_fd,
|
||||
MMAP_OFFSET(offset) };
|
||||
|
||||
ret = run_syscall_stub(mm_idp, STUB_MMAP_NR, args, virt,
|
||||
data, done);
|
||||
}
|
||||
ret = run_syscall_stub(mm_idp, STUB_MMAP_NR, args, virt,
|
||||
data, done);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -218,32 +185,11 @@ int unmap(struct mm_id * mm_idp, unsigned long addr, unsigned long len,
|
|||
int done, void **data)
|
||||
{
|
||||
int ret;
|
||||
unsigned long args[] = { (unsigned long) addr, len, 0, 0, 0,
|
||||
0 };
|
||||
|
||||
if (proc_mm) {
|
||||
struct proc_mm_op unmap;
|
||||
int fd = mm_idp->u.mm_fd;
|
||||
|
||||
unmap = ((struct proc_mm_op) { .op = MM_MUNMAP,
|
||||
.u =
|
||||
{ .munmap =
|
||||
{ .addr =
|
||||
(unsigned long) addr,
|
||||
.len = len } } } );
|
||||
CATCH_EINTR(ret = write(fd, &unmap, sizeof(unmap)));
|
||||
if (ret != sizeof(unmap)) {
|
||||
ret = -errno;
|
||||
printk(UM_KERN_ERR "unmap - proc_mm write returned "
|
||||
"%d\n", ret);
|
||||
}
|
||||
else ret = 0;
|
||||
}
|
||||
else {
|
||||
unsigned long args[] = { (unsigned long) addr, len, 0, 0, 0,
|
||||
0 };
|
||||
|
||||
ret = run_syscall_stub(mm_idp, __NR_munmap, args, 0,
|
||||
data, done);
|
||||
}
|
||||
ret = run_syscall_stub(mm_idp, __NR_munmap, args, 0,
|
||||
data, done);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -251,33 +197,11 @@ int unmap(struct mm_id * mm_idp, unsigned long addr, unsigned long len,
|
|||
int protect(struct mm_id * mm_idp, unsigned long addr, unsigned long len,
|
||||
unsigned int prot, int done, void **data)
|
||||
{
|
||||
struct proc_mm_op protect;
|
||||
int ret;
|
||||
unsigned long args[] = { addr, len, prot, 0, 0, 0 };
|
||||
|
||||
if (proc_mm) {
|
||||
int fd = mm_idp->u.mm_fd;
|
||||
|
||||
protect = ((struct proc_mm_op) { .op = MM_MPROTECT,
|
||||
.u =
|
||||
{ .mprotect =
|
||||
{ .addr =
|
||||
(unsigned long) addr,
|
||||
.len = len,
|
||||
.prot = prot } } } );
|
||||
|
||||
CATCH_EINTR(ret = write(fd, &protect, sizeof(protect)));
|
||||
if (ret != sizeof(protect)) {
|
||||
ret = -errno;
|
||||
printk(UM_KERN_ERR "protect failed, err = %d", -ret);
|
||||
}
|
||||
else ret = 0;
|
||||
}
|
||||
else {
|
||||
unsigned long args[] = { addr, len, prot, 0, 0, 0 };
|
||||
|
||||
ret = run_syscall_stub(mm_idp, __NR_mprotect, args, 0,
|
||||
data, done);
|
||||
}
|
||||
ret = run_syscall_stub(mm_idp, __NR_mprotect, args, 0,
|
||||
data, done);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -16,11 +16,9 @@
|
|||
#include <kern_util.h>
|
||||
#include <mem.h>
|
||||
#include <os.h>
|
||||
#include <proc_mm.h>
|
||||
#include <ptrace_user.h>
|
||||
#include <registers.h>
|
||||
#include <skas.h>
|
||||
#include <skas_ptrace.h>
|
||||
#include <sysdep/stub.h>
|
||||
|
||||
int is_skas_winch(int pid, int fd, void *data)
|
||||
|
@ -91,50 +89,33 @@ extern unsigned long current_stub_stack(void);
|
|||
static void get_skas_faultinfo(int pid, struct faultinfo *fi)
|
||||
{
|
||||
int err;
|
||||
unsigned long fpregs[FP_SIZE];
|
||||
|
||||
if (ptrace_faultinfo) {
|
||||
err = ptrace(PTRACE_FAULTINFO, pid, 0, fi);
|
||||
if (err) {
|
||||
printk(UM_KERN_ERR "get_skas_faultinfo - "
|
||||
"PTRACE_FAULTINFO failed, errno = %d\n", errno);
|
||||
fatal_sigsegv();
|
||||
}
|
||||
|
||||
/* Special handling for i386, which has different structs */
|
||||
if (sizeof(struct ptrace_faultinfo) < sizeof(struct faultinfo))
|
||||
memset((char *)fi + sizeof(struct ptrace_faultinfo), 0,
|
||||
sizeof(struct faultinfo) -
|
||||
sizeof(struct ptrace_faultinfo));
|
||||
err = get_fp_registers(pid, fpregs);
|
||||
if (err < 0) {
|
||||
printk(UM_KERN_ERR "save_fp_registers returned %d\n",
|
||||
err);
|
||||
fatal_sigsegv();
|
||||
}
|
||||
else {
|
||||
unsigned long fpregs[FP_SIZE];
|
||||
err = ptrace(PTRACE_CONT, pid, 0, SIGSEGV);
|
||||
if (err) {
|
||||
printk(UM_KERN_ERR "Failed to continue stub, pid = %d, "
|
||||
"errno = %d\n", pid, errno);
|
||||
fatal_sigsegv();
|
||||
}
|
||||
wait_stub_done(pid);
|
||||
|
||||
err = get_fp_registers(pid, fpregs);
|
||||
if (err < 0) {
|
||||
printk(UM_KERN_ERR "save_fp_registers returned %d\n",
|
||||
err);
|
||||
fatal_sigsegv();
|
||||
}
|
||||
err = ptrace(PTRACE_CONT, pid, 0, SIGSEGV);
|
||||
if (err) {
|
||||
printk(UM_KERN_ERR "Failed to continue stub, pid = %d, "
|
||||
"errno = %d\n", pid, errno);
|
||||
fatal_sigsegv();
|
||||
}
|
||||
wait_stub_done(pid);
|
||||
/*
|
||||
* faultinfo is prepared by the stub-segv-handler at start of
|
||||
* the stub stack page. We just have to copy it.
|
||||
*/
|
||||
memcpy(fi, (void *)current_stub_stack(), sizeof(*fi));
|
||||
|
||||
/*
|
||||
* faultinfo is prepared by the stub-segv-handler at start of
|
||||
* the stub stack page. We just have to copy it.
|
||||
*/
|
||||
memcpy(fi, (void *)current_stub_stack(), sizeof(*fi));
|
||||
|
||||
err = put_fp_registers(pid, fpregs);
|
||||
if (err < 0) {
|
||||
printk(UM_KERN_ERR "put_fp_registers returned %d\n",
|
||||
err);
|
||||
fatal_sigsegv();
|
||||
}
|
||||
err = put_fp_registers(pid, fpregs);
|
||||
if (err < 0) {
|
||||
printk(UM_KERN_ERR "put_fp_registers returned %d\n",
|
||||
err);
|
||||
fatal_sigsegv();
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -198,7 +179,8 @@ extern int __syscall_stub_start;
|
|||
static int userspace_tramp(void *stack)
|
||||
{
|
||||
void *addr;
|
||||
int err;
|
||||
int err, fd;
|
||||
unsigned long long offset;
|
||||
|
||||
ptrace(PTRACE_TRACEME, 0, 0, 0);
|
||||
|
||||
|
@ -211,36 +193,32 @@ static int userspace_tramp(void *stack)
|
|||
exit(1);
|
||||
}
|
||||
|
||||
if (!proc_mm) {
|
||||
/*
|
||||
* This has a pte, but it can't be mapped in with the usual
|
||||
* tlb_flush mechanism because this is part of that mechanism
|
||||
*/
|
||||
int fd;
|
||||
unsigned long long offset;
|
||||
fd = phys_mapping(to_phys(&__syscall_stub_start), &offset);
|
||||
addr = mmap64((void *) STUB_CODE, UM_KERN_PAGE_SIZE,
|
||||
PROT_EXEC, MAP_FIXED | MAP_PRIVATE, fd, offset);
|
||||
/*
|
||||
* This has a pte, but it can't be mapped in with the usual
|
||||
* tlb_flush mechanism because this is part of that mechanism
|
||||
*/
|
||||
fd = phys_mapping(to_phys(&__syscall_stub_start), &offset);
|
||||
addr = mmap64((void *) STUB_CODE, UM_KERN_PAGE_SIZE,
|
||||
PROT_EXEC, MAP_FIXED | MAP_PRIVATE, fd, offset);
|
||||
if (addr == MAP_FAILED) {
|
||||
printk(UM_KERN_ERR "mapping mmap stub at 0x%lx failed, "
|
||||
"errno = %d\n", STUB_CODE, errno);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (stack != NULL) {
|
||||
fd = phys_mapping(to_phys(stack), &offset);
|
||||
addr = mmap((void *) STUB_DATA,
|
||||
UM_KERN_PAGE_SIZE, PROT_READ | PROT_WRITE,
|
||||
MAP_FIXED | MAP_SHARED, fd, offset);
|
||||
if (addr == MAP_FAILED) {
|
||||
printk(UM_KERN_ERR "mapping mmap stub at 0x%lx failed, "
|
||||
"errno = %d\n", STUB_CODE, errno);
|
||||
printk(UM_KERN_ERR "mapping segfault stack "
|
||||
"at 0x%lx failed, errno = %d\n",
|
||||
STUB_DATA, errno);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (stack != NULL) {
|
||||
fd = phys_mapping(to_phys(stack), &offset);
|
||||
addr = mmap((void *) STUB_DATA,
|
||||
UM_KERN_PAGE_SIZE, PROT_READ | PROT_WRITE,
|
||||
MAP_FIXED | MAP_SHARED, fd, offset);
|
||||
if (addr == MAP_FAILED) {
|
||||
printk(UM_KERN_ERR "mapping segfault stack "
|
||||
"at 0x%lx failed, errno = %d\n",
|
||||
STUB_DATA, errno);
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (!ptrace_faultinfo && (stack != NULL)) {
|
||||
if (stack != NULL) {
|
||||
struct sigaction sa;
|
||||
|
||||
unsigned long v = STUB_CODE +
|
||||
|
@ -286,11 +264,7 @@ int start_userspace(unsigned long stub_stack)
|
|||
|
||||
sp = (unsigned long) stack + UM_KERN_PAGE_SIZE - sizeof(void *);
|
||||
|
||||
flags = CLONE_FILES;
|
||||
if (proc_mm)
|
||||
flags |= CLONE_VM;
|
||||
else
|
||||
flags |= SIGCHLD;
|
||||
flags = CLONE_FILES | SIGCHLD;
|
||||
|
||||
pid = clone(userspace_tramp, (void *) sp, flags, (void *) stub_stack);
|
||||
if (pid < 0) {
|
||||
|
@ -413,8 +387,7 @@ void userspace(struct uml_pt_regs *regs)
|
|||
|
||||
switch (sig) {
|
||||
case SIGSEGV:
|
||||
if (PTRACE_FULL_FAULTINFO ||
|
||||
!ptrace_faultinfo) {
|
||||
if (PTRACE_FULL_FAULTINFO) {
|
||||
get_skas_faultinfo(pid,
|
||||
®s->faultinfo);
|
||||
(*sig_info[SIGSEGV])(SIGSEGV, (struct siginfo *)&si,
|
||||
|
@ -571,67 +544,6 @@ int copy_context_skas0(unsigned long new_stack, int pid)
|
|||
return err;
|
||||
}
|
||||
|
||||
/*
|
||||
* This is used only, if stub pages are needed, while proc_mm is
|
||||
* available. Opening /proc/mm creates a new mm_context, which lacks
|
||||
* the stub-pages. Thus, we map them using /proc/mm-fd
|
||||
*/
|
||||
int map_stub_pages(int fd, unsigned long code, unsigned long data,
|
||||
unsigned long stack)
|
||||
{
|
||||
struct proc_mm_op mmop;
|
||||
int n;
|
||||
unsigned long long code_offset;
|
||||
int code_fd = phys_mapping(to_phys((void *) &__syscall_stub_start),
|
||||
&code_offset);
|
||||
|
||||
mmop = ((struct proc_mm_op) { .op = MM_MMAP,
|
||||
.u =
|
||||
{ .mmap =
|
||||
{ .addr = code,
|
||||
.len = UM_KERN_PAGE_SIZE,
|
||||
.prot = PROT_EXEC,
|
||||
.flags = MAP_FIXED | MAP_PRIVATE,
|
||||
.fd = code_fd,
|
||||
.offset = code_offset
|
||||
} } });
|
||||
CATCH_EINTR(n = write(fd, &mmop, sizeof(mmop)));
|
||||
if (n != sizeof(mmop)) {
|
||||
n = errno;
|
||||
printk(UM_KERN_ERR "mmap args - addr = 0x%lx, fd = %d, "
|
||||
"offset = %llx\n", code, code_fd,
|
||||
(unsigned long long) code_offset);
|
||||
printk(UM_KERN_ERR "map_stub_pages : /proc/mm map for code "
|
||||
"failed, err = %d\n", n);
|
||||
return -n;
|
||||
}
|
||||
|
||||
if (stack) {
|
||||
unsigned long long map_offset;
|
||||
int map_fd = phys_mapping(to_phys((void *)stack), &map_offset);
|
||||
mmop = ((struct proc_mm_op)
|
||||
{ .op = MM_MMAP,
|
||||
.u =
|
||||
{ .mmap =
|
||||
{ .addr = data,
|
||||
.len = UM_KERN_PAGE_SIZE,
|
||||
.prot = PROT_READ | PROT_WRITE,
|
||||
.flags = MAP_FIXED | MAP_SHARED,
|
||||
.fd = map_fd,
|
||||
.offset = map_offset
|
||||
} } });
|
||||
CATCH_EINTR(n = write(fd, &mmop, sizeof(mmop)));
|
||||
if (n != sizeof(mmop)) {
|
||||
n = errno;
|
||||
printk(UM_KERN_ERR "map_stub_pages : /proc/mm map for "
|
||||
"data failed, err = %d\n", n);
|
||||
return -n;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void new_thread(void *stack, jmp_buf *buf, void (*handler)(void))
|
||||
{
|
||||
(*buf)[0].JB_IP = (unsigned long) handler;
|
||||
|
@ -674,7 +586,7 @@ int start_idle_thread(void *stack, jmp_buf *switch_buf)
|
|||
n = setjmp(initial_jmpbuf);
|
||||
switch (n) {
|
||||
case INIT_JMP_NEW_THREAD:
|
||||
(*switch_buf)[0].JB_IP = (unsigned long) new_thread_handler;
|
||||
(*switch_buf)[0].JB_IP = (unsigned long) uml_finishsetup;
|
||||
(*switch_buf)[0].JB_SP = (unsigned long) stack +
|
||||
UM_THREAD_SIZE - sizeof(void *);
|
||||
break;
|
||||
|
@ -728,17 +640,5 @@ void reboot_skas(void)
|
|||
|
||||
void __switch_mm(struct mm_id *mm_idp)
|
||||
{
|
||||
int err;
|
||||
|
||||
/* FIXME: need cpu pid in __switch_mm */
|
||||
if (proc_mm) {
|
||||
err = ptrace(PTRACE_SWITCH_MM, userspace_pid[0], 0,
|
||||
mm_idp->u.mm_fd);
|
||||
if (err) {
|
||||
printk(UM_KERN_ERR "__switch_mm - PTRACE_SWITCH_MM "
|
||||
"failed, errno = %d\n", errno);
|
||||
fatal_sigsegv();
|
||||
}
|
||||
}
|
||||
else userspace_pid[0] = mm_idp->u.pid;
|
||||
userspace_pid[0] = mm_idp->u.pid;
|
||||
}
|
||||
|
|
|
@ -24,7 +24,6 @@
|
|||
#include <ptrace_user.h>
|
||||
#include <registers.h>
|
||||
#include <skas.h>
|
||||
#include <skas_ptrace.h>
|
||||
|
||||
static void ptrace_child(void)
|
||||
{
|
||||
|
@ -142,44 +141,6 @@ static int stop_ptraced_child(int pid, int exitcode, int mustexit)
|
|||
return ret;
|
||||
}
|
||||
|
||||
/* Changed only during early boot */
|
||||
int ptrace_faultinfo;
|
||||
static int disable_ptrace_faultinfo;
|
||||
|
||||
int ptrace_ldt;
|
||||
static int disable_ptrace_ldt;
|
||||
|
||||
int proc_mm;
|
||||
static int disable_proc_mm;
|
||||
|
||||
int have_switch_mm;
|
||||
static int disable_switch_mm;
|
||||
|
||||
int skas_needs_stub;
|
||||
|
||||
static int __init skas0_cmd_param(char *str, int* add)
|
||||
{
|
||||
disable_ptrace_faultinfo = 1;
|
||||
disable_ptrace_ldt = 1;
|
||||
disable_proc_mm = 1;
|
||||
disable_switch_mm = 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* The two __uml_setup would conflict, without this stupid alias. */
|
||||
|
||||
static int __init mode_skas0_cmd_param(char *str, int* add)
|
||||
__attribute__((alias("skas0_cmd_param")));
|
||||
|
||||
__uml_setup("skas0", skas0_cmd_param,
|
||||
"skas0\n"
|
||||
" Disables SKAS3 and SKAS4 usage, so that SKAS0 is used\n\n");
|
||||
|
||||
__uml_setup("mode=skas0", mode_skas0_cmd_param,
|
||||
"mode=skas0\n"
|
||||
" Disables SKAS3 and SKAS4 usage, so that SKAS0 is used.\n\n");
|
||||
|
||||
/* Changed only during early boot */
|
||||
static int force_sysemu_disabled = 0;
|
||||
|
||||
|
@ -376,121 +337,6 @@ void __init os_early_checks(void)
|
|||
stop_ptraced_child(pid, 1, 1);
|
||||
}
|
||||
|
||||
static int __init noprocmm_cmd_param(char *str, int* add)
|
||||
{
|
||||
disable_proc_mm = 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
__uml_setup("noprocmm", noprocmm_cmd_param,
|
||||
"noprocmm\n"
|
||||
" Turns off usage of /proc/mm, even if host supports it.\n"
|
||||
" To support /proc/mm, the host needs to be patched using\n"
|
||||
" the current skas3 patch.\n\n");
|
||||
|
||||
static int __init noptracefaultinfo_cmd_param(char *str, int* add)
|
||||
{
|
||||
disable_ptrace_faultinfo = 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
__uml_setup("noptracefaultinfo", noptracefaultinfo_cmd_param,
|
||||
"noptracefaultinfo\n"
|
||||
" Turns off usage of PTRACE_FAULTINFO, even if host supports\n"
|
||||
" it. To support PTRACE_FAULTINFO, the host needs to be patched\n"
|
||||
" using the current skas3 patch.\n\n");
|
||||
|
||||
static int __init noptraceldt_cmd_param(char *str, int* add)
|
||||
{
|
||||
disable_ptrace_ldt = 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
__uml_setup("noptraceldt", noptraceldt_cmd_param,
|
||||
"noptraceldt\n"
|
||||
" Turns off usage of PTRACE_LDT, even if host supports it.\n"
|
||||
" To support PTRACE_LDT, the host needs to be patched using\n"
|
||||
" the current skas3 patch.\n\n");
|
||||
|
||||
static inline void check_skas3_ptrace_faultinfo(void)
|
||||
{
|
||||
struct ptrace_faultinfo fi;
|
||||
int pid, n;
|
||||
|
||||
non_fatal(" - PTRACE_FAULTINFO...");
|
||||
pid = start_ptraced_child();
|
||||
|
||||
n = ptrace(PTRACE_FAULTINFO, pid, 0, &fi);
|
||||
if (n < 0) {
|
||||
if (errno == EIO)
|
||||
non_fatal("not found\n");
|
||||
else
|
||||
perror("not found");
|
||||
} else if (disable_ptrace_faultinfo)
|
||||
non_fatal("found but disabled on command line\n");
|
||||
else {
|
||||
ptrace_faultinfo = 1;
|
||||
non_fatal("found\n");
|
||||
}
|
||||
|
||||
stop_ptraced_child(pid, 1, 1);
|
||||
}
|
||||
|
||||
static inline void check_skas3_ptrace_ldt(void)
|
||||
{
|
||||
#ifdef PTRACE_LDT
|
||||
int pid, n;
|
||||
unsigned char ldtbuf[40];
|
||||
struct ptrace_ldt ldt_op = (struct ptrace_ldt) {
|
||||
.func = 2, /* read default ldt */
|
||||
.ptr = ldtbuf,
|
||||
.bytecount = sizeof(ldtbuf)};
|
||||
|
||||
non_fatal(" - PTRACE_LDT...");
|
||||
pid = start_ptraced_child();
|
||||
|
||||
n = ptrace(PTRACE_LDT, pid, 0, (unsigned long) &ldt_op);
|
||||
if (n < 0) {
|
||||
if (errno == EIO)
|
||||
non_fatal("not found\n");
|
||||
else
|
||||
perror("not found");
|
||||
} else if (disable_ptrace_ldt)
|
||||
non_fatal("found, but use is disabled\n");
|
||||
else {
|
||||
ptrace_ldt = 1;
|
||||
non_fatal("found\n");
|
||||
}
|
||||
|
||||
stop_ptraced_child(pid, 1, 1);
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void check_skas3_proc_mm(void)
|
||||
{
|
||||
non_fatal(" - /proc/mm...");
|
||||
if (access("/proc/mm", W_OK) < 0)
|
||||
perror("not found");
|
||||
else if (disable_proc_mm)
|
||||
non_fatal("found but disabled on command line\n");
|
||||
else {
|
||||
proc_mm = 1;
|
||||
non_fatal("found\n");
|
||||
}
|
||||
}
|
||||
|
||||
void can_do_skas(void)
|
||||
{
|
||||
non_fatal("Checking for the skas3 patch in the host:\n");
|
||||
|
||||
check_skas3_proc_mm();
|
||||
check_skas3_ptrace_faultinfo();
|
||||
check_skas3_ptrace_ldt();
|
||||
|
||||
if (!proc_mm || !ptrace_faultinfo || !ptrace_ldt)
|
||||
skas_needs_stub = 1;
|
||||
}
|
||||
|
||||
int __init parse_iomem(char *str, int *add)
|
||||
{
|
||||
struct iomem_region *new;
|
||||
|
|
|
@ -1,11 +0,0 @@
|
|||
OBJ = built-in.o
|
||||
|
||||
OBJS =
|
||||
|
||||
all: $(OBJ)
|
||||
|
||||
$(OBJ): $(OBJS)
|
||||
rm -f $@
|
||||
$(LD) $(LINKFLAGS) --start-group $^ --end-group -o $@
|
||||
|
||||
clean-files := $(OBJS) link.ld
|
|
@ -1,16 +0,0 @@
|
|||
/*
|
||||
* Copyright (C) 2000 Jeff Dike (jdike@karaya.com)
|
||||
* Licensed under the GPL
|
||||
*/
|
||||
|
||||
#ifndef __SYSDEP_IA64_PTRACE_H
|
||||
#define __SYSDEP_IA64_PTRACE_H
|
||||
|
||||
struct sys_pt_regs {
|
||||
int foo;
|
||||
};
|
||||
|
||||
#define EMPTY_REGS { 0 }
|
||||
|
||||
#endif
|
||||
|
|
@ -1,10 +0,0 @@
|
|||
/*
|
||||
* Copyright (C) 2000 Jeff Dike (jdike@karaya.com)
|
||||
* Licensed under the GPL
|
||||
*/
|
||||
|
||||
#ifndef __SYSDEP_IA64_SIGCONTEXT_H
|
||||
#define __SYSDEP_IA64_SIGCONTEXT_H
|
||||
|
||||
#endif
|
||||
|
|
@ -1,22 +0,0 @@
|
|||
/*
|
||||
* Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
|
||||
* Licensed under the GPL
|
||||
*/
|
||||
|
||||
#ifndef __SYSDEP_IA64_SKAS_PTRACE_H
|
||||
#define __SYSDEP_IA64_SKAS_PTRACE_H
|
||||
|
||||
struct ptrace_faultinfo {
|
||||
int is_write;
|
||||
unsigned long addr;
|
||||
};
|
||||
|
||||
struct ptrace_ldt {
|
||||
int func;
|
||||
void *ptr;
|
||||
unsigned long bytecount;
|
||||
};
|
||||
|
||||
#define PTRACE_LDT 54
|
||||
|
||||
#endif
|
|
@ -1,10 +0,0 @@
|
|||
/*
|
||||
* Copyright (C) 2000 Jeff Dike (jdike@karaya.com)
|
||||
* Licensed under the GPL
|
||||
*/
|
||||
|
||||
#ifndef __SYSDEP_IA64_SYSCALLS_H
|
||||
#define __SYSDEP_IA64_SYSCALLS_H
|
||||
|
||||
#endif
|
||||
|
|
@ -1,65 +0,0 @@
|
|||
OBJ = built-in.o
|
||||
|
||||
.S.o:
|
||||
$(CC) $(KBUILD_AFLAGS) -D__ASSEMBLY__ -D__UM_PPC__ -c $< -o $*.o
|
||||
|
||||
OBJS = ptrace.o sigcontext.o checksum.o miscthings.o misc.o \
|
||||
ptrace_user.o sysrq.o
|
||||
|
||||
asflags-y := -DCONFIG_PPC32 -I. -I$(srctree)/arch/ppc/kernel
|
||||
|
||||
all: $(OBJ)
|
||||
|
||||
$(OBJ): $(OBJS)
|
||||
rm -f $@
|
||||
$(LD) $(LINKFLAGS) --start-group $^ --end-group -o $@
|
||||
|
||||
ptrace_user.o: ptrace_user.c
|
||||
$(CC) -D__KERNEL__ $(USER_CFLAGS) $(ccflags-y) -c -o $@ $<
|
||||
|
||||
sigcontext.o: sigcontext.c
|
||||
$(CC) $(USER_CFLAGS) $(ccflags-y) -c -o $@ $<
|
||||
|
||||
checksum.S:
|
||||
rm -f $@
|
||||
ln -s $(srctree)/arch/ppc/lib/$@ $@
|
||||
|
||||
mk_defs.c:
|
||||
rm -f $@
|
||||
ln -s $(srctree)/arch/ppc/kernel/$@ $@
|
||||
|
||||
ppc_defs.head:
|
||||
rm -f $@
|
||||
ln -s $(srctree)/arch/ppc/kernel/$@ $@
|
||||
|
||||
ppc_defs.h: mk_defs.c ppc_defs.head \
|
||||
$(srctree)/include/asm-ppc/mmu.h \
|
||||
$(srctree)/include/asm-ppc/processor.h \
|
||||
$(srctree)/include/asm-ppc/pgtable.h \
|
||||
$(srctree)/include/asm-ppc/ptrace.h
|
||||
# $(CC) $(CFLAGS) -S mk_defs.c
|
||||
cp ppc_defs.head ppc_defs.h
|
||||
# for bk, this way we can write to the file even if it's not checked out
|
||||
echo '#define THREAD 608' >> ppc_defs.h
|
||||
echo '#define PT_REGS 8' >> ppc_defs.h
|
||||
echo '#define CLONE_VM 256' >> ppc_defs.h
|
||||
# chmod u+w ppc_defs.h
|
||||
# grep '^#define' mk_defs.s >> ppc_defs.h
|
||||
# rm mk_defs.s
|
||||
|
||||
# the asm link is horrible, and breaks the other targets. This is also
|
||||
# not going to work with parallel makes.
|
||||
|
||||
checksum.o: checksum.S
|
||||
rm -f asm
|
||||
ln -s $(srctree)/include/asm-ppc asm
|
||||
$(CC) $(asflags-y) $(KBUILD_AFLAGS) -D__ASSEMBLY__ -D__UM_PPC__ -c $< -o $*.o
|
||||
rm -f asm
|
||||
|
||||
misc.o: misc.S ppc_defs.h
|
||||
rm -f asm
|
||||
ln -s $(srctree)/include/asm-ppc asm
|
||||
$(CC) $(asflags-y) $(KBUILD_AFLAGS) -D__ASSEMBLY__ -D__UM_PPC__ -c $< -o $*.o
|
||||
rm -f asm
|
||||
|
||||
clean-files := $(OBJS) ppc_defs.h checksum.S mk_defs.c
|
|
@ -1,8 +0,0 @@
#ifndef __UM_ARCHPARAM_PPC_H
#define __UM_ARCHPARAM_PPC_H

/********* Bits for asm-um/string.h **********/

#define __HAVE_ARCH_STRRCHR

#endif
@ -1,51 +0,0 @@
#ifndef __UM_ELF_PPC_H
#define __UM_ELF_PPC_H


extern long elf_aux_hwcap;
#define ELF_HWCAP (elf_aux_hwcap)

#define SET_PERSONALITY(ex) do ; while(0)

#define ELF_EXEC_PAGESIZE 4096

#define elf_check_arch(x) (1)

#ifdef CONFIG_64BIT
#define ELF_CLASS ELFCLASS64
#else
#define ELF_CLASS ELFCLASS32
#endif

#define R_386_NONE	0
#define R_386_32	1
#define R_386_PC32	2
#define R_386_GOT32	3
#define R_386_PLT32	4
#define R_386_COPY	5
#define R_386_GLOB_DAT	6
#define R_386_JMP_SLOT	7
#define R_386_RELATIVE	8
#define R_386_GOTOFF	9
#define R_386_GOTPC	10
#define R_386_NUM	11

#define ELF_PLATFORM (0)

#define ELF_ET_DYN_BASE (0x08000000)

/* the following stolen from asm-ppc/elf.h */
#define ELF_NGREG	48	/* includes nip, msr, lr, etc. */
#define ELF_NFPREG	33	/* includes fpscr */
/* General registers */
typedef unsigned long elf_greg_t;
typedef elf_greg_t elf_gregset_t[ELF_NGREG];

/* Floating point registers */
typedef double elf_fpreg_t;
typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];

#define ELF_DATA	ELFDATA2MSB
#define ELF_ARCH	EM_PPC

#endif
@ -1,15 +0,0 @@
|
|||
#ifndef __UM_PROCESSOR_PPC_H
|
||||
#define __UM_PROCESSOR_PPC_H
|
||||
|
||||
#if defined(__ASSEMBLY__)
|
||||
|
||||
#define CONFIG_PPC_MULTIPLATFORM
|
||||
#include "arch/processor.h"
|
||||
|
||||
#else
|
||||
|
||||
#include "asm/processor-generic.h"
|
||||
|
||||
#endif
|
||||
|
||||
#endif
|
|
@ -1,111 +0,0 @@
/*
 * This file contains miscellaneous low-level functions.
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras.
 *
 * A couple of functions stolen from arch/ppc/kernel/misc.S for UML
 * by Chris Emerson.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <asm/processor.h>
#include "ppc_asm.h"

#if defined(CONFIG_4xx) || defined(CONFIG_8xx)
#define CACHE_LINE_SIZE		16
#define LG_CACHE_LINE_SIZE	4
#define MAX_COPY_PREFETCH	1
#else
#define CACHE_LINE_SIZE		32
#define LG_CACHE_LINE_SIZE	5
#define MAX_COPY_PREFETCH	4
#endif /* CONFIG_4xx || CONFIG_8xx */

	.text

/*
 * Clear a page using the dcbz instruction, which doesn't cause any
 * memory traffic (except to write out any cache lines which get
 * displaced).  This only works on cacheable memory.
 */
_GLOBAL(clear_page)
	li	r0,4096/CACHE_LINE_SIZE
	mtctr	r0
#ifdef CONFIG_8xx
	li	r4, 0
1:	stw	r4, 0(r3)
	stw	r4, 4(r3)
	stw	r4, 8(r3)
	stw	r4, 12(r3)
#else
1:	dcbz	0,r3
#endif
	addi	r3,r3,CACHE_LINE_SIZE
	bdnz	1b
	blr

/*
 * Copy a whole page.  We use the dcbz instruction on the destination
 * to reduce memory traffic (it eliminates the unnecessary reads of
 * the destination into cache).  This requires that the destination
 * is cacheable.
 */
#define COPY_16_BYTES		\
	lwz	r6,4(r4);	\
	lwz	r7,8(r4);	\
	lwz	r8,12(r4);	\
	lwzu	r9,16(r4);	\
	stw	r6,4(r3);	\
	stw	r7,8(r3);	\
	stw	r8,12(r3);	\
	stwu	r9,16(r3)

_GLOBAL(copy_page)
	addi	r3,r3,-4
	addi	r4,r4,-4
	li	r5,4

#ifndef CONFIG_8xx
#if MAX_COPY_PREFETCH > 1
	li	r0,MAX_COPY_PREFETCH
	li	r11,4
	mtctr	r0
11:	dcbt	r11,r4
	addi	r11,r11,CACHE_LINE_SIZE
	bdnz	11b
#else /* MAX_COPY_PREFETCH == 1 */
	dcbt	r5,r4
	li	r11,CACHE_LINE_SIZE+4
#endif /* MAX_COPY_PREFETCH */
#endif /* CONFIG_8xx */

	li	r0,4096/CACHE_LINE_SIZE
	mtctr	r0
1:
#ifndef CONFIG_8xx
	dcbt	r11,r4
	dcbz	r5,r3
#endif
	COPY_16_BYTES
#if CACHE_LINE_SIZE >= 32
	COPY_16_BYTES
#if CACHE_LINE_SIZE >= 64
	COPY_16_BYTES
	COPY_16_BYTES
#if CACHE_LINE_SIZE >= 128
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
#endif
#endif
#endif
	bdnz	1b
	blr
@ -1,42 +0,0 @@
#include <linux/threads.h>
#include <linux/stddef.h>  // for NULL
#include <linux/elf.h>  // for AT_NULL

/* The following function nicked from arch/ppc/kernel/process.c and
 * adapted slightly */
/*
 * XXX ld.so expects the auxiliary table to start on
 * a 16-byte boundary, so we have to find it and
 * move it up. :-(
 */
void shove_aux_table(unsigned long sp)
{
	int argc;
	char *p;
	unsigned long e;
	unsigned long aux_start, offset;

	argc = *(int *)sp;
	sp += sizeof(int) + (argc + 1) * sizeof(char *);
	/* skip over the environment pointers */
	do {
		p = *(char **)sp;
		sp += sizeof(char *);
	} while (p != NULL);
	aux_start = sp;
	/* skip to the end of the auxiliary table */
	do {
		e = *(unsigned long *)sp;
		sp += 2 * sizeof(unsigned long);
	} while (e != AT_NULL);
	offset = ((aux_start + 15) & ~15) - aux_start;
	if (offset != 0) {
		do {
			sp -= sizeof(unsigned long);
			e = *(unsigned long *)sp;
			*(unsigned long *)(sp + offset) = e;
		} while (sp > aux_start);
	}
}
/* END stuff taken from arch/ppc/kernel/process.c */
@ -1,58 +0,0 @@
#include <linux/sched.h>
#include "asm/ptrace.h"

int putreg(struct task_struct *child, unsigned long regno,
	   unsigned long value)
{
	child->thread.process_regs.regs[regno >> 2] = value;
	return 0;
}

int poke_user(struct task_struct *child, long addr, long data)
{
	if ((addr & 3) || addr < 0)
		return -EIO;

	if (addr < MAX_REG_OFFSET)
		return putreg(child, addr, data);

	else if((addr >= offsetof(struct user, u_debugreg[0])) &&
		(addr <= offsetof(struct user, u_debugreg[7]))){
		addr -= offsetof(struct user, u_debugreg[0]);
		addr = addr >> 2;
		if((addr == 4) || (addr == 5)) return -EIO;
		child->thread.arch.debugregs[addr] = data;
		return 0;
	}
	return -EIO;
}

unsigned long getreg(struct task_struct *child, unsigned long regno)
{
	unsigned long retval = ~0UL;

	retval &= child->thread.process_regs.regs[regno >> 2];
	return retval;
}

int peek_user(struct task_struct *child, long addr, long data)
{
	/* read the word at location addr in the USER area. */
	unsigned long tmp;

	if ((addr & 3) || addr < 0)
		return -EIO;

	tmp = 0;  /* Default return condition */
	if(addr < MAX_REG_OFFSET){
		tmp = getreg(child, addr);
	}
	else if((addr >= offsetof(struct user, u_debugreg[0])) &&
		(addr <= offsetof(struct user, u_debugreg[7]))){
		addr -= offsetof(struct user, u_debugreg[0]);
		addr = addr >> 2;
		tmp = child->thread.arch.debugregs[addr];
	}
	return put_user(tmp, (unsigned long *) data);
}
@ -1,29 +0,0 @@
#include <errno.h>
#include <asm/ptrace.h>
#include <sysdep/ptrace.h>

int ptrace_getregs(long pid, unsigned long *regs_out)
{
	int i;
	for (i=0; i < sizeof(struct sys_pt_regs)/sizeof(PPC_REG); ++i) {
		errno = 0;
		regs_out->regs[i] = ptrace(PTRACE_PEEKUSR, pid, i*4, 0);
		if (errno) {
			return -errno;
		}
	}
	return 0;
}

int ptrace_setregs(long pid, unsigned long *regs_in)
{
	int i;
	for (i=0; i < sizeof(struct sys_pt_regs)/sizeof(PPC_REG); ++i) {
		if (i != 34 /* FIXME: PT_ORIG_R3 */ && i <= PT_MQ) {
			if (ptrace(PTRACE_POKEUSR, pid, i*4, regs_in->regs[i]) < 0) {
				return -errno;
			}
		}
	}
	return 0;
}
@ -1,93 +0,0 @@
/*
 * Licensed under the GPL
 */

#ifndef __SYS_PTRACE_PPC_H
#define __SYS_PTRACE_PPC_H

#include <linux/types.h>

/* the following taken from <asm-ppc/ptrace.h> */

#ifdef CONFIG_PPC64
#define PPC_REG unsigned long /*long*/
#else
#define PPC_REG unsigned long
#endif
struct sys_pt_regs_s {
	PPC_REG gpr[32];
	PPC_REG nip;
	PPC_REG msr;
	PPC_REG orig_gpr3;	/* Used for restarting system calls */
	PPC_REG ctr;
	PPC_REG link;
	PPC_REG xer;
	PPC_REG ccr;
	PPC_REG mq;		/* 601 only (not used at present) */
				/* Used on APUS to hold IPL value. */
	PPC_REG trap;		/* Reason for being here */
	PPC_REG dar;		/* Fault registers */
	PPC_REG dsisr;
	PPC_REG result;		/* Result of a system call */
};

#define NUM_REGS (sizeof(struct sys_pt_regs_s) / sizeof(PPC_REG))

struct sys_pt_regs {
	PPC_REG regs[sizeof(struct sys_pt_regs_s) / sizeof(PPC_REG)];
};

#define UM_MAX_REG (PT_FPR0)
#define UM_MAX_REG_OFFSET (UM_MAX_REG * sizeof(PPC_REG))

#define EMPTY_REGS { { [ 0 ... NUM_REGS - 1] = 0 } }

#define UM_REG(r, n) ((r)->regs[n])

#define UM_SYSCALL_RET(r) UM_REG(r, PT_R3)
#define UM_SP(r) UM_REG(r, PT_R1)
#define UM_IP(r) UM_REG(r, PT_NIP)
#define UM_ELF_ZERO(r) UM_REG(r, PT_FPSCR)
#define UM_SYSCALL_NR(r) UM_REG(r, PT_R0)
#define UM_SYSCALL_ARG1(r) UM_REG(r, PT_ORIG_R3)
#define UM_SYSCALL_ARG2(r) UM_REG(r, PT_R4)
#define UM_SYSCALL_ARG3(r) UM_REG(r, PT_R5)
#define UM_SYSCALL_ARG4(r) UM_REG(r, PT_R6)
#define UM_SYSCALL_ARG5(r) UM_REG(r, PT_R7)
#define UM_SYSCALL_ARG6(r) UM_REG(r, PT_R8)

#define UM_SYSCALL_NR_OFFSET (PT_R0 * sizeof(PPC_REG))
#define UM_SYSCALL_RET_OFFSET (PT_R3 * sizeof(PPC_REG))
#define UM_SYSCALL_ARG1_OFFSET (PT_R3 * sizeof(PPC_REG))
#define UM_SYSCALL_ARG2_OFFSET (PT_R4 * sizeof(PPC_REG))
#define UM_SYSCALL_ARG3_OFFSET (PT_R5 * sizeof(PPC_REG))
#define UM_SYSCALL_ARG4_OFFSET (PT_R6 * sizeof(PPC_REG))
#define UM_SYSCALL_ARG5_OFFSET (PT_R7 * sizeof(PPC_REG))
#define UM_SYSCALL_ARG6_OFFSET (PT_R8 * sizeof(PPC_REG))
#define UM_SP_OFFSET (PT_R1 * sizeof(PPC_REG))
#define UM_IP_OFFSET (PT_NIP * sizeof(PPC_REG))
#define UM_ELF_ZERO_OFFSET (PT_R3 * sizeof(PPC_REG))

#define UM_SET_SYSCALL_RETURN(_regs, result)		\
do {							\
	if (result < 0) {				\
		(_regs)->regs[PT_CCR] |= 0x10000000;	\
		UM_SYSCALL_RET((_regs)) = -result;	\
	} else {					\
		UM_SYSCALL_RET((_regs)) = result;	\
	}						\
} while(0)

extern void shove_aux_table(unsigned long sp);
#define UM_FIX_EXEC_STACK(sp) shove_aux_table(sp);

/* These aren't actually defined.  The undefs are just to make sure
 * everyone's clear on the concept.
 */
#undef UML_HAVE_GETREGS
#undef UML_HAVE_GETFPREGS
#undef UML_HAVE_SETREGS
#undef UML_HAVE_SETFPREGS

#endif
@ -1,52 +0,0 @@
/*
 * Copyright (C) 2000 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */

#ifndef __SYS_SIGCONTEXT_PPC_H
#define __SYS_SIGCONTEXT_PPC_H

#define DSISR_WRITE 0x02000000

#define SC_FAULT_ADDR(sc) ({ \
		struct sigcontext *_sc = (sc); \
		long retval = -1; \
		switch (_sc->regs->trap) { \
		case 0x300: \
			/* data exception */ \
			retval = _sc->regs->dar; \
			break; \
		case 0x400: \
			/* instruction exception */ \
			retval = _sc->regs->nip; \
			break; \
		default: \
			panic("SC_FAULT_ADDR: unhandled trap type\n"); \
		} \
		retval; \
	})

#define SC_FAULT_WRITE(sc) ({ \
		struct sigcontext *_sc = (sc); \
		long retval = -1; \
		switch (_sc->regs->trap) { \
		case 0x300: \
			/* data exception */ \
			retval = !!(_sc->regs->dsisr & DSISR_WRITE); \
			break; \
		case 0x400: \
			/* instruction exception: not a write */ \
			retval = 0; \
			break; \
		default: \
			panic("SC_FAULT_ADDR: unhandled trap type\n"); \
		} \
		retval; \
	})

#define SC_IP(sc) ((sc)->regs->nip)
#define SC_SP(sc) ((sc)->regs->gpr[1])
#define SEGV_IS_FIXABLE(sc) (1)

#endif
@ -1,22 +0,0 @@
/*
 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */

#ifndef __SYSDEP_PPC_SKAS_PTRACE_H
#define __SYSDEP_PPC_SKAS_PTRACE_H

struct ptrace_faultinfo {
	int is_write;
	unsigned long addr;
};

struct ptrace_ldt {
	int func;
	void *ptr;
	unsigned long bytecount;
};

#define PTRACE_LDT 54

#endif
@ -1,43 +0,0 @@
/*
 * Copyright (C) 2000 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */

typedef long syscall_handler_t(unsigned long arg1, unsigned long arg2,
			       unsigned long arg3, unsigned long arg4,
			       unsigned long arg5, unsigned long arg6);

#define EXECUTE_SYSCALL(syscall, regs) \
	(*sys_call_table[syscall])(UM_SYSCALL_ARG1(&regs), \
				   UM_SYSCALL_ARG2(&regs), \
				   UM_SYSCALL_ARG3(&regs), \
				   UM_SYSCALL_ARG4(&regs), \
				   UM_SYSCALL_ARG5(&regs), \
				   UM_SYSCALL_ARG6(&regs))

extern syscall_handler_t sys_mincore;
extern syscall_handler_t sys_madvise;

/* old_mmap needs the correct prototype since syscall_kern.c includes
 * this file.
 */
int old_mmap(unsigned long addr, unsigned long len,
	     unsigned long prot, unsigned long flags,
	     unsigned long fd, unsigned long offset);

#define ARCH_SYSCALLS \
	[ __NR_modify_ldt ] = sys_ni_syscall, \
	[ __NR_pciconfig_read ] = sys_ni_syscall, \
	[ __NR_pciconfig_write ] = sys_ni_syscall, \
	[ __NR_pciconfig_iobase ] = sys_ni_syscall, \
	[ __NR_pivot_root ] = sys_ni_syscall, \
	[ __NR_multiplexer ] = sys_ni_syscall, \
	[ __NR_mmap ] = old_mmap, \
	[ __NR_madvise ] = sys_madvise, \
	[ __NR_mincore ] = sys_mincore, \
	[ __NR_iopl ] = (syscall_handler_t *) sys_ni_syscall, \
	[ __NR_utimes ] = (syscall_handler_t *) sys_utimes, \
	[ __NR_fadvise64 ] = (syscall_handler_t *) sys_fadvise64,

#define LAST_ARCH_SYSCALL __NR_fadvise64
@ -1,4 +0,0 @@
#include "asm/ptrace.h"
#include "asm/sigcontext.h"
#include <sysdep/ptrace.h>
@ -1,33 +0,0 @@
/*
 * Copyright (C) 2001 Chris Emerson (cemerson@chiark.greenend.org.uk)
 * Licensed under the GPL
 */

#include <linux/kernel.h>
#include <linux/smp.h>
#include "asm/ptrace.h"
#include "sysrq.h"

void show_regs(struct pt_regs_subarch *regs)
{
	printk("\n");
	show_regs_print_info(KERN_DEFAULT);

	printk("show_regs(): insert regs here.\n");
#if 0
	printk("\n");
	printk("EIP: %04x:[<%08lx>] CPU: %d",0xffff & regs->xcs, regs->eip,
	       smp_processor_id());
	if (regs->xcs & 3)
		printk(" ESP: %04x:%08lx",0xffff & regs->xss, regs->esp);
	printk(" EFLAGS: %08lx\n", regs->eflags);
	printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
	       regs->eax, regs->ebx, regs->ecx, regs->edx);
	printk("ESI: %08lx EDI: %08lx EBP: %08lx",
	       regs->esi, regs->edi, regs->ebp);
	printk(" DS: %04x ES: %04x\n",
	       0xffff & regs->xds, 0xffff & regs->xes);
#endif

	show_trace(current, &regs->gpr[1]);
}
@ -21,7 +21,6 @@ obj-$(CONFIG_BINFMT_ELF) += elfcore.o

subarch-y = ../lib/string_32.o ../lib/atomic64_32.o ../lib/atomic64_cx8_32.o
subarch-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += ../lib/rwsem.o
subarch-$(CONFIG_HIGHMEM) += ../mm/highmem_32.o

else

@ -36,22 +36,11 @@
#endif /* CONFIG_X86_PPRO_FENCE */
#define dma_wmb()	barrier()

#ifdef CONFIG_SMP

#define smp_mb()	mb()
#define smp_rmb()	dma_rmb()
#define smp_wmb()	barrier()
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)

#else /* CONFIG_SMP */

#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define set_mb(var, value) do { var = value; barrier(); } while (0)

#endif /* CONFIG_SMP */

#define read_barrier_depends()		do { } while (0)
#define smp_read_barrier_depends()	do { } while (0)

@ -210,7 +210,7 @@ extern int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu);

#define ELF_EXEC_PAGESIZE 4096

#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)

extern long elf_aux_hwcap;
#define ELF_HWCAP (elf_aux_hwcap)
@ -8,9 +8,7 @@
#include <linux/slab.h>
#include <asm/unistd.h>
#include <os.h>
#include <proc_mm.h>
#include <skas.h>
#include <skas_ptrace.h>
#include <sysdep/tls.h>

extern int modify_ldt(int func, void *ptr, unsigned long bytecount);
@ -19,105 +17,20 @@ static long write_ldt_entry(struct mm_id *mm_idp, int func,
			    struct user_desc *desc, void **addr, int done)
{
	long res;

	if (proc_mm) {
		/*
		 * This is a special handling for the case, that the mm to
		 * modify isn't current->active_mm.
		 * If this is called directly by modify_ldt,
		 * (current->active_mm->context.skas.u == mm_idp)
		 * will be true. So no call to __switch_mm(mm_idp) is done.
		 * If this is called in case of init_new_ldt or PTRACE_LDT,
		 * mm_idp won't belong to current->active_mm, but child->mm.
		 * So we need to switch child's mm into our userspace, then
		 * later switch back.
		 *
		 * Note: I'm unsure: should interrupts be disabled here?
		 */
		if (!current->active_mm || current->active_mm == &init_mm ||
		    mm_idp != &current->active_mm->context.id)
			__switch_mm(mm_idp);
	void *stub_addr;
	res = syscall_stub_data(mm_idp, (unsigned long *)desc,
				(sizeof(*desc) + sizeof(long) - 1) &
				~(sizeof(long) - 1),
				addr, &stub_addr);
	if (!res) {
		unsigned long args[] = { func,
					 (unsigned long)stub_addr,
					 sizeof(*desc),
					 0, 0, 0 };
		res = run_syscall_stub(mm_idp, __NR_modify_ldt, args,
				       0, addr, done);
	}

	if (ptrace_ldt) {
		struct ptrace_ldt ldt_op = (struct ptrace_ldt) {
			.func = func,
			.ptr = desc,
			.bytecount = sizeof(*desc)};
		u32 cpu;
		int pid;

		if (!proc_mm)
			pid = mm_idp->u.pid;
		else {
			cpu = get_cpu();
			pid = userspace_pid[cpu];
		}

		res = os_ptrace_ldt(pid, 0, (unsigned long) &ldt_op);

		if (proc_mm)
			put_cpu();
	}
	else {
		void *stub_addr;
		res = syscall_stub_data(mm_idp, (unsigned long *)desc,
					(sizeof(*desc) + sizeof(long) - 1) &
					~(sizeof(long) - 1),
					addr, &stub_addr);
		if (!res) {
			unsigned long args[] = { func,
						 (unsigned long)stub_addr,
						 sizeof(*desc),
						 0, 0, 0 };
			res = run_syscall_stub(mm_idp, __NR_modify_ldt, args,
					       0, addr, done);
		}
	}

	if (proc_mm) {
		/*
		 * This is the second part of special handling, that makes
		 * PTRACE_LDT possible to implement.
		 */
		if (current->active_mm && current->active_mm != &init_mm &&
		    mm_idp != &current->active_mm->context.id)
			__switch_mm(&current->active_mm->context.id);
	}

	return res;
}

static long read_ldt_from_host(void __user * ptr, unsigned long bytecount)
{
	int res, n;
	struct ptrace_ldt ptrace_ldt = (struct ptrace_ldt) {
		.func = 0,
		.bytecount = bytecount,
		.ptr = kmalloc(bytecount, GFP_KERNEL)};
	u32 cpu;

	if (ptrace_ldt.ptr == NULL)
		return -ENOMEM;

	/*
	 * This is called from sys_modify_ldt only, so userspace_pid gives
	 * us the right number
	 */

	cpu = get_cpu();
	res = os_ptrace_ldt(userspace_pid[cpu], 0, (unsigned long) &ptrace_ldt);
	put_cpu();
	if (res < 0)
		goto out;

	n = copy_to_user(ptr, ptrace_ldt.ptr, res);
	if (n != 0)
		res = -EFAULT;

out:
	kfree(ptrace_ldt.ptr);

	return res;
}

@ -145,9 +58,6 @@ static int read_ldt(void __user * ptr, unsigned long bytecount)
		bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
	err = bytecount;

	if (ptrace_ldt)
		return read_ldt_from_host(ptr, bytecount);

	mutex_lock(&ldt->lock);
	if (ldt->entry_count <= LDT_DIRECT_ENTRIES) {
		size = LDT_ENTRY_SIZE*LDT_DIRECT_ENTRIES;
@ -229,17 +139,11 @@ static int write_ldt(void __user * ptr, unsigned long bytecount, int func)
		goto out;
	}

	if (!ptrace_ldt)
		mutex_lock(&ldt->lock);
	mutex_lock(&ldt->lock);

	err = write_ldt_entry(mm_idp, func, &ldt_info, &addr, 1);
	if (err)
		goto out_unlock;
	else if (ptrace_ldt) {
		/* With PTRACE_LDT available, this is used as a flag only */
		ldt->entry_count = 1;
		goto out;
	}

	if (ldt_info.entry_number >= ldt->entry_count &&
	    ldt_info.entry_number >= LDT_DIRECT_ENTRIES) {
@ -393,91 +297,56 @@ long init_new_ldt(struct mm_context *new_mm, struct mm_context *from_mm)
	int i;
	long page, err=0;
	void *addr = NULL;
	struct proc_mm_op copy;


	if (!ptrace_ldt)
		mutex_init(&new_mm->arch.ldt.lock);
	mutex_init(&new_mm->arch.ldt.lock);

	if (!from_mm) {
		memset(&desc, 0, sizeof(desc));
		/*
		 * We have to initialize a clean ldt.
		 * Now we try to retrieve info about the ldt, we
		 * inherited from the host. All ldt-entries found
		 * will be reset in the following loop
		 */
		if (proc_mm) {
			/*
			 * If the new mm was created using proc_mm, host's
			 * default-ldt currently is assigned, which normally
			 * contains the call-gates for lcall7 and lcall27.
			 * To remove these gates, we simply write an empty
			 * entry as number 0 to the host.
			 */
			err = write_ldt_entry(&new_mm->id, 1, &desc, &addr, 1);
		}
		else{
			/*
			 * Now we try to retrieve info about the ldt, we
			 * inherited from the host. All ldt-entries found
			 * will be reset in the following loop
			 */
			ldt_get_host_info();
			for (num_p=host_ldt_entries; *num_p != -1; num_p++) {
				desc.entry_number = *num_p;
				err = write_ldt_entry(&new_mm->id, 1, &desc,
						      &addr, *(num_p + 1) == -1);
				if (err)
					break;
			}
		ldt_get_host_info();
		for (num_p=host_ldt_entries; *num_p != -1; num_p++) {
			desc.entry_number = *num_p;
			err = write_ldt_entry(&new_mm->id, 1, &desc,
					      &addr, *(num_p + 1) == -1);
			if (err)
				break;
		}
		new_mm->arch.ldt.entry_count = 0;

		goto out;
	}

	if (proc_mm) {
		/*
		 * We have a valid from_mm, so we now have to copy the LDT of
		 * from_mm to new_mm, because using proc_mm an new mm with
		 * an empty/default LDT was created in new_mm()
		 */
		copy = ((struct proc_mm_op) { .op = MM_COPY_SEGMENTS,
					      .u =
					      { .copy_segments =
							from_mm->id.u.mm_fd } } );
		i = os_write_file(new_mm->id.u.mm_fd, &copy, sizeof(copy));
		if (i != sizeof(copy))
			printk(KERN_ERR "new_mm : /proc/mm copy_segments "
			       "failed, err = %d\n", -i);
	}

	if (!ptrace_ldt) {
		/*
		 * Our local LDT is used to supply the data for
		 * modify_ldt(READLDT), if PTRACE_LDT isn't available,
		 * i.e., we have to use the stub for modify_ldt, which
		 * can't handle the big read buffer of up to 64kB.
		 */
		mutex_lock(&from_mm->arch.ldt.lock);
		if (from_mm->arch.ldt.entry_count <= LDT_DIRECT_ENTRIES)
			memcpy(new_mm->arch.ldt.u.entries, from_mm->arch.ldt.u.entries,
			       sizeof(new_mm->arch.ldt.u.entries));
		else {
			i = from_mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE;
			while (i-->0) {
				page = __get_free_page(GFP_KERNEL|__GFP_ZERO);
				if (!page) {
					err = -ENOMEM;
					break;
				}
				new_mm->arch.ldt.u.pages[i] =
					(struct ldt_entry *) page;
				memcpy(new_mm->arch.ldt.u.pages[i],
				       from_mm->arch.ldt.u.pages[i], PAGE_SIZE);
	/*
	 * Our local LDT is used to supply the data for
	 * modify_ldt(READLDT), if PTRACE_LDT isn't available,
	 * i.e., we have to use the stub for modify_ldt, which
	 * can't handle the big read buffer of up to 64kB.
	 */
	mutex_lock(&from_mm->arch.ldt.lock);
	if (from_mm->arch.ldt.entry_count <= LDT_DIRECT_ENTRIES)
		memcpy(new_mm->arch.ldt.u.entries, from_mm->arch.ldt.u.entries,
		       sizeof(new_mm->arch.ldt.u.entries));
	else {
		i = from_mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE;
		while (i-->0) {
			page = __get_free_page(GFP_KERNEL|__GFP_ZERO);
			if (!page) {
				err = -ENOMEM;
				break;
			}
			new_mm->arch.ldt.u.pages[i] =
				(struct ldt_entry *) page;
			memcpy(new_mm->arch.ldt.u.pages[i],
			       from_mm->arch.ldt.u.pages[i], PAGE_SIZE);
		}
		new_mm->arch.ldt.entry_count = from_mm->arch.ldt.entry_count;
		mutex_unlock(&from_mm->arch.ldt.lock);
	}
	new_mm->arch.ldt.entry_count = from_mm->arch.ldt.entry_count;
	mutex_unlock(&from_mm->arch.ldt.lock);

out:
	return err;
@ -488,7 +357,7 @@ void free_ldt(struct mm_context *mm)
{
	int i;

	if (!ptrace_ldt && mm->arch.ldt.entry_count > LDT_DIRECT_ENTRIES) {
	if (mm->arch.ldt.entry_count > LDT_DIRECT_ENTRIES) {
		i = mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE;
		while (i-- > 0)
			free_page((long) mm->arch.ldt.u.pages[i]);
@ -27,9 +27,6 @@ struct faultinfo {
/* This is Page Fault */
#define SEGV_IS_FIXABLE(fi)	((fi)->trap_no == 14)

/* SKAS3 has no trap_no on i386, but get_skas_faultinfo() sets it to 0. */
#define SEGV_MAYBE_FIXABLE(fi)	((fi)->trap_no == 0 && ptrace_faultinfo)

#define PTRACE_FULL_FAULTINFO 0

#endif
@ -27,9 +27,6 @@ struct faultinfo {
/* This is Page Fault */
#define SEGV_IS_FIXABLE(fi)	((fi)->trap_no == 14)

/* No broken SKAS API, which doesn't pass trap_no, here. */
#define SEGV_MAYBE_FIXABLE(fi)	0

#define PTRACE_FULL_FAULTINFO 1

#endif
@ -1,22 +0,0 @@
/*
 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */

#ifndef __SYSDEP_X86_SKAS_PTRACE_H
#define __SYSDEP_X86_SKAS_PTRACE_H

struct ptrace_faultinfo {
	int is_write;
	unsigned long addr;
};

struct ptrace_ldt {
	int func;
	void *ptr;
	unsigned long bytecount;
};

#define PTRACE_LDT 54

#endif
@ -66,7 +66,8 @@ extern int stat_file(const char *path, struct hostfs_stat *p, int fd);
extern int access_file(char *path, int r, int w, int x);
extern int open_file(char *path, int r, int w, int append);
extern void *open_dir(char *path, int *err_out);
extern char *read_dir(void *stream, unsigned long long *pos,
extern void seek_dir(void *stream, unsigned long long pos);
extern char *read_dir(void *stream, unsigned long long *pos_out,
		      unsigned long long *ino_out, int *len_out,
		      unsigned int *type_out);
extern void close_file(void *stream);

@ -77,8 +78,7 @@ extern int write_file(int fd, unsigned long long *offset, const char *buf,
		      int len);
extern int lseek_file(int fd, long long offset, int whence);
extern int fsync_file(int fd, int datasync);
extern int file_create(char *name, int ur, int uw, int ux, int gr,
		       int gw, int gx, int or, int ow, int ox);
extern int file_create(char *name, int mode);
extern int set_attr(const char *file, struct hostfs_iattr *attrs, int fd);
extern int make_symlink(const char *from, const char *to);
extern int unlink_file(const char *file);
@ -24,6 +24,7 @@ struct hostfs_inode_info {
	int fd;
	fmode_t mode;
	struct inode vfs_inode;
	struct mutex open_mutex;
};

static inline struct hostfs_inode_info *HOSTFS_I(struct inode *inode)
@ -92,16 +93,22 @@ static char *__dentry_name(struct dentry *dentry, char *name)
		__putname(name);
		return NULL;
	}

	/*
	 * This function relies on the fact that dentry_path_raw() will place
	 * the path name at the end of the provided buffer.
	 */
	BUG_ON(p + strlen(p) + 1 != name + PATH_MAX);

	strlcpy(name, root, PATH_MAX);
	if (len > p - name) {
		__putname(name);
		return NULL;
	}
	if (p > name + len) {
		char *s = name + len;
		while ((*s++ = *p++) != '\0')
			;
	}

	if (p > name + len)
		strcpy(name + len, p);

	return name;
}
@ -135,21 +142,19 @@ static char *follow_link(char *link)
	int len, n;
	char *name, *resolved, *end;

	len = 64;
	while (1) {
	name = __getname();
	if (!name) {
		n = -ENOMEM;
		name = kmalloc(len, GFP_KERNEL);
		if (name == NULL)
			goto out;

		n = hostfs_do_readlink(link, name, len);
		if (n < len)
			break;
		len *= 2;
		kfree(name);
		goto out_free;
	}

	n = hostfs_do_readlink(link, name, PATH_MAX);
	if (n < 0)
		goto out_free;
	else if (n == PATH_MAX) {
		n = -E2BIG;
		goto out_free;
	}

	if (*name == '/')
		return name;
|
|||
}
|
||||
|
||||
sprintf(resolved, "%s%s", link, name);
|
||||
kfree(name);
|
||||
__putname(name);
|
||||
kfree(link);
|
||||
return resolved;
|
||||
|
||||
out_free:
|
||||
kfree(name);
|
||||
out:
|
||||
__putname(name);
|
||||
return ERR_PTR(n);
|
||||
}
|
||||
|
||||
|
@ -225,6 +229,7 @@ static struct inode *hostfs_alloc_inode(struct super_block *sb)
	hi->fd = -1;
	hi->mode = 0;
	inode_init_once(&hi->vfs_inode);
	mutex_init(&hi->open_mutex);
	return &hi->vfs_inode;
}
@ -257,6 +262,9 @@ static int hostfs_show_options(struct seq_file *seq, struct dentry *root)
	if (strlen(root_path) > offset)
		seq_printf(seq, ",%s", root_path + offset);

	if (append)
		seq_puts(seq, ",append");

	return 0;
}
@ -284,6 +292,7 @@ static int hostfs_readdir(struct file *file, struct dir_context *ctx)
	if (dir == NULL)
		return -error;
	next = ctx->pos;
	seek_dir(dir, next);
	while ((name = read_dir(dir, &next, &ino, &len, &type)) != NULL) {
		if (!dir_emit(ctx, name, len, ino, type))
			break;
@ -293,13 +302,12 @@ static int hostfs_readdir(struct file *file, struct dir_context *ctx)
	return 0;
}

static int hostfs_file_open(struct inode *ino, struct file *file)
static int hostfs_open(struct inode *ino, struct file *file)
{
	static DEFINE_MUTEX(open_mutex);
	char *name;
	fmode_t mode = 0;
	fmode_t mode;
	int err;
	int r = 0, w = 0, fd;
	int r, w, fd;

	mode = file->f_mode & (FMODE_READ | FMODE_WRITE);
	if ((mode & HOSTFS_I(ino)->mode) == mode)
@ -308,12 +316,12 @@ static int hostfs_file_open(struct inode *ino, struct file *file)
	mode |= HOSTFS_I(ino)->mode;

retry:
	r = w = 0;

	if (mode & FMODE_READ)
		r = 1;
	if (mode & FMODE_WRITE)
		w = 1;
	if (w)
		r = 1;
		r = w = 1;

	name = dentry_name(file->f_path.dentry);
	if (name == NULL)
@ -324,15 +332,16 @@ retry:
	if (fd < 0)
		return fd;

	mutex_lock(&open_mutex);
	mutex_lock(&HOSTFS_I(ino)->open_mutex);
	/* somebody else had handled it first? */
	if ((mode & HOSTFS_I(ino)->mode) == mode) {
		mutex_unlock(&open_mutex);
		mutex_unlock(&HOSTFS_I(ino)->open_mutex);
		close_file(&fd);
		return 0;
	}
	if ((mode | HOSTFS_I(ino)->mode) != mode) {
		mode |= HOSTFS_I(ino)->mode;
		mutex_unlock(&open_mutex);
		mutex_unlock(&HOSTFS_I(ino)->open_mutex);
		close_file(&fd);
		goto retry;
	}
@ -342,12 +351,12 @@ retry:
		err = replace_file(fd, HOSTFS_I(ino)->fd);
		close_file(&fd);
		if (err < 0) {
			mutex_unlock(&open_mutex);
			mutex_unlock(&HOSTFS_I(ino)->open_mutex);
			return err;
		}
	}
	HOSTFS_I(ino)->mode = mode;
	mutex_unlock(&open_mutex);
	mutex_unlock(&HOSTFS_I(ino)->open_mutex);

	return 0;
}
@ -382,7 +391,7 @@ static const struct file_operations hostfs_file_fops = {
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.mmap		= generic_file_mmap,
	.open		= hostfs_file_open,
	.open		= hostfs_open,
	.release	= hostfs_file_release,
	.fsync		= hostfs_fsync,
};
@ -391,6 +400,8 @@ static const struct file_operations hostfs_dir_fops = {
	.llseek		= generic_file_llseek,
	.iterate	= hostfs_readdir,
	.read		= generic_read_dir,
	.open		= hostfs_open,
	.fsync		= hostfs_fsync,
};

static int hostfs_writepage(struct page *page, struct writeback_control *wbc)
@ -398,7 +409,7 @@ static int hostfs_writepage(struct page *page, struct writeback_control *wbc)
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	char *buffer;
	unsigned long long base;
	loff_t base = page_offset(page);
	int count = PAGE_CACHE_SIZE;
	int end_index = inode->i_size >> PAGE_CACHE_SHIFT;
	int err;

@ -407,7 +418,6 @@ static int hostfs_writepage(struct page *page, struct writeback_control *wbc)
		count = inode->i_size & (PAGE_CACHE_SIZE-1);

	buffer = kmap(page);
	base = ((unsigned long long) page->index) << PAGE_CACHE_SHIFT;

	err = write_file(HOSTFS_I(inode)->fd, &base, buffer, count);
	if (err != count) {
@ -432,26 +442,29 @@ static int hostfs_writepage(struct page *page, struct writeback_control *wbc)
static int hostfs_readpage(struct file *file, struct page *page)
{
	char *buffer;
	long long start;
	int err = 0;
	loff_t start = page_offset(page);
	int bytes_read, ret = 0;

	start = (long long) page->index << PAGE_CACHE_SHIFT;
	buffer = kmap(page);
	err = read_file(FILE_HOSTFS_I(file)->fd, &start, buffer,
	bytes_read = read_file(FILE_HOSTFS_I(file)->fd, &start, buffer,
			PAGE_CACHE_SIZE);
	if (err < 0)
	if (bytes_read < 0) {
		ClearPageUptodate(page);
		SetPageError(page);
		ret = bytes_read;
		goto out;
	}

	memset(&buffer[err], 0, PAGE_CACHE_SIZE - err);
	memset(buffer + bytes_read, 0, PAGE_CACHE_SIZE - bytes_read);

	flush_dcache_page(page);
	ClearPageError(page);
	SetPageUptodate(page);
	if (PageError(page)) ClearPageError(page);
	err = 0;

out:
	flush_dcache_page(page);
	kunmap(page);
	unlock_page(page);
	return err;
	return ret;
}

static int hostfs_write_begin(struct file *file, struct address_space *mapping,
@ -528,11 +541,13 @@ static int read_name(struct inode *ino, char *name)
		init_special_inode(ino, st.mode & S_IFMT, rdev);
		ino->i_op = &hostfs_iops;
		break;

	default:
	case S_IFREG:
		ino->i_op = &hostfs_iops;
		ino->i_fop = &hostfs_file_fops;
		ino->i_mapping->a_ops = &hostfs_aops;
		break;

	default:
		return -EIO;
	}

	ino->i_ino = st.ino;
@ -566,10 +581,7 @@ static int hostfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
	if (name == NULL)
		goto out_put;

	fd = file_create(name,
			 mode & S_IRUSR, mode & S_IWUSR, mode & S_IXUSR,
			 mode & S_IRGRP, mode & S_IWGRP, mode & S_IXGRP,
			 mode & S_IROTH, mode & S_IWOTH, mode & S_IXOTH);
	fd = file_create(name, mode & S_IFMT);
	if (fd < 0)
		error = fd;
	else
@ -97,21 +97,27 @@ void *open_dir(char *path, int *err_out)
	return dir;
}

char *read_dir(void *stream, unsigned long long *pos,
void seek_dir(void *stream, unsigned long long pos)
{
	DIR *dir = stream;

	seekdir(dir, pos);
}

char *read_dir(void *stream, unsigned long long *pos_out,
	       unsigned long long *ino_out, int *len_out,
	       unsigned int *type_out)
{
	DIR *dir = stream;
	struct dirent *ent;

	seekdir(dir, *pos);
	ent = readdir(dir);
	if (ent == NULL)
		return NULL;
	*len_out = strlen(ent->d_name);
	*ino_out = ent->d_ino;
	*type_out = ent->d_type;
	*pos = telldir(dir);
	*pos_out = ent->d_off;
	return ent->d_name;
}

@ -175,21 +181,10 @@ void close_dir(void *stream)
	closedir(stream);
}

int file_create(char *name, int ur, int uw, int ux, int gr,
		int gw, int gx, int or, int ow, int ox)
int file_create(char *name, int mode)
{
	int mode, fd;
	int fd;

	mode = 0;
	mode |= ur ? S_IRUSR : 0;
	mode |= uw ? S_IWUSR : 0;
	mode |= ux ? S_IXUSR : 0;
	mode |= gr ? S_IRGRP : 0;
	mode |= gw ? S_IWGRP : 0;
	mode |= gx ? S_IXGRP : 0;
	mode |= or ? S_IROTH : 0;
	mode |= ow ? S_IWOTH : 0;
	mode |= ox ? S_IXOTH : 0;
	fd = open64(name, O_CREAT | O_RDWR, mode);
	if (fd < 0)
		return -errno;