powerpc: Introduce entry_{32,64}.S, misc_{32,64}.S, systbl.S
The system call table has been consolidated into systbl.S. We have separate 32-bit and 64-bit versions of entry.S and misc.S since the code is mostly sufficiently different to be not worth merging. There are some common bits that will be extracted in future.

Signed-off-by: Paul Mackerras <paulus@samba.org>
parent 06d67d5474
commit 9994a33865
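Before the diffs, a condensed orientation sketch drawn from the hunks shown below (it is not part of the commit itself): on 64-bit kernels the consolidated systbl.S emits one 16-byte entry per system call, the native 64-bit handler followed by the 32-bit/compat handler, and the common entry path in entry_64.S picks the column for the current task before indexing the table by the syscall number times 16. 32-bit kernels emit a single .long per syscall instead. Argument-truncation instructions are omitted here for brevity.

/* systbl.S, CONFIG_PPC64: each entry is a pair of handler pointers */
#define SYSCALL32(func)	.llong	.sys_##func,.sys32_##func

	/* entry_64.S dispatch: choose the column, then index by nr * 16 */
	ld	r11,.SYS_CALL_TABLE@toc(2)	/* table base from the TOC */
	andi.	r10,r10,_TIF_32BIT
	beq	15f
	addi	r11,r11,8	/* 32-bit task: use the second column */
15:	slwi	r0,r0,4		/* syscall number * 16 */
	ldx	r10,r11,r0	/* fetch the handler pointer */
	mtctr	r10
	bctrl			/* call the handler */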
@@ -119,10 +119,9 @@ head-$(CONFIG_4xx) := arch/powerpc/kernel/head_4xx.o
head-$(CONFIG_44x) := arch/powerpc/kernel/head_44x.o
head-$(CONFIG_FSL_BOOKE) := arch/powerpc/kernel/head_fsl_booke.o

ifeq ($(CONFIG_PPC32),y)
head-$(CONFIG_6xx) += arch/powerpc/kernel/idle_6xx.o
head-$(CONFIG_PPC64) += arch/powerpc/kernel/entry_64.o
head-$(CONFIG_PPC_FPU) += arch/powerpc/kernel/fpu.o
endif

core-y += arch/powerpc/kernel/ \
	   arch/$(OLDARCH)/kernel/ \
@@ -17,12 +17,14 @@ extra-$(CONFIG_44x) := head_44x.o
extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o
extra-$(CONFIG_8xx) := head_8xx.o
extra-$(CONFIG_6xx) += idle_6xx.o
extra-$(CONFIG_PPC64) += entry_64.o
extra-$(CONFIG_PPC_FPU) += fpu.o
extra-y += vmlinux.lds

obj-y := traps.o prom.o semaphore.o
obj-$(CONFIG_PPC32) += setup_32.o process.o
obj-y += traps.o prom.o semaphore.o
obj-$(CONFIG_PPC32) += entry_32.o setup_32.o misc_32.o
obj-$(CONFIG_PPC64) += idle_power4.o
obj-$(CONFIG_PPC64) += misc_64.o
ifeq ($(CONFIG_PPC32),y)
obj-$(CONFIG_PPC_OF) += prom_init.o of_device.o
obj-$(CONFIG_MODULES) += ppc_ksyms.o
File diff suppressed because it is too large
@@ -0,0 +1,842 @@
/*
 * arch/ppc64/kernel/entry.S
 *
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>

#ifdef CONFIG_PPC_ISERIES
#define DO_SOFT_DISABLE
#endif

/*
 * System calls.
 */
	.section	".toc","aw"
.SYS_CALL_TABLE:
	.tc .sys_call_table[TC],.sys_call_table

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc	ID_72656773_68657265[TC],0x7265677368657265

	.section	".text"
	.align 7

#undef SHOW_SYSCALLS
|
||||
|
||||
.globl system_call_common
|
||||
system_call_common:
|
||||
andi. r10,r12,MSR_PR
|
||||
mr r10,r1
|
||||
addi r1,r1,-INT_FRAME_SIZE
|
||||
beq- 1f
|
||||
ld r1,PACAKSAVE(r13)
|
||||
1: std r10,0(r1)
|
||||
std r11,_NIP(r1)
|
||||
std r12,_MSR(r1)
|
||||
std r0,GPR0(r1)
|
||||
std r10,GPR1(r1)
|
||||
std r2,GPR2(r1)
|
||||
std r3,GPR3(r1)
|
||||
std r4,GPR4(r1)
|
||||
std r5,GPR5(r1)
|
||||
std r6,GPR6(r1)
|
||||
std r7,GPR7(r1)
|
||||
std r8,GPR8(r1)
|
||||
li r11,0
|
||||
std r11,GPR9(r1)
|
||||
std r11,GPR10(r1)
|
||||
std r11,GPR11(r1)
|
||||
std r11,GPR12(r1)
|
||||
std r9,GPR13(r1)
|
||||
crclr so
|
||||
mfcr r9
|
||||
mflr r10
|
||||
li r11,0xc01
|
||||
std r9,_CCR(r1)
|
||||
std r10,_LINK(r1)
|
||||
std r11,_TRAP(r1)
|
||||
mfxer r9
|
||||
mfctr r10
|
||||
std r9,_XER(r1)
|
||||
std r10,_CTR(r1)
|
||||
std r3,ORIG_GPR3(r1)
|
||||
ld r2,PACATOC(r13)
|
||||
addi r9,r1,STACK_FRAME_OVERHEAD
|
||||
ld r11,exception_marker@toc(r2)
|
||||
std r11,-16(r9) /* "regshere" marker */
|
||||
#ifdef CONFIG_PPC_ISERIES
|
||||
/* Hack for handling interrupts when soft-enabling on iSeries */
|
||||
cmpdi cr1,r0,0x5555 /* syscall 0x5555 */
|
||||
andi. r10,r12,MSR_PR /* from kernel */
|
||||
crand 4*cr0+eq,4*cr1+eq,4*cr0+eq
|
||||
beq hardware_interrupt_entry
|
||||
lbz r10,PACAPROCENABLED(r13)
|
||||
std r10,SOFTE(r1)
|
||||
#endif
|
||||
mfmsr r11
|
||||
ori r11,r11,MSR_EE
|
||||
mtmsrd r11,1
|
||||
|
||||
#ifdef SHOW_SYSCALLS
|
||||
bl .do_show_syscall
|
||||
REST_GPR(0,r1)
|
||||
REST_4GPRS(3,r1)
|
||||
REST_2GPRS(7,r1)
|
||||
addi r9,r1,STACK_FRAME_OVERHEAD
|
||||
#endif
|
||||
clrrdi r11,r1,THREAD_SHIFT
|
||||
li r12,0
|
||||
ld r10,TI_FLAGS(r11)
|
||||
stb r12,TI_SC_NOERR(r11)
|
||||
andi. r11,r10,_TIF_SYSCALL_T_OR_A
|
||||
bne- syscall_dotrace
|
||||
syscall_dotrace_cont:
	cmpldi	0,r0,NR_syscalls
	bge-	syscall_enosys

system_call:			/* label this so stack traces look sane */
/*
 * Need to vector to 32 Bit or default sys_call_table here,
 * based on caller's run-mode / personality.
 */
	ld	r11,.SYS_CALL_TABLE@toc(2)
	andi.	r10,r10,_TIF_32BIT
	beq	15f
	addi	r11,r11,8	/* use 32-bit syscall entries */
	clrldi	r3,r3,32
	clrldi	r4,r4,32
	clrldi	r5,r5,32
	clrldi	r6,r6,32
	clrldi	r7,r7,32
	clrldi	r8,r8,32
15:
	slwi	r0,r0,4
	ldx	r10,r11,r0	/* Fetch system call handler [ptr] */
	mtctr	r10
	bctrl			/* Call handler */

syscall_exit:
|
||||
#ifdef SHOW_SYSCALLS
|
||||
std r3,GPR3(r1)
|
||||
bl .do_show_syscall_exit
|
||||
ld r3,GPR3(r1)
|
||||
#endif
|
||||
std r3,RESULT(r1)
|
||||
ld r5,_CCR(r1)
|
||||
li r10,-_LAST_ERRNO
|
||||
cmpld r3,r10
|
||||
clrrdi r12,r1,THREAD_SHIFT
|
||||
bge- syscall_error
|
||||
syscall_error_cont:
|
||||
|
||||
/* check for syscall tracing or audit */
|
||||
ld r9,TI_FLAGS(r12)
|
||||
andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
|
||||
bne- syscall_exit_trace
|
||||
syscall_exit_trace_cont:
|
||||
|
||||
/* disable interrupts so current_thread_info()->flags can't change,
|
||||
and so that we don't get interrupted after loading SRR0/1. */
|
||||
ld r8,_MSR(r1)
|
||||
andi. r10,r8,MSR_RI
|
||||
beq- unrecov_restore
|
||||
mfmsr r10
|
||||
rldicl r10,r10,48,1
|
||||
rotldi r10,r10,16
|
||||
mtmsrd r10,1
|
||||
ld r9,TI_FLAGS(r12)
|
||||
andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED)
|
||||
bne- syscall_exit_work
|
||||
ld r7,_NIP(r1)
|
||||
stdcx. r0,0,r1 /* to clear the reservation */
|
||||
andi. r6,r8,MSR_PR
|
||||
ld r4,_LINK(r1)
|
||||
beq- 1f /* only restore r13 if */
|
||||
ld r13,GPR13(r1) /* returning to usermode */
|
||||
1: ld r2,GPR2(r1)
|
||||
li r12,MSR_RI
|
||||
andc r10,r10,r12
|
||||
mtmsrd r10,1 /* clear MSR.RI */
|
||||
ld r1,GPR1(r1)
|
||||
mtlr r4
|
||||
mtcr r5
|
||||
mtspr SPRN_SRR0,r7
|
||||
mtspr SPRN_SRR1,r8
|
||||
rfid
|
||||
b . /* prevent speculative execution */
|
||||
|
||||
syscall_enosys:
|
||||
li r3,-ENOSYS
|
||||
std r3,RESULT(r1)
|
||||
clrrdi r12,r1,THREAD_SHIFT
|
||||
ld r5,_CCR(r1)
|
||||
|
||||
syscall_error:
|
||||
lbz r11,TI_SC_NOERR(r12)
|
||||
cmpwi 0,r11,0
|
||||
bne- syscall_error_cont
|
||||
neg r3,r3
|
||||
oris r5,r5,0x1000 /* Set SO bit in CR */
|
||||
std r5,_CCR(r1)
|
||||
b syscall_error_cont
|
||||
|
||||
/* Traced system call support */
|
||||
syscall_dotrace:
|
||||
bl .save_nvgprs
|
||||
addi r3,r1,STACK_FRAME_OVERHEAD
|
||||
bl .do_syscall_trace_enter
|
||||
ld r0,GPR0(r1) /* Restore original registers */
|
||||
ld r3,GPR3(r1)
|
||||
ld r4,GPR4(r1)
|
||||
ld r5,GPR5(r1)
|
||||
ld r6,GPR6(r1)
|
||||
ld r7,GPR7(r1)
|
||||
ld r8,GPR8(r1)
|
||||
addi r9,r1,STACK_FRAME_OVERHEAD
|
||||
clrrdi r10,r1,THREAD_SHIFT
|
||||
ld r10,TI_FLAGS(r10)
|
||||
b syscall_dotrace_cont
|
||||
|
||||
syscall_exit_trace:
|
||||
std r3,GPR3(r1)
|
||||
bl .save_nvgprs
|
||||
addi r3,r1,STACK_FRAME_OVERHEAD
|
||||
bl .do_syscall_trace_leave
|
||||
REST_NVGPRS(r1)
|
||||
ld r3,GPR3(r1)
|
||||
ld r5,_CCR(r1)
|
||||
clrrdi r12,r1,THREAD_SHIFT
|
||||
b syscall_exit_trace_cont
|
||||
|
||||
/* Stuff to do on exit from a system call. */
|
||||
syscall_exit_work:
|
||||
std r3,GPR3(r1)
|
||||
std r5,_CCR(r1)
|
||||
b .ret_from_except_lite
|
||||
|
||||
/* Save non-volatile GPRs, if not already saved. */
|
||||
_GLOBAL(save_nvgprs)
|
||||
ld r11,_TRAP(r1)
|
||||
andi. r0,r11,1
|
||||
beqlr-
|
||||
SAVE_NVGPRS(r1)
|
||||
clrrdi r0,r11,1
|
||||
std r0,_TRAP(r1)
|
||||
blr
|
||||
|
||||
/*
|
||||
* The sigsuspend and rt_sigsuspend system calls can call do_signal
|
||||
* and thus put the process into the stopped state where we might
|
||||
* want to examine its user state with ptrace. Therefore we need
|
||||
* to save all the nonvolatile registers (r14 - r31) before calling
|
||||
* the C code. Similarly, fork, vfork and clone need the full
|
||||
* register state on the stack so that it can be copied to the child.
|
||||
*/
|
||||
_GLOBAL(ppc32_sigsuspend)
|
||||
bl .save_nvgprs
|
||||
bl .sys32_sigsuspend
|
||||
b 70f
|
||||
|
||||
_GLOBAL(ppc64_rt_sigsuspend)
|
||||
bl .save_nvgprs
|
||||
bl .sys_rt_sigsuspend
|
||||
b 70f
|
||||
|
||||
_GLOBAL(ppc32_rt_sigsuspend)
|
||||
bl .save_nvgprs
|
||||
bl .sys32_rt_sigsuspend
|
||||
70: cmpdi 0,r3,0
|
||||
/* If it returned an error, we need to return via syscall_exit to set
|
||||
the SO bit in cr0 and potentially stop for ptrace. */
|
||||
bne syscall_exit
|
||||
/* If sigsuspend() returns zero, we are going into a signal handler. We
|
||||
may need to call audit_syscall_exit() to mark the exit from sigsuspend() */
|
||||
#ifdef CONFIG_AUDIT
|
||||
ld r3,PACACURRENT(r13)
|
||||
ld r4,AUDITCONTEXT(r3)
|
||||
cmpdi 0,r4,0
|
||||
beq .ret_from_except /* No audit_context: Leave immediately. */
|
||||
li r4, 2 /* AUDITSC_FAILURE */
|
||||
li r5,-4 /* It's always -EINTR */
|
||||
bl .audit_syscall_exit
|
||||
#endif
|
||||
b .ret_from_except
|
||||
|
||||
_GLOBAL(ppc_fork)
|
||||
bl .save_nvgprs
|
||||
bl .sys_fork
|
||||
b syscall_exit
|
||||
|
||||
_GLOBAL(ppc_vfork)
|
||||
bl .save_nvgprs
|
||||
bl .sys_vfork
|
||||
b syscall_exit
|
||||
|
||||
_GLOBAL(ppc_clone)
|
||||
bl .save_nvgprs
|
||||
bl .sys_clone
|
||||
b syscall_exit
|
||||
|
||||
_GLOBAL(ppc32_swapcontext)
|
||||
bl .save_nvgprs
|
||||
bl .sys32_swapcontext
|
||||
b 80f
|
||||
|
||||
_GLOBAL(ppc64_swapcontext)
|
||||
bl .save_nvgprs
|
||||
bl .sys_swapcontext
|
||||
b 80f
|
||||
|
||||
_GLOBAL(ppc32_sigreturn)
|
||||
bl .sys32_sigreturn
|
||||
b 80f
|
||||
|
||||
_GLOBAL(ppc32_rt_sigreturn)
|
||||
bl .sys32_rt_sigreturn
|
||||
b 80f
|
||||
|
||||
_GLOBAL(ppc64_rt_sigreturn)
|
||||
bl .sys_rt_sigreturn
|
||||
|
||||
80: cmpdi 0,r3,0
|
||||
blt syscall_exit
|
||||
clrrdi r4,r1,THREAD_SHIFT
|
||||
ld r4,TI_FLAGS(r4)
|
||||
andi. r4,r4,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
|
||||
beq+ 81f
|
||||
addi r3,r1,STACK_FRAME_OVERHEAD
|
||||
bl .do_syscall_trace_leave
|
||||
81: b .ret_from_except
|
||||
|
||||
_GLOBAL(ret_from_fork)
|
||||
bl .schedule_tail
|
||||
REST_NVGPRS(r1)
|
||||
li r3,0
|
||||
b syscall_exit
|
||||
|
||||
/*
 * This routine switches between two different tasks. The process
 * state of one is saved on its kernel stack. Then the state
 * of the other is restored from its kernel stack. The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path. If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/ppc64/kernel/process.c
 */
	.align	7
_GLOBAL(_switch)
|
||||
mflr r0
|
||||
std r0,16(r1)
|
||||
stdu r1,-SWITCH_FRAME_SIZE(r1)
|
||||
/* r3-r13 are caller saved -- Cort */
|
||||
SAVE_8GPRS(14, r1)
|
||||
SAVE_10GPRS(22, r1)
|
||||
mflr r20 /* Return to switch caller */
|
||||
mfmsr r22
|
||||
li r0, MSR_FP
|
||||
#ifdef CONFIG_ALTIVEC
|
||||
BEGIN_FTR_SECTION
|
||||
oris r0,r0,MSR_VEC@h /* Disable altivec */
|
||||
mfspr r24,SPRN_VRSAVE /* save vrsave register value */
|
||||
std r24,THREAD_VRSAVE(r3)
|
||||
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
|
||||
#endif /* CONFIG_ALTIVEC */
|
||||
and. r0,r0,r22
|
||||
beq+ 1f
|
||||
andc r22,r22,r0
|
||||
mtmsrd r22
|
||||
isync
|
||||
1: std r20,_NIP(r1)
|
||||
mfcr r23
|
||||
std r23,_CCR(r1)
|
||||
std r1,KSP(r3) /* Set old stack pointer */
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
/* We need a sync somewhere here to make sure that if the
|
||||
* previous task gets rescheduled on another CPU, it sees all
|
||||
* stores it has performed on this one.
|
||||
*/
|
||||
sync
|
||||
#endif /* CONFIG_SMP */
|
||||
|
||||
addi r6,r4,-THREAD /* Convert THREAD to 'current' */
|
||||
std r6,PACACURRENT(r13) /* Set new 'current' */
|
||||
|
||||
ld r8,KSP(r4) /* new stack pointer */
|
||||
BEGIN_FTR_SECTION
|
||||
clrrdi r6,r8,28 /* get its ESID */
|
||||
clrrdi r9,r1,28 /* get current sp ESID */
|
||||
clrldi. r0,r6,2 /* is new ESID c00000000? */
|
||||
cmpd cr1,r6,r9 /* or is new ESID the same as current ESID? */
|
||||
cror eq,4*cr1+eq,eq
|
||||
beq 2f /* if yes, don't slbie it */
|
||||
|
||||
/* Bolt in the new stack SLB entry */
|
||||
ld r7,KSP_VSID(r4) /* Get new stack's VSID */
|
||||
oris r0,r6,(SLB_ESID_V)@h
|
||||
ori r0,r0,(SLB_NUM_BOLTED-1)@l
|
||||
slbie r6
|
||||
slbie r6 /* Workaround POWER5 < DD2.1 issue */
|
||||
slbmte r7,r0
|
||||
isync
|
||||
|
||||
2:
|
||||
END_FTR_SECTION_IFSET(CPU_FTR_SLB)
|
||||
clrrdi r7,r8,THREAD_SHIFT /* base of new stack */
|
||||
/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
|
||||
because we don't need to leave the 288-byte ABI gap at the
|
||||
top of the kernel stack. */
|
||||
addi r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE
|
||||
|
||||
mr r1,r8 /* start using new stack pointer */
|
||||
std r7,PACAKSAVE(r13)
|
||||
|
||||
ld r6,_CCR(r1)
|
||||
mtcrf 0xFF,r6
|
||||
|
||||
#ifdef CONFIG_ALTIVEC
|
||||
BEGIN_FTR_SECTION
|
||||
ld r0,THREAD_VRSAVE(r4)
|
||||
mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */
|
||||
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
|
||||
#endif /* CONFIG_ALTIVEC */
|
||||
|
||||
/* r3-r13 are destroyed -- Cort */
|
||||
REST_8GPRS(14, r1)
|
||||
REST_10GPRS(22, r1)
|
||||
|
||||
/* convert old thread to its task_struct for return value */
|
||||
addi r3,r3,-THREAD
|
||||
ld r7,_NIP(r1) /* Return to _switch caller in new task */
|
||||
mtlr r7
|
||||
addi r1,r1,SWITCH_FRAME_SIZE
|
||||
blr
|
||||
|
||||
.align 7
|
||||
_GLOBAL(ret_from_except)
|
||||
ld r11,_TRAP(r1)
|
||||
andi. r0,r11,1
|
||||
bne .ret_from_except_lite
|
||||
REST_NVGPRS(r1)
|
||||
|
||||
_GLOBAL(ret_from_except_lite)
|
||||
/*
|
||||
* Disable interrupts so that current_thread_info()->flags
|
||||
* can't change between when we test it and when we return
|
||||
* from the interrupt.
|
||||
*/
|
||||
mfmsr r10 /* Get current interrupt state */
|
||||
rldicl r9,r10,48,1 /* clear MSR_EE */
|
||||
rotldi r9,r9,16
|
||||
mtmsrd r9,1 /* Update machine state */
|
||||
|
||||
#ifdef CONFIG_PREEMPT
|
||||
clrrdi r9,r1,THREAD_SHIFT /* current_thread_info() */
|
||||
li r0,_TIF_NEED_RESCHED /* bits to check */
|
||||
ld r3,_MSR(r1)
|
||||
ld r4,TI_FLAGS(r9)
|
||||
/* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
|
||||
rlwimi r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
|
||||
and. r0,r4,r0 /* check NEED_RESCHED and maybe SIGPENDING */
|
||||
bne do_work
|
||||
|
||||
#else /* !CONFIG_PREEMPT */
|
||||
ld r3,_MSR(r1) /* Returning to user mode? */
|
||||
andi. r3,r3,MSR_PR
|
||||
beq restore /* if not, just restore regs and return */
|
||||
|
||||
/* Check current_thread_info()->flags */
|
||||
clrrdi r9,r1,THREAD_SHIFT
|
||||
ld r4,TI_FLAGS(r9)
|
||||
andi. r0,r4,_TIF_USER_WORK_MASK
|
||||
bne do_work
|
||||
#endif
|
||||
|
||||
restore:
|
||||
#ifdef CONFIG_PPC_ISERIES
|
||||
ld r5,SOFTE(r1)
|
||||
cmpdi 0,r5,0
|
||||
beq 4f
|
||||
/* Check for pending interrupts (iSeries) */
|
||||
ld r3,PACALPPACA+LPPACAANYINT(r13)
|
||||
cmpdi r3,0
|
||||
beq+ 4f /* skip do_IRQ if no interrupts */
|
||||
|
||||
li r3,0
|
||||
stb r3,PACAPROCENABLED(r13) /* ensure we are soft-disabled */
|
||||
ori r10,r10,MSR_EE
|
||||
mtmsrd r10 /* hard-enable again */
|
||||
addi r3,r1,STACK_FRAME_OVERHEAD
|
||||
bl .do_IRQ
|
||||
b .ret_from_except_lite /* loop back and handle more */
|
||||
|
||||
4: stb r5,PACAPROCENABLED(r13)
|
||||
#endif
|
||||
|
||||
ld r3,_MSR(r1)
|
||||
andi. r0,r3,MSR_RI
|
||||
beq- unrecov_restore
|
||||
|
||||
andi. r0,r3,MSR_PR
|
||||
|
||||
/*
|
||||
* r13 is our per cpu area, only restore it if we are returning to
|
||||
* userspace
|
||||
*/
|
||||
beq 1f
|
||||
REST_GPR(13, r1)
|
||||
1:
|
||||
ld r3,_CTR(r1)
|
||||
ld r0,_LINK(r1)
|
||||
mtctr r3
|
||||
mtlr r0
|
||||
ld r3,_XER(r1)
|
||||
mtspr SPRN_XER,r3
|
||||
|
||||
REST_8GPRS(5, r1)
|
||||
|
||||
stdcx. r0,0,r1 /* to clear the reservation */
|
||||
|
||||
mfmsr r0
|
||||
li r2, MSR_RI
|
||||
andc r0,r0,r2
|
||||
mtmsrd r0,1
|
||||
|
||||
ld r0,_MSR(r1)
|
||||
mtspr SPRN_SRR1,r0
|
||||
|
||||
ld r2,_CCR(r1)
|
||||
mtcrf 0xFF,r2
|
||||
ld r2,_NIP(r1)
|
||||
mtspr SPRN_SRR0,r2
|
||||
|
||||
ld r0,GPR0(r1)
|
||||
ld r2,GPR2(r1)
|
||||
ld r3,GPR3(r1)
|
||||
ld r4,GPR4(r1)
|
||||
ld r1,GPR1(r1)
|
||||
|
||||
rfid
|
||||
b . /* prevent speculative execution */
|
||||
|
||||
/* Note: this must change if we start using the TIF_NOTIFY_RESUME bit */
|
||||
do_work:
|
||||
#ifdef CONFIG_PREEMPT
|
||||
andi. r0,r3,MSR_PR /* Returning to user mode? */
|
||||
bne user_work
|
||||
/* Check that preempt_count() == 0 and interrupts are enabled */
|
||||
lwz r8,TI_PREEMPT(r9)
|
||||
cmpwi cr1,r8,0
|
||||
#ifdef CONFIG_PPC_ISERIES
|
||||
ld r0,SOFTE(r1)
|
||||
cmpdi r0,0
|
||||
#else
|
||||
andi. r0,r3,MSR_EE
|
||||
#endif
|
||||
crandc eq,cr1*4+eq,eq
|
||||
bne restore
|
||||
/* here we are preempting the current task */
|
||||
1:
|
||||
#ifdef CONFIG_PPC_ISERIES
|
||||
li r0,1
|
||||
stb r0,PACAPROCENABLED(r13)
|
||||
#endif
|
||||
ori r10,r10,MSR_EE
|
||||
mtmsrd r10,1 /* reenable interrupts */
|
||||
bl .preempt_schedule
|
||||
mfmsr r10
|
||||
clrrdi r9,r1,THREAD_SHIFT
|
||||
rldicl r10,r10,48,1 /* disable interrupts again */
|
||||
rotldi r10,r10,16
|
||||
mtmsrd r10,1
|
||||
ld r4,TI_FLAGS(r9)
|
||||
andi. r0,r4,_TIF_NEED_RESCHED
|
||||
bne 1b
|
||||
b restore
|
||||
|
||||
user_work:
|
||||
#endif
|
||||
/* Enable interrupts */
|
||||
ori r10,r10,MSR_EE
|
||||
mtmsrd r10,1
|
||||
|
||||
andi. r0,r4,_TIF_NEED_RESCHED
|
||||
beq 1f
|
||||
bl .schedule
|
||||
b .ret_from_except_lite
|
||||
|
||||
1: bl .save_nvgprs
|
||||
li r3,0
|
||||
addi r4,r1,STACK_FRAME_OVERHEAD
|
||||
bl .do_signal
|
||||
b .ret_from_except
|
||||
|
||||
unrecov_restore:
|
||||
addi r3,r1,STACK_FRAME_OVERHEAD
|
||||
bl .unrecoverable_exception
|
||||
b unrecov_restore
|
||||
|
||||
#ifdef CONFIG_PPC_RTAS
|
||||
/*
|
||||
* On CHRP, the Run-Time Abstraction Services (RTAS) have to be
|
||||
* called with the MMU off.
|
||||
*
|
||||
* In addition, we need to be in 32b mode, at least for now.
|
||||
*
|
||||
* Note: r3 is an input parameter to rtas, so don't trash it...
|
||||
*/
|
||||
_GLOBAL(enter_rtas)
|
||||
mflr r0
|
||||
std r0,16(r1)
|
||||
stdu r1,-RTAS_FRAME_SIZE(r1) /* Save SP and create stack space. */
|
||||
|
||||
/* Because RTAS is running in 32b mode, it clobbers the high order half
|
||||
* of all registers that it saves. We therefore save those registers
|
||||
* RTAS might touch to the stack. (r0, r3-r13 are caller saved)
|
||||
*/
|
||||
SAVE_GPR(2, r1) /* Save the TOC */
|
||||
SAVE_GPR(13, r1) /* Save paca */
|
||||
SAVE_8GPRS(14, r1) /* Save the non-volatiles */
|
||||
SAVE_10GPRS(22, r1) /* ditto */
|
||||
|
||||
mfcr r4
|
||||
std r4,_CCR(r1)
|
||||
mfctr r5
|
||||
std r5,_CTR(r1)
|
||||
mfspr r6,SPRN_XER
|
||||
std r6,_XER(r1)
|
||||
mfdar r7
|
||||
std r7,_DAR(r1)
|
||||
mfdsisr r8
|
||||
std r8,_DSISR(r1)
|
||||
mfsrr0 r9
|
||||
std r9,_SRR0(r1)
|
||||
mfsrr1 r10
|
||||
std r10,_SRR1(r1)
|
||||
|
||||
/* There is no way it is acceptable to get here with interrupts enabled,
|
||||
* check it with the asm equivalent of WARN_ON
|
||||
*/
|
||||
mfmsr r6
|
||||
andi. r0,r6,MSR_EE
|
||||
1: tdnei r0,0
|
||||
.section __bug_table,"a"
|
||||
.llong 1b,__LINE__ + 0x1000000, 1f, 2f
|
||||
.previous
|
||||
.section .rodata,"a"
|
||||
1: .asciz __FILE__
|
||||
2: .asciz "enter_rtas"
|
||||
.previous
|
||||
|
||||
/* Unfortunately, the stack pointer and the MSR are also clobbered,
|
||||
* so they are saved in the PACA which allows us to restore
|
||||
* our original state after RTAS returns.
|
||||
*/
|
||||
std r1,PACAR1(r13)
|
||||
std r6,PACASAVEDMSR(r13)
|
||||
|
||||
/* Setup our real return addr */
|
||||
SET_REG_TO_LABEL(r4,.rtas_return_loc)
|
||||
SET_REG_TO_CONST(r9,KERNELBASE)
|
||||
sub r4,r4,r9
|
||||
mtlr r4
|
||||
|
||||
li r0,0
|
||||
ori r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
|
||||
andc r0,r6,r0
|
||||
|
||||
li r9,1
|
||||
rldicr r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
|
||||
ori r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP
|
||||
andc r6,r0,r9
|
||||
ori r6,r6,MSR_RI
|
||||
sync /* disable interrupts so SRR0/1 */
|
||||
mtmsrd r0 /* don't get trashed */
|
||||
|
||||
SET_REG_TO_LABEL(r4,rtas)
|
||||
ld r5,RTASENTRY(r4) /* get the rtas->entry value */
|
||||
ld r4,RTASBASE(r4) /* get the rtas->base value */
|
||||
|
||||
mtspr SPRN_SRR0,r5
|
||||
mtspr SPRN_SRR1,r6
|
||||
rfid
|
||||
b . /* prevent speculative execution */
|
||||
|
||||
_STATIC(rtas_return_loc)
|
||||
/* relocation is off at this point */
|
||||
mfspr r4,SPRN_SPRG3 /* Get PACA */
|
||||
SET_REG_TO_CONST(r5, KERNELBASE)
|
||||
sub r4,r4,r5 /* RELOC the PACA base pointer */
|
||||
|
||||
mfmsr r6
|
||||
li r0,MSR_RI
|
||||
andc r6,r6,r0
|
||||
sync
|
||||
mtmsrd r6
|
||||
|
||||
ld r1,PACAR1(r4) /* Restore our SP */
|
||||
LOADADDR(r3,.rtas_restore_regs)
|
||||
ld r4,PACASAVEDMSR(r4) /* Restore our MSR */
|
||||
|
||||
mtspr SPRN_SRR0,r3
|
||||
mtspr SPRN_SRR1,r4
|
||||
rfid
|
||||
b . /* prevent speculative execution */
|
||||
|
||||
_STATIC(rtas_restore_regs)
|
||||
/* relocation is on at this point */
|
||||
REST_GPR(2, r1) /* Restore the TOC */
|
||||
REST_GPR(13, r1) /* Restore paca */
|
||||
REST_8GPRS(14, r1) /* Restore the non-volatiles */
|
||||
REST_10GPRS(22, r1) /* ditto */
|
||||
|
||||
mfspr r13,SPRN_SPRG3
|
||||
|
||||
ld r4,_CCR(r1)
|
||||
mtcr r4
|
||||
ld r5,_CTR(r1)
|
||||
mtctr r5
|
||||
ld r6,_XER(r1)
|
||||
mtspr SPRN_XER,r6
|
||||
ld r7,_DAR(r1)
|
||||
mtdar r7
|
||||
ld r8,_DSISR(r1)
|
||||
mtdsisr r8
|
||||
ld r9,_SRR0(r1)
|
||||
mtsrr0 r9
|
||||
ld r10,_SRR1(r1)
|
||||
mtsrr1 r10
|
||||
|
||||
addi r1,r1,RTAS_FRAME_SIZE /* Unstack our frame */
|
||||
ld r0,16(r1) /* get return address */
|
||||
|
||||
mtlr r0
|
||||
blr /* return to caller */
|
||||
|
||||
#endif /* CONFIG_PPC_RTAS */
|
||||
|
||||
#ifdef CONFIG_PPC_MULTIPLATFORM
|
||||
|
||||
_GLOBAL(enter_prom)
|
||||
mflr r0
|
||||
std r0,16(r1)
|
||||
stdu r1,-PROM_FRAME_SIZE(r1) /* Save SP and create stack space */
|
||||
|
||||
/* Because PROM is running in 32b mode, it clobbers the high order half
|
||||
* of all registers that it saves. We therefore save those registers
|
||||
* PROM might touch to the stack. (r0, r3-r13 are caller saved)
|
||||
*/
|
||||
SAVE_8GPRS(2, r1)
|
||||
SAVE_GPR(13, r1)
|
||||
SAVE_8GPRS(14, r1)
|
||||
SAVE_10GPRS(22, r1)
|
||||
mfcr r4
|
||||
std r4,_CCR(r1)
|
||||
mfctr r5
|
||||
std r5,_CTR(r1)
|
||||
mfspr r6,SPRN_XER
|
||||
std r6,_XER(r1)
|
||||
mfdar r7
|
||||
std r7,_DAR(r1)
|
||||
mfdsisr r8
|
||||
std r8,_DSISR(r1)
|
||||
mfsrr0 r9
|
||||
std r9,_SRR0(r1)
|
||||
mfsrr1 r10
|
||||
std r10,_SRR1(r1)
|
||||
mfmsr r11
|
||||
std r11,_MSR(r1)
|
||||
|
||||
/* Get the PROM entrypoint */
|
||||
ld r0,GPR4(r1)
|
||||
mtlr r0
|
||||
|
||||
/* Switch MSR to 32 bits mode
|
||||
*/
|
||||
mfmsr r11
|
||||
li r12,1
|
||||
rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
|
||||
andc r11,r11,r12
|
||||
li r12,1
|
||||
rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
|
||||
andc r11,r11,r12
|
||||
mtmsrd r11
|
||||
isync
|
||||
|
||||
/* Restore arguments & enter PROM here... */
|
||||
ld r3,GPR3(r1)
|
||||
blrl
|
||||
|
||||
/* Just make sure that r1 top 32 bits didn't get
|
||||
* corrupt by OF
|
||||
*/
|
||||
rldicl r1,r1,0,32
|
||||
|
||||
/* Restore the MSR (back to 64 bits) */
|
||||
ld r0,_MSR(r1)
|
||||
mtmsrd r0
|
||||
isync
|
||||
|
||||
/* Restore other registers */
|
||||
REST_GPR(2, r1)
|
||||
REST_GPR(13, r1)
|
||||
REST_8GPRS(14, r1)
|
||||
REST_10GPRS(22, r1)
|
||||
ld r4,_CCR(r1)
|
||||
mtcr r4
|
||||
ld r5,_CTR(r1)
|
||||
mtctr r5
|
||||
ld r6,_XER(r1)
|
||||
mtspr SPRN_XER,r6
|
||||
ld r7,_DAR(r1)
|
||||
mtdar r7
|
||||
ld r8,_DSISR(r1)
|
||||
mtdsisr r8
|
||||
ld r9,_SRR0(r1)
|
||||
mtsrr0 r9
|
||||
ld r10,_SRR1(r1)
|
||||
mtsrr1 r10
|
||||
|
||||
addi r1,r1,PROM_FRAME_SIZE
|
||||
ld r0,16(r1)
|
||||
mtlr r0
|
||||
blr
|
||||
|
||||
#endif /* CONFIG_PPC_MULTIPLATFORM */
|
File diff suppressed because it is too large
@@ -0,0 +1,898 @@
/*
 * arch/powerpc/kernel/misc64.S
 *
 * This file contains miscellaneous low-level functions.
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras.
 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>

	.text

/*
|
||||
* Returns (address we are running at) - (address we were linked at)
|
||||
* for use before the text and data are mapped to KERNELBASE.
|
||||
*/
|
||||
|
||||
_GLOBAL(reloc_offset)
|
||||
mflr r0
|
||||
bl 1f
|
||||
1: mflr r3
|
||||
LOADADDR(r4,1b)
|
||||
subf r3,r4,r3
|
||||
mtlr r0
|
||||
blr
|
||||
|
||||
/*
|
||||
* add_reloc_offset(x) returns x + reloc_offset().
|
||||
*/
|
||||
_GLOBAL(add_reloc_offset)
|
||||
mflr r0
|
||||
bl 1f
|
||||
1: mflr r5
|
||||
LOADADDR(r4,1b)
|
||||
subf r5,r4,r5
|
||||
add r3,r3,r5
|
||||
mtlr r0
|
||||
blr
|
||||
|
||||
_GLOBAL(get_msr)
|
||||
mfmsr r3
|
||||
blr
|
||||
|
||||
_GLOBAL(get_dar)
|
||||
mfdar r3
|
||||
blr
|
||||
|
||||
_GLOBAL(get_srr0)
|
||||
mfsrr0 r3
|
||||
blr
|
||||
|
||||
_GLOBAL(get_srr1)
|
||||
mfsrr1 r3
|
||||
blr
|
||||
|
||||
_GLOBAL(get_sp)
|
||||
mr r3,r1
|
||||
blr
|
||||
|
||||
#ifdef CONFIG_IRQSTACKS
|
||||
_GLOBAL(call_do_softirq)
|
||||
mflr r0
|
||||
std r0,16(r1)
|
||||
stdu r1,THREAD_SIZE-112(r3)
|
||||
mr r1,r3
|
||||
bl .__do_softirq
|
||||
ld r1,0(r1)
|
||||
ld r0,16(r1)
|
||||
mtlr r0
|
||||
blr
|
||||
|
||||
_GLOBAL(call_handle_IRQ_event)
|
||||
mflr r0
|
||||
std r0,16(r1)
|
||||
stdu r1,THREAD_SIZE-112(r6)
|
||||
mr r1,r6
|
||||
bl .handle_IRQ_event
|
||||
ld r1,0(r1)
|
||||
ld r0,16(r1)
|
||||
mtlr r0
|
||||
blr
|
||||
#endif /* CONFIG_IRQSTACKS */
|
||||
|
||||
/*
 * To be called by C code which needs to do some operations with MMU
 * disabled. Note that interrupts have to be disabled by the caller
 * prior to calling us. The code called _MUST_ be in the RMO of course
 * and part of the linear mapping as we don't attempt to translate the
 * stack pointer at all. The function is called with the stack switched
 * to this CPU emergency stack
 *
 * prototype is void *call_with_mmu_off(void *func, void *data);
 *
 * the called function is expected to be of the form
 *
 * void *called(void *data);
 */
_GLOBAL(call_with_mmu_off)
|
||||
mflr r0 /* get link, save it on stackframe */
|
||||
std r0,16(r1)
|
||||
mr r1,r5 /* save old stack ptr */
|
||||
ld r1,PACAEMERGSP(r13) /* get emerg. stack */
|
||||
subi r1,r1,STACK_FRAME_OVERHEAD
|
||||
std r0,16(r1) /* save link on emerg. stack */
|
||||
std r5,0(r1) /* save old stack ptr in backchain */
|
||||
ld r3,0(r3) /* get to real function ptr (assume same TOC) */
|
||||
bl 2f /* we need LR to return, continue at label 2 */
|
||||
|
||||
ld r0,16(r1) /* we return here from the call, get LR and */
|
||||
ld r1,0(r1) /* .. old stack ptr */
|
||||
mtspr SPRN_SRR0,r0 /* and get back to virtual mode with these */
|
||||
mfmsr r4
|
||||
ori r4,r4,MSR_IR|MSR_DR
|
||||
mtspr SPRN_SRR1,r4
|
||||
rfid
|
||||
|
||||
2: mtspr SPRN_SRR0,r3 /* coming from above, enter real mode */
|
||||
mr r3,r4 /* get parameter */
|
||||
mfmsr r0
|
||||
ori r0,r0,MSR_IR|MSR_DR
|
||||
xori r0,r0,MSR_IR|MSR_DR
|
||||
mtspr SPRN_SRR1,r0
|
||||
rfid
|
||||
|
||||
|
||||
.section ".toc","aw"
|
||||
PPC64_CACHES:
|
||||
.tc ppc64_caches[TC],ppc64_caches
|
||||
.section ".text"
|
||||
|
||||
/*
|
||||
* Write any modified data cache blocks out to memory
|
||||
* and invalidate the corresponding instruction cache blocks.
|
||||
*
|
||||
* flush_icache_range(unsigned long start, unsigned long stop)
|
||||
*
|
||||
* flush all bytes from start through stop-1 inclusive
|
||||
*/
|
||||
|
||||
_KPROBE(__flush_icache_range)
|
||||
|
||||
/*
|
||||
* Flush the data cache to memory
|
||||
*
|
||||
* Different systems have different cache line sizes
|
||||
* and in some cases i-cache and d-cache line sizes differ from
|
||||
* each other.
|
||||
*/
|
||||
ld r10,PPC64_CACHES@toc(r2)
|
||||
lwz r7,DCACHEL1LINESIZE(r10)/* Get cache line size */
|
||||
addi r5,r7,-1
|
||||
andc r6,r3,r5 /* round low to line bdy */
|
||||
subf r8,r6,r4 /* compute length */
|
||||
add r8,r8,r5 /* ensure we get enough */
|
||||
lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of cache line size */
|
||||
srw. r8,r8,r9 /* compute line count */
|
||||
beqlr /* nothing to do? */
|
||||
mtctr r8
|
||||
1: dcbst 0,r6
|
||||
add r6,r6,r7
|
||||
bdnz 1b
|
||||
sync
|
||||
|
||||
/* Now invalidate the instruction cache */
|
||||
|
||||
lwz r7,ICACHEL1LINESIZE(r10) /* Get Icache line size */
|
||||
addi r5,r7,-1
|
||||
andc r6,r3,r5 /* round low to line bdy */
|
||||
subf r8,r6,r4 /* compute length */
|
||||
add r8,r8,r5
|
||||
lwz r9,ICACHEL1LOGLINESIZE(r10) /* Get log-2 of Icache line size */
|
||||
srw. r8,r8,r9 /* compute line count */
|
||||
beqlr /* nothing to do? */
|
||||
mtctr r8
|
||||
2: icbi 0,r6
|
||||
add r6,r6,r7
|
||||
bdnz 2b
|
||||
isync
|
||||
blr
|
||||
.previous .text
|
||||
/*
|
||||
* Like above, but only do the D-cache.
|
||||
*
|
||||
* flush_dcache_range(unsigned long start, unsigned long stop)
|
||||
*
|
||||
* flush all bytes from start to stop-1 inclusive
|
||||
*/
|
||||
_GLOBAL(flush_dcache_range)
|
||||
|
||||
/*
|
||||
* Flush the data cache to memory
|
||||
*
|
||||
* Different systems have different cache line sizes
|
||||
*/
|
||||
ld r10,PPC64_CACHES@toc(r2)
|
||||
lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */
|
||||
addi r5,r7,-1
|
||||
andc r6,r3,r5 /* round low to line bdy */
|
||||
subf r8,r6,r4 /* compute length */
|
||||
add r8,r8,r5 /* ensure we get enough */
|
||||
lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of dcache line size */
|
||||
srw. r8,r8,r9 /* compute line count */
|
||||
beqlr /* nothing to do? */
|
||||
mtctr r8
|
||||
0: dcbst 0,r6
|
||||
add r6,r6,r7
|
||||
bdnz 0b
|
||||
sync
|
||||
blr
|
||||
|
||||
/*
|
||||
* Like above, but works on non-mapped physical addresses.
|
||||
* Use only for non-LPAR setups ! It also assumes real mode
|
||||
* is cacheable. Used for flushing out the DART before using
|
||||
* it as uncacheable memory
|
||||
*
|
||||
* flush_dcache_phys_range(unsigned long start, unsigned long stop)
|
||||
*
|
||||
* flush all bytes from start to stop-1 inclusive
|
||||
*/
|
||||
_GLOBAL(flush_dcache_phys_range)
|
||||
ld r10,PPC64_CACHES@toc(r2)
|
||||
lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */
|
||||
addi r5,r7,-1
|
||||
andc r6,r3,r5 /* round low to line bdy */
|
||||
subf r8,r6,r4 /* compute length */
|
||||
add r8,r8,r5 /* ensure we get enough */
|
||||
lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of dcache line size */
|
||||
srw. r8,r8,r9 /* compute line count */
|
||||
beqlr /* nothing to do? */
|
||||
mfmsr r5 /* Disable MMU Data Relocation */
|
||||
ori r0,r5,MSR_DR
|
||||
xori r0,r0,MSR_DR
|
||||
sync
|
||||
mtmsr r0
|
||||
sync
|
||||
isync
|
||||
mtctr r8
|
||||
0: dcbst 0,r6
|
||||
add r6,r6,r7
|
||||
bdnz 0b
|
||||
sync
|
||||
isync
|
||||
mtmsr r5 /* Re-enable MMU Data Relocation */
|
||||
sync
|
||||
isync
|
||||
blr
|
||||
|
||||
_GLOBAL(flush_inval_dcache_range)
|
||||
ld r10,PPC64_CACHES@toc(r2)
|
||||
lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */
|
||||
addi r5,r7,-1
|
||||
andc r6,r3,r5 /* round low to line bdy */
|
||||
subf r8,r6,r4 /* compute length */
|
||||
add r8,r8,r5 /* ensure we get enough */
|
||||
lwz r9,DCACHEL1LOGLINESIZE(r10)/* Get log-2 of dcache line size */
|
||||
srw. r8,r8,r9 /* compute line count */
|
||||
beqlr /* nothing to do? */
|
||||
sync
|
||||
isync
|
||||
mtctr r8
|
||||
0: dcbf 0,r6
|
||||
add r6,r6,r7
|
||||
bdnz 0b
|
||||
sync
|
||||
isync
|
||||
blr
|
||||
|
||||
|
||||
/*
|
||||
* Flush a particular page from the data cache to RAM.
|
||||
* Note: this is necessary because the instruction cache does *not*
|
||||
* snoop from the data cache.
|
||||
*
|
||||
* void __flush_dcache_icache(void *page)
|
||||
*/
|
||||
_GLOBAL(__flush_dcache_icache)
|
||||
/*
|
||||
* Flush the data cache to memory
|
||||
*
|
||||
* Different systems have different cache line sizes
|
||||
*/
|
||||
|
||||
/* Flush the dcache */
|
||||
ld r7,PPC64_CACHES@toc(r2)
|
||||
clrrdi r3,r3,PAGE_SHIFT /* Page align */
|
||||
lwz r4,DCACHEL1LINESPERPAGE(r7) /* Get # dcache lines per page */
|
||||
lwz r5,DCACHEL1LINESIZE(r7) /* Get dcache line size */
|
||||
mr r6,r3
|
||||
mtctr r4
|
||||
0: dcbst 0,r6
|
||||
add r6,r6,r5
|
||||
bdnz 0b
|
||||
sync
|
||||
|
||||
/* Now invalidate the icache */
|
||||
|
||||
lwz r4,ICACHEL1LINESPERPAGE(r7) /* Get # icache lines per page */
|
||||
lwz r5,ICACHEL1LINESIZE(r7) /* Get icache line size */
|
||||
mtctr r4
|
||||
1: icbi 0,r3
|
||||
add r3,r3,r5
|
||||
bdnz 1b
|
||||
isync
|
||||
blr
|
||||
|
||||
/*
|
||||
* I/O string operations
|
||||
*
|
||||
* insb(port, buf, len)
|
||||
* outsb(port, buf, len)
|
||||
* insw(port, buf, len)
|
||||
* outsw(port, buf, len)
|
||||
* insl(port, buf, len)
|
||||
* outsl(port, buf, len)
|
||||
* insw_ns(port, buf, len)
|
||||
* outsw_ns(port, buf, len)
|
||||
* insl_ns(port, buf, len)
|
||||
* outsl_ns(port, buf, len)
|
||||
*
|
||||
* The *_ns versions don't do byte-swapping.
|
||||
*/
|
||||
_GLOBAL(_insb)
|
||||
cmpwi 0,r5,0
|
||||
mtctr r5
|
||||
subi r4,r4,1
|
||||
blelr-
|
||||
00: lbz r5,0(r3)
|
||||
eieio
|
||||
stbu r5,1(r4)
|
||||
bdnz 00b
|
||||
twi 0,r5,0
|
||||
isync
|
||||
blr
|
||||
|
||||
_GLOBAL(_outsb)
|
||||
cmpwi 0,r5,0
|
||||
mtctr r5
|
||||
subi r4,r4,1
|
||||
blelr-
|
||||
00: lbzu r5,1(r4)
|
||||
stb r5,0(r3)
|
||||
bdnz 00b
|
||||
sync
|
||||
blr
|
||||
|
||||
_GLOBAL(_insw)
|
||||
cmpwi 0,r5,0
|
||||
mtctr r5
|
||||
subi r4,r4,2
|
||||
blelr-
|
||||
00: lhbrx r5,0,r3
|
||||
eieio
|
||||
sthu r5,2(r4)
|
||||
bdnz 00b
|
||||
twi 0,r5,0
|
||||
isync
|
||||
blr
|
||||
|
||||
_GLOBAL(_outsw)
|
||||
cmpwi 0,r5,0
|
||||
mtctr r5
|
||||
subi r4,r4,2
|
||||
blelr-
|
||||
00: lhzu r5,2(r4)
|
||||
sthbrx r5,0,r3
|
||||
bdnz 00b
|
||||
sync
|
||||
blr
|
||||
|
||||
_GLOBAL(_insl)
|
||||
cmpwi 0,r5,0
|
||||
mtctr r5
|
||||
subi r4,r4,4
|
||||
blelr-
|
||||
00: lwbrx r5,0,r3
|
||||
eieio
|
||||
stwu r5,4(r4)
|
||||
bdnz 00b
|
||||
twi 0,r5,0
|
||||
isync
|
||||
blr
|
||||
|
||||
_GLOBAL(_outsl)
|
||||
cmpwi 0,r5,0
|
||||
mtctr r5
|
||||
subi r4,r4,4
|
||||
blelr-
|
||||
00: lwzu r5,4(r4)
|
||||
stwbrx r5,0,r3
|
||||
bdnz 00b
|
||||
sync
|
||||
blr
|
||||
|
||||
/* _GLOBAL(ide_insw) now in drivers/ide/ide-iops.c */
|
||||
_GLOBAL(_insw_ns)
|
||||
cmpwi 0,r5,0
|
||||
mtctr r5
|
||||
subi r4,r4,2
|
||||
blelr-
|
||||
00: lhz r5,0(r3)
|
||||
eieio
|
||||
sthu r5,2(r4)
|
||||
bdnz 00b
|
||||
twi 0,r5,0
|
||||
isync
|
||||
blr
|
||||
|
||||
/* _GLOBAL(ide_outsw) now in drivers/ide/ide-iops.c */
|
||||
_GLOBAL(_outsw_ns)
|
||||
cmpwi 0,r5,0
|
||||
mtctr r5
|
||||
subi r4,r4,2
|
||||
blelr-
|
||||
00: lhzu r5,2(r4)
|
||||
sth r5,0(r3)
|
||||
bdnz 00b
|
||||
sync
|
||||
blr
|
||||
|
||||
_GLOBAL(_insl_ns)
|
||||
cmpwi 0,r5,0
|
||||
mtctr r5
|
||||
subi r4,r4,4
|
||||
blelr-
|
||||
00: lwz r5,0(r3)
|
||||
eieio
|
||||
stwu r5,4(r4)
|
||||
bdnz 00b
|
||||
twi 0,r5,0
|
||||
isync
|
||||
blr
|
||||
|
||||
_GLOBAL(_outsl_ns)
|
||||
cmpwi 0,r5,0
|
||||
mtctr r5
|
||||
subi r4,r4,4
|
||||
blelr-
|
||||
00: lwzu r5,4(r4)
|
||||
stw r5,0(r3)
|
||||
bdnz 00b
|
||||
sync
|
||||
blr
|
||||
|
||||
|
||||
_GLOBAL(cvt_fd)
|
||||
lfd 0,0(r5) /* load up fpscr value */
|
||||
mtfsf 0xff,0
|
||||
lfs 0,0(r3)
|
||||
stfd 0,0(r4)
|
||||
mffs 0 /* save new fpscr value */
|
||||
stfd 0,0(r5)
|
||||
blr
|
||||
|
||||
_GLOBAL(cvt_df)
|
||||
lfd 0,0(r5) /* load up fpscr value */
|
||||
mtfsf 0xff,0
|
||||
lfd 0,0(r3)
|
||||
stfs 0,0(r4)
|
||||
mffs 0 /* save new fpscr value */
|
||||
stfd 0,0(r5)
|
||||
blr
|
||||
|
||||
/*
|
||||
* identify_cpu and calls setup_cpu
|
||||
* In: r3 = base of the cpu_specs array
|
||||
* r4 = address of cur_cpu_spec
|
||||
* r5 = relocation offset
|
||||
*/
|
||||
_GLOBAL(identify_cpu)
|
||||
mfpvr r7
|
||||
1:
|
||||
lwz r8,CPU_SPEC_PVR_MASK(r3)
|
||||
and r8,r8,r7
|
||||
lwz r9,CPU_SPEC_PVR_VALUE(r3)
|
||||
cmplw 0,r9,r8
|
||||
beq 1f
|
||||
addi r3,r3,CPU_SPEC_ENTRY_SIZE
|
||||
b 1b
|
||||
1:
|
||||
sub r0,r3,r5
|
||||
std r0,0(r4)
|
||||
ld r4,CPU_SPEC_SETUP(r3)
|
||||
add r4,r4,r5
|
||||
ld r4,0(r4)
|
||||
add r4,r4,r5
|
||||
mtctr r4
|
||||
/* Calling convention for cpu setup is r3=offset, r4=cur_cpu_spec */
|
||||
mr r4,r3
|
||||
mr r3,r5
|
||||
bctr
|
||||
|
||||
/*
|
||||
* do_cpu_ftr_fixups - goes through the list of CPU feature fixups
|
||||
* and writes nop's over sections of code that don't apply for this cpu.
|
||||
* r3 = data offset (not changed)
|
||||
*/
|
||||
_GLOBAL(do_cpu_ftr_fixups)
|
||||
/* Get CPU 0 features */
|
||||
LOADADDR(r6,cur_cpu_spec)
|
||||
sub r6,r6,r3
|
||||
ld r4,0(r6)
|
||||
sub r4,r4,r3
|
||||
ld r4,CPU_SPEC_FEATURES(r4)
|
||||
/* Get the fixup table */
|
||||
LOADADDR(r6,__start___ftr_fixup)
|
||||
sub r6,r6,r3
|
||||
LOADADDR(r7,__stop___ftr_fixup)
|
||||
sub r7,r7,r3
|
||||
/* Do the fixup */
|
||||
1: cmpld r6,r7
|
||||
bgelr
|
||||
addi r6,r6,32
|
||||
ld r8,-32(r6) /* mask */
|
||||
and r8,r8,r4
|
||||
ld r9,-24(r6) /* value */
|
||||
cmpld r8,r9
|
||||
beq 1b
|
||||
ld r8,-16(r6) /* section begin */
|
||||
ld r9,-8(r6) /* section end */
|
||||
subf. r9,r8,r9
|
||||
beq 1b
|
||||
/* write nops over the section of code */
|
||||
/* todo: if large section, add a branch at the start of it */
|
||||
srwi r9,r9,2
|
||||
mtctr r9
|
||||
sub r8,r8,r3
|
||||
lis r0,0x60000000@h /* nop */
|
||||
3: stw r0,0(r8)
|
||||
andi. r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
|
||||
beq 2f
|
||||
dcbst 0,r8 /* suboptimal, but simpler */
|
||||
sync
|
||||
icbi 0,r8
|
||||
2: addi r8,r8,4
|
||||
bdnz 3b
|
||||
sync /* additional sync needed on g4 */
|
||||
isync
|
||||
b 1b
|
||||
|
||||
#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
|
||||
/*
|
||||
* Do an IO access in real mode
|
||||
*/
|
||||
_GLOBAL(real_readb)
|
||||
mfmsr r7
|
||||
ori r0,r7,MSR_DR
|
||||
xori r0,r0,MSR_DR
|
||||
sync
|
||||
mtmsrd r0
|
||||
sync
|
||||
isync
|
||||
mfspr r6,SPRN_HID4
|
||||
rldicl r5,r6,32,0
|
||||
ori r5,r5,0x100
|
||||
rldicl r5,r5,32,0
|
||||
sync
|
||||
mtspr SPRN_HID4,r5
|
||||
isync
|
||||
slbia
|
||||
isync
|
||||
lbz r3,0(r3)
|
||||
sync
|
||||
mtspr SPRN_HID4,r6
|
||||
isync
|
||||
slbia
|
||||
isync
|
||||
mtmsrd r7
|
||||
sync
|
||||
isync
|
||||
blr
|
||||
|
||||
/*
|
||||
* Do an IO access in real mode
|
||||
*/
|
||||
_GLOBAL(real_writeb)
|
||||
mfmsr r7
|
||||
ori r0,r7,MSR_DR
|
||||
xori r0,r0,MSR_DR
|
||||
sync
|
||||
mtmsrd r0
|
||||
sync
|
||||
isync
|
||||
mfspr r6,SPRN_HID4
|
||||
rldicl r5,r6,32,0
|
||||
ori r5,r5,0x100
|
||||
rldicl r5,r5,32,0
|
||||
sync
|
||||
mtspr SPRN_HID4,r5
|
||||
isync
|
||||
slbia
|
||||
isync
|
||||
stb r3,0(r4)
|
||||
sync
|
||||
mtspr SPRN_HID4,r6
|
||||
isync
|
||||
slbia
|
||||
isync
|
||||
mtmsrd r7
|
||||
sync
|
||||
isync
|
||||
blr
|
||||
#endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */
|
||||
|
||||
/*
|
||||
* Create a kernel thread
|
||||
* kernel_thread(fn, arg, flags)
|
||||
*/
|
||||
_GLOBAL(kernel_thread)
|
||||
std r29,-24(r1)
|
||||
std r30,-16(r1)
|
||||
stdu r1,-STACK_FRAME_OVERHEAD(r1)
|
||||
mr r29,r3
|
||||
mr r30,r4
|
||||
ori r3,r5,CLONE_VM /* flags */
|
||||
oris r3,r3,(CLONE_UNTRACED>>16)
|
||||
li r4,0 /* new sp (unused) */
|
||||
li r0,__NR_clone
|
||||
sc
|
||||
cmpdi 0,r3,0 /* parent or child? */
|
||||
bne 1f /* return if parent */
|
||||
li r0,0
|
||||
stdu r0,-STACK_FRAME_OVERHEAD(r1)
|
||||
ld r2,8(r29)
|
||||
ld r29,0(r29)
|
||||
mtlr r29 /* fn addr in lr */
|
||||
mr r3,r30 /* load arg and call fn */
|
||||
blrl
|
||||
li r0,__NR_exit /* exit after child exits */
|
||||
li r3,0
|
||||
sc
|
||||
1: addi r1,r1,STACK_FRAME_OVERHEAD
|
||||
ld r29,-24(r1)
|
||||
ld r30,-16(r1)
|
||||
blr
|
||||
|
||||
/*
|
||||
* disable_kernel_fp()
|
||||
* Disable the FPU.
|
||||
*/
|
||||
_GLOBAL(disable_kernel_fp)
|
||||
mfmsr r3
|
||||
rldicl r0,r3,(63-MSR_FP_LG),1
|
||||
rldicl r3,r0,(MSR_FP_LG+1),0
|
||||
mtmsrd r3 /* disable use of fpu now */
|
||||
isync
|
||||
blr
|
||||
|
||||
#ifdef CONFIG_ALTIVEC
|
||||
|
||||
#if 0 /* this has no callers for now */
|
||||
/*
|
||||
* disable_kernel_altivec()
|
||||
* Disable the VMX.
|
||||
*/
|
||||
_GLOBAL(disable_kernel_altivec)
|
||||
mfmsr r3
|
||||
rldicl r0,r3,(63-MSR_VEC_LG),1
|
||||
rldicl r3,r0,(MSR_VEC_LG+1),0
|
||||
mtmsrd r3 /* disable use of VMX now */
|
||||
isync
|
||||
blr
|
||||
#endif /* 0 */
|
||||
|
||||
/*
|
||||
* giveup_altivec(tsk)
|
||||
* Disable VMX for the task given as the argument,
|
||||
* and save the vector registers in its thread_struct.
|
||||
* Enables the VMX for use in the kernel on return.
|
||||
*/
|
||||
_GLOBAL(giveup_altivec)
|
||||
mfmsr r5
|
||||
oris r5,r5,MSR_VEC@h
|
||||
mtmsrd r5 /* enable use of VMX now */
|
||||
isync
|
||||
cmpdi 0,r3,0
|
||||
beqlr- /* if no previous owner, done */
|
||||
addi r3,r3,THREAD /* want THREAD of task */
|
||||
ld r5,PT_REGS(r3)
|
||||
cmpdi 0,r5,0
|
||||
SAVE_32VRS(0,r4,r3)
|
||||
mfvscr vr0
|
||||
li r4,THREAD_VSCR
|
||||
stvx vr0,r4,r3
|
||||
beq 1f
|
||||
ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
|
||||
lis r3,MSR_VEC@h
|
||||
andc r4,r4,r3 /* disable FP for previous task */
|
||||
std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
|
||||
1:
|
||||
#ifndef CONFIG_SMP
|
||||
li r5,0
|
||||
ld r4,last_task_used_altivec@got(r2)
|
||||
std r5,0(r4)
|
||||
#endif /* CONFIG_SMP */
|
||||
blr
|
||||
|
||||
#endif /* CONFIG_ALTIVEC */
|
||||
|
||||
_GLOBAL(__setup_cpu_power3)
|
||||
blr
|
||||
|
||||
_GLOBAL(execve)
|
||||
li r0,__NR_execve
|
||||
sc
|
||||
bnslr
|
||||
neg r3,r3
|
||||
blr
|
||||
|
||||
/* kexec_wait(phys_cpu)
|
||||
*
|
||||
* wait for the flag to change, indicating this kernel is going away but
|
||||
* the slave code for the next one is at addresses 0 to 100.
|
||||
*
|
||||
* This is used by all slaves.
|
||||
*
|
||||
* Physical (hardware) cpu id should be in r3.
|
||||
*/
|
||||
_GLOBAL(kexec_wait)
|
||||
bl 1f
|
||||
1: mflr r5
|
||||
addi r5,r5,kexec_flag-1b
|
||||
|
||||
99: HMT_LOW
|
||||
#ifdef CONFIG_KEXEC /* use no memory without kexec */
|
||||
lwz r4,0(r5)
|
||||
cmpwi 0,r4,0
|
||||
bnea 0x60
|
||||
#endif
|
||||
b 99b
|
||||
|
||||
/* this can be in text because we won't change it until we are
|
||||
* running in real anyways
|
||||
*/
|
||||
kexec_flag:
|
||||
.long 0
|
||||
|
||||
|
||||
#ifdef CONFIG_KEXEC
|
||||
|
||||
/* kexec_smp_wait(void)
|
||||
*
|
||||
* call with interrupts off
|
||||
* note: this is a terminal routine, it does not save lr
|
||||
*
|
||||
* get phys id from paca
|
||||
* set paca id to -1 to say we got here
|
||||
* switch to real mode
|
||||
* join other cpus in kexec_wait(phys_id)
|
||||
*/
|
||||
_GLOBAL(kexec_smp_wait)
|
||||
lhz r3,PACAHWCPUID(r13)
|
||||
li r4,-1
|
||||
sth r4,PACAHWCPUID(r13) /* let others know we left */
|
||||
bl real_mode
|
||||
b .kexec_wait
|
||||
|
||||
/*
|
||||
* switch to real mode (turn mmu off)
|
||||
* we use the early kernel trick that the hardware ignores bits
|
||||
* 0 and 1 (big endian) of the effective address in real mode
|
||||
*
|
||||
* don't overwrite r3 here, it is live for kexec_wait above.
|
||||
*/
|
||||
real_mode: /* assume normal blr return */
|
||||
1: li r9,MSR_RI
|
||||
li r10,MSR_DR|MSR_IR
|
||||
mflr r11 /* return address to SRR0 */
|
||||
mfmsr r12
|
||||
andc r9,r12,r9
|
||||
andc r10,r12,r10
|
||||
|
||||
mtmsrd r9,1
|
||||
mtspr SPRN_SRR1,r10
|
||||
mtspr SPRN_SRR0,r11
|
||||
rfid
|
||||
|
||||
|
||||
/*
|
||||
* kexec_sequence(newstack, start, image, control, clear_all())
|
||||
*
|
||||
* does the grungy work with stack switching and real mode switches
|
||||
* also does simple calls to other code
|
||||
*/
|
||||
|
||||
_GLOBAL(kexec_sequence)
|
||||
mflr r0
|
||||
std r0,16(r1)
|
||||
|
||||
/* switch stacks to newstack -- &kexec_stack.stack */
|
||||
stdu r1,THREAD_SIZE-112(r3)
|
||||
mr r1,r3
|
||||
|
||||
li r0,0
|
||||
std r0,16(r1)
|
||||
|
||||
/* save regs for local vars on new stack.
|
||||
* yes, we won't go back, but ...
|
||||
*/
|
||||
std r31,-8(r1)
|
||||
std r30,-16(r1)
|
||||
std r29,-24(r1)
|
||||
std r28,-32(r1)
|
||||
std r27,-40(r1)
|
||||
std r26,-48(r1)
|
||||
std r25,-56(r1)
|
||||
|
||||
stdu r1,-112-64(r1)
|
||||
|
||||
/* save args into preserved regs */
|
||||
mr r31,r3 /* newstack (both) */
|
||||
mr r30,r4 /* start (real) */
|
||||
mr r29,r5 /* image (virt) */
|
||||
mr r28,r6 /* control, unused */
|
||||
mr r27,r7 /* clear_all() fn desc */
|
||||
mr r26,r8 /* spare */
|
||||
lhz r25,PACAHWCPUID(r13) /* get our phys cpu from paca */
|
||||
|
||||
/* disable interrupts, we are overwriting kernel data next */
|
||||
mfmsr r3
|
||||
rlwinm r3,r3,0,17,15
|
||||
mtmsrd r3,1
|
||||
|
||||
/* copy dest pages, flush whole dest image */
|
||||
mr r3,r29
|
||||
bl .kexec_copy_flush /* (image) */
|
||||
|
||||
/* turn off mmu */
|
||||
bl real_mode
|
||||
|
||||
/* clear out hardware hash page table and tlb */
|
||||
ld r5,0(r27) /* deref function descriptor */
|
||||
mtctr r5
|
||||
bctrl /* ppc_md.hash_clear_all(void); */
|
||||
|
||||
/*
|
||||
* kexec image calling is:
|
||||
* the first 0x100 bytes of the entry point are copied to 0
|
||||
*
|
||||
* all slaves branch to slave = 0x60 (absolute)
|
||||
* slave(phys_cpu_id);
|
||||
*
|
||||
* master goes to start = entry point
|
||||
* start(phys_cpu_id, start, 0);
|
||||
*
|
||||
*
|
||||
* a wrapper is needed to call existing kernels, here is an approximate
|
||||
* description of one method:
|
||||
*
|
||||
* v2: (2.6.10)
|
||||
* start will be near the boot_block (maybe 0x100 bytes before it?)
|
||||
* it will have a 0x60, which will b to boot_block, where it will wait
|
||||
* and 0 will store phys into struct boot-block and load r3 from there,
|
||||
* copy kernel 0-0x100 and tell slaves to back down to 0x60 again
|
||||
*
|
||||
* v1: (2.6.9)
|
||||
* boot block will have all cpus scanning device tree to see if they
|
||||
* are the boot cpu ?????
|
||||
* other device tree differences (prop sizes, va vs pa, etc)...
|
||||
*/
|
||||
|
||||
/* copy 0x100 bytes starting at start to 0 */
|
||||
li r3,0
|
||||
mr r4,r30
|
||||
li r5,0x100
|
||||
li r6,0
|
||||
bl .copy_and_flush /* (dest, src, copy limit, start offset) */
|
||||
1: /* assume normal blr return */
|
||||
|
||||
/* release other cpus to the new kernel secondary start at 0x60 */
|
||||
mflr r5
|
||||
li r6,1
|
||||
stw r6,kexec_flag-1b(5)
|
||||
mr r3,r25 # my phys cpu
|
||||
mr r4,r30 # start, aka phys mem offset
|
||||
mtlr 4
|
||||
li r5,0
|
||||
blr /* image->start(physid, image->start, 0); */
|
||||
#endif /* CONFIG_KEXEC */
|
|
@@ -0,0 +1,323 @@
/*
 * This file contains the table of syscall-handling functions.
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras.
 *
 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <asm/ppc_asm.h>

#ifdef CONFIG_PPC64
#define SYSCALL(func)		.llong	.sys_##func,.sys_##func
#define SYSCALL32(func)		.llong	.sys_##func,.sys32_##func
#define COMPAT_SYS(func)	.llong	.sys_##func,.compat_sys_##func
#define PPC_SYS(func)		.llong	.ppc_##func,.ppc_##func
#define OLDSYS(func)		.llong	.sys_ni_syscall,.sys_ni_syscall
#define SYS32ONLY(func)		.llong	.sys_ni_syscall,.sys32_##func
#define SYSX(f, f3264, f32)	.llong	.f,.f3264
#else
#define SYSCALL(func)		.long	sys_##func
#define SYSCALL32(func)		.long	sys_##func
#define COMPAT_SYS(func)	.long	sys_##func
#define PPC_SYS(func)		.long	ppc_##func
#define OLDSYS(func)		.long	sys_##func
#define SYS32ONLY(func)		.long	sys_##func
#define SYSX(f, f3264, f32)	.long	f32
#endif

#ifdef CONFIG_PPC64
#define sys_sigpending	sys_ni_syscall
#define sys_old_getrlimit sys_ni_syscall
#else
#define ppc_rtas	sys_ni_syscall
#endif

_GLOBAL(sys_call_table)
SYSCALL(restart_syscall)
SYSCALL(exit)
PPC_SYS(fork)
SYSCALL(read)
SYSCALL(write)
COMPAT_SYS(open)
SYSCALL(close)
SYSCALL32(waitpid)
SYSCALL32(creat)
SYSCALL(link)
SYSCALL(unlink)
SYSCALL32(execve)
SYSCALL(chdir)
SYSX(sys64_time,compat_sys_time,sys_time)
SYSCALL(mknod)
SYSCALL(chmod)
SYSCALL(lchown)
SYSCALL(ni_syscall)
OLDSYS(stat)
SYSX(sys_lseek,ppc32_lseek,sys_lseek)
SYSCALL(getpid)
COMPAT_SYS(mount)
SYSX(sys_ni_syscall,sys_oldumount,sys_oldumount)
SYSCALL(setuid)
SYSCALL(getuid)
COMPAT_SYS(stime)
SYSCALL32(ptrace)
SYSCALL(alarm)
OLDSYS(fstat)
SYSCALL32(pause)
COMPAT_SYS(utime)
SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
SYSCALL32(access)
SYSCALL32(nice)
SYSCALL(ni_syscall)
SYSCALL(sync)
SYSCALL32(kill)
SYSCALL(rename)
SYSCALL32(mkdir)
SYSCALL(rmdir)
SYSCALL(dup)
SYSCALL(pipe)
COMPAT_SYS(times)
SYSCALL(ni_syscall)
SYSCALL(brk)
SYSCALL(setgid)
SYSCALL(getgid)
SYSCALL(signal)
SYSCALL(geteuid)
SYSCALL(getegid)
SYSCALL(acct)
SYSCALL(umount)
SYSCALL(ni_syscall)
COMPAT_SYS(ioctl)
COMPAT_SYS(fcntl)
SYSCALL(ni_syscall)
SYSCALL32(setpgid)
SYSCALL(ni_syscall)
SYS32ONLY(olduname)
SYSCALL32(umask)
SYSCALL(chroot)
SYSCALL(ustat)
SYSCALL(dup2)
SYSCALL(getppid)
SYSCALL(getpgrp)
SYSCALL(setsid)
SYS32ONLY(sigaction)
SYSCALL(sgetmask)
SYSCALL32(ssetmask)
SYSCALL(setreuid)
SYSCALL(setregid)
SYSX(sys_ni_syscall,ppc32_sigsuspend,ppc_sigsuspend)
COMPAT_SYS(sigpending)
SYSCALL32(sethostname)
COMPAT_SYS(setrlimit)
COMPAT_SYS(old_getrlimit)
COMPAT_SYS(getrusage)
SYSCALL32(gettimeofday)
SYSCALL32(settimeofday)
SYSCALL32(getgroups)
SYSCALL32(setgroups)
SYSX(sys_ni_syscall,sys_ni_syscall,ppc_select)
SYSCALL(symlink)
OLDSYS(lstat)
SYSCALL32(readlink)
SYSCALL(uselib)
SYSCALL(swapon)
SYSCALL(reboot)
SYSX(sys_ni_syscall,old32_readdir,old_readdir)
SYSCALL(mmap)
SYSCALL(munmap)
SYSCALL(truncate)
SYSCALL(ftruncate)
SYSCALL(fchmod)
SYSCALL(fchown)
SYSCALL32(getpriority)
SYSCALL32(setpriority)
SYSCALL(ni_syscall)
COMPAT_SYS(statfs)
COMPAT_SYS(fstatfs)
SYSCALL(ni_syscall)
COMPAT_SYS(socketcall)
SYSCALL32(syslog)
COMPAT_SYS(setitimer)
COMPAT_SYS(getitimer)
COMPAT_SYS(newstat)
COMPAT_SYS(newlstat)
COMPAT_SYS(newfstat)
SYSX(sys_ni_syscall,sys32_uname,sys_uname)
SYSCALL(ni_syscall)
SYSCALL(vhangup)
SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
COMPAT_SYS(wait4)
SYSCALL(swapoff)
SYSCALL32(sysinfo)
SYSCALL32(ipc)
SYSCALL(fsync)
SYSX(sys_ni_syscall,ppc32_sigreturn,sys_sigreturn)
PPC_SYS(clone)
SYSCALL32(setdomainname)
SYSX(ppc64_newuname,ppc64_newuname,sys_newuname)
SYSCALL(ni_syscall)
SYSCALL32(adjtimex)
SYSCALL(mprotect)
SYSX(sys_ni_syscall,compat_sys_sigprocmask,sys_sigprocmask)
SYSCALL(ni_syscall)
SYSCALL(init_module)
SYSCALL(delete_module)
SYSCALL(ni_syscall)
SYSCALL(quotactl)
SYSCALL32(getpgid)
SYSCALL(fchdir)
SYSCALL(bdflush)
SYSCALL32(sysfs)
SYSX(ppc64_personality,ppc64_personality,sys_personality)
SYSCALL(ni_syscall)
SYSCALL(setfsuid)
SYSCALL(setfsgid)
SYSCALL(llseek)
SYSCALL32(getdents)
SYSX(sys_select,ppc32_select,ppc_select)
SYSCALL(flock)
SYSCALL(msync)
COMPAT_SYS(readv)
COMPAT_SYS(writev)
SYSCALL32(getsid)
SYSCALL(fdatasync)
SYSCALL32(sysctl)
SYSCALL(mlock)
SYSCALL(munlock)
SYSCALL(mlockall)
SYSCALL(munlockall)
SYSCALL32(sched_setparam)
SYSCALL32(sched_getparam)
SYSCALL32(sched_setscheduler)
SYSCALL32(sched_getscheduler)
SYSCALL(sched_yield)
SYSCALL32(sched_get_priority_max)
SYSCALL32(sched_get_priority_min)
SYSCALL32(sched_rr_get_interval)
COMPAT_SYS(nanosleep)
SYSCALL(mremap)
SYSCALL(setresuid)
SYSCALL(getresuid)
SYSCALL(ni_syscall)
SYSCALL(poll)
COMPAT_SYS(nfsservctl)
SYSCALL(setresgid)
SYSCALL(getresgid)
SYSCALL32(prctl)
SYSX(ppc64_rt_sigreturn,ppc32_rt_sigreturn,sys_rt_sigreturn)
SYSCALL32(rt_sigaction)
SYSCALL32(rt_sigprocmask)
SYSCALL32(rt_sigpending)
COMPAT_SYS(rt_sigtimedwait)
SYSCALL32(rt_sigqueueinfo)
SYSX(ppc64_rt_sigsuspend,ppc32_rt_sigsuspend,ppc_rt_sigsuspend)
SYSCALL32(pread64)
SYSCALL32(pwrite64)
SYSCALL(chown)
SYSCALL(getcwd)
SYSCALL(capget)
SYSCALL(capset)
SYSCALL32(sigaltstack)
SYSX(sys_sendfile64,sys32_sendfile,sys_sendfile)
SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
PPC_SYS(vfork)
COMPAT_SYS(getrlimit)
SYSCALL32(readahead)
SYS32ONLY(mmap2)
SYS32ONLY(truncate64)
SYS32ONLY(ftruncate64)
SYSX(sys_ni_syscall,sys_stat64,sys_stat64)
SYSX(sys_ni_syscall,sys_lstat64,sys_lstat64)
SYSX(sys_ni_syscall,sys_fstat64,sys_fstat64)
SYSCALL32(pciconfig_read)
SYSCALL32(pciconfig_write)
SYSCALL32(pciconfig_iobase)
SYSCALL(ni_syscall)
SYSCALL(getdents64)
SYSCALL(pivot_root)
SYSX(sys_ni_syscall,compat_sys_fcntl64,sys_fcntl64)
SYSCALL(madvise)
SYSCALL(mincore)
SYSCALL(gettid)
SYSCALL(tkill)
SYSCALL(setxattr)
SYSCALL(lsetxattr)
SYSCALL(fsetxattr)
SYSCALL(getxattr)
SYSCALL(lgetxattr)
SYSCALL(fgetxattr)
SYSCALL(listxattr)
SYSCALL(llistxattr)
SYSCALL(flistxattr)
SYSCALL(removexattr)
SYSCALL(lremovexattr)
SYSCALL(fremovexattr)
COMPAT_SYS(futex)
COMPAT_SYS(sched_setaffinity)
COMPAT_SYS(sched_getaffinity)
SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
SYS32ONLY(sendfile64)
COMPAT_SYS(io_setup)
SYSCALL(io_destroy)
COMPAT_SYS(io_getevents)
COMPAT_SYS(io_submit)
SYSCALL(io_cancel)
SYSCALL(set_tid_address)
SYSX(sys_fadvise64,ppc32_fadvise64,sys_fadvise64)
SYSCALL(exit_group)
SYSX(sys_lookup_dcookie,ppc32_lookup_dcookie,sys_lookup_dcookie)
SYSCALL(epoll_create)
SYSCALL(epoll_ctl)
SYSCALL(epoll_wait)
SYSCALL(remap_file_pages)
SYSX(sys_timer_create,ppc32_timer_create,sys_timer_create)
COMPAT_SYS(timer_settime)
COMPAT_SYS(timer_gettime)
SYSCALL(timer_getoverrun)
SYSCALL(timer_delete)
COMPAT_SYS(clock_settime)
COMPAT_SYS(clock_gettime)
COMPAT_SYS(clock_getres)
COMPAT_SYS(clock_nanosleep)
SYSX(ppc64_swapcontext,ppc32_swapcontext,ppc_swapcontext)
SYSCALL32(tgkill)
SYSCALL32(utimes)
COMPAT_SYS(statfs64)
COMPAT_SYS(fstatfs64)
SYSX(sys_ni_syscall, ppc32_fadvise64_64, sys_fadvise64_64)
PPC_SYS(rtas)
OLDSYS(debug_setcontext)
SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
COMPAT_SYS(mbind)
COMPAT_SYS(get_mempolicy)
COMPAT_SYS(set_mempolicy)
COMPAT_SYS(mq_open)
SYSCALL(mq_unlink)
COMPAT_SYS(mq_timedsend)
COMPAT_SYS(mq_timedreceive)
COMPAT_SYS(mq_notify)
COMPAT_SYS(mq_getsetattr)
COMPAT_SYS(kexec_load)
SYSCALL32(add_key)
SYSCALL32(request_key)
COMPAT_SYS(keyctl)
COMPAT_SYS(waitid)
SYSCALL32(ioprio_set)
SYSCALL32(ioprio_get)
SYSCALL(inotify_init)
SYSCALL(inotify_add_watch)
SYSCALL(inotify_rm_watch)