Merge git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc-merge

Author: Linus Torvalds
Date:   2005-11-10 07:37:51 -08:00
Commit: 3ae0af12b4
143 changed files with 2560 additions and 2562 deletions

View File

@ -599,6 +599,10 @@ config HAVE_ARCH_EARLY_PFN_TO_NID
def_bool y
depends on NEED_MULTIPLE_NODES
config ARCH_MEMORY_PROBE
def_bool y
depends on MEMORY_HOTPLUG
# Some NUMA nodes have memory ranges that span
# other nodes. Even though a pfn is valid and
# between a node's start and end pfns, it may not

View File

@ -4,6 +4,7 @@
ifeq ($(CONFIG_PPC64),y)
EXTRA_CFLAGS += -mno-minimal-toc
CFLAGS_ioctl32.o += -Ifs/
endif
ifeq ($(CONFIG_PPC32),y)
CFLAGS_prom_init.o += -fPIC
@ -11,15 +12,21 @@ CFLAGS_btext.o += -fPIC
endif
obj-y := semaphore.o cputable.o ptrace.o syscalls.o \
signal_32.o pmc.o
irq.o signal_32.o pmc.o
obj-$(CONFIG_PPC64) += setup_64.o binfmt_elf32.o sys_ppc32.o \
signal_64.o ptrace32.o systbl.o
signal_64.o ptrace32.o systbl.o \
paca.o ioctl32.o cpu_setup_power4.o \
firmware.o sysfs.o
obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o
obj-$(CONFIG_POWER4) += idle_power4.o
obj-$(CONFIG_PPC_OF) += of_device.o
obj-$(CONFIG_PPC_RTAS) += rtas.o
procfs-$(CONFIG_PPC64) := proc_ppc64.o
obj-$(CONFIG_PROC_FS) += $(procfs-y)
rtaspci-$(CONFIG_PPC64) := rtas_pci.o
obj-$(CONFIG_PPC_RTAS) += rtas.o $(rtaspci-y)
obj-$(CONFIG_RTAS_FLASH) += rtas_flash.o
obj-$(CONFIG_RTAS_PROC) += rtas-proc.o
obj-$(CONFIG_LPARCFG) += lparcfg.o
obj-$(CONFIG_IBMVIO) += vio.o
obj-$(CONFIG_GENERIC_TBSYNC) += smp-tbsync.o

View File

@ -106,7 +106,6 @@ int main(void)
DEFINE(ICACHEL1LINESIZE, offsetof(struct ppc64_caches, iline_size));
DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_iline_size));
DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page));
DEFINE(PLATFORM, offsetof(struct systemcfg, platform));
DEFINE(PLATFORM_LPAR, PLATFORM_LPAR);
/* paca */

View File

@ -52,6 +52,9 @@ extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec);
#define COMMON_USER (PPC_FEATURE_32 | PPC_FEATURE_HAS_FPU | \
PPC_FEATURE_HAS_MMU)
#define COMMON_USER_PPC64 (COMMON_USER | PPC_FEATURE_64)
#define COMMON_USER_POWER4 (COMMON_USER_PPC64 | PPC_FEATURE_POWER4)
#define COMMON_USER_POWER5 (COMMON_USER_PPC64 | PPC_FEATURE_POWER5)
#define COMMON_USER_POWER5_PLUS (COMMON_USER_PPC64 | PPC_FEATURE_POWER5_PLUS)
/* We only set the spe features if the kernel was compiled with
@ -160,7 +163,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00350000,
.cpu_name = "POWER4 (gp)",
.cpu_features = CPU_FTRS_POWER4,
.cpu_user_features = COMMON_USER_PPC64,
.cpu_user_features = COMMON_USER_POWER4,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
@ -175,7 +178,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00380000,
.cpu_name = "POWER4+ (gq)",
.cpu_features = CPU_FTRS_POWER4,
.cpu_user_features = COMMON_USER_PPC64,
.cpu_user_features = COMMON_USER_POWER4,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
@ -190,7 +193,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00390000,
.cpu_name = "PPC970",
.cpu_features = CPU_FTRS_PPC970,
.cpu_user_features = COMMON_USER_PPC64 |
.cpu_user_features = COMMON_USER_POWER4 |
PPC_FEATURE_HAS_ALTIVEC_COMP,
.icache_bsize = 128,
.dcache_bsize = 128,
@ -212,7 +215,7 @@ struct cpu_spec cpu_specs[] = {
#else
.cpu_features = CPU_FTRS_PPC970,
#endif
.cpu_user_features = COMMON_USER_PPC64 |
.cpu_user_features = COMMON_USER_POWER4 |
PPC_FEATURE_HAS_ALTIVEC_COMP,
.icache_bsize = 128,
.dcache_bsize = 128,
@ -230,7 +233,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00440000,
.cpu_name = "PPC970MP",
.cpu_features = CPU_FTRS_PPC970,
.cpu_user_features = COMMON_USER_PPC64 |
.cpu_user_features = COMMON_USER_POWER4 |
PPC_FEATURE_HAS_ALTIVEC_COMP,
.icache_bsize = 128,
.dcache_bsize = 128,
@ -245,7 +248,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x003a0000,
.cpu_name = "POWER5 (gr)",
.cpu_features = CPU_FTRS_POWER5,
.cpu_user_features = COMMON_USER_PPC64,
.cpu_user_features = COMMON_USER_POWER5,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
@ -260,7 +263,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x003b0000,
.cpu_name = "POWER5 (gs)",
.cpu_features = CPU_FTRS_POWER5,
.cpu_user_features = COMMON_USER_PPC64,
.cpu_user_features = COMMON_USER_POWER5_PLUS,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
@ -276,7 +279,7 @@ struct cpu_spec cpu_specs[] = {
.cpu_name = "Cell Broadband Engine",
.cpu_features = CPU_FTRS_CELL,
.cpu_user_features = COMMON_USER_PPC64 |
PPC_FEATURE_HAS_ALTIVEC_COMP,
PPC_FEATURE_CELL | PPC_FEATURE_HAS_ALTIVEC_COMP,
.icache_bsize = 128,
.dcache_bsize = 128,
.cpu_setup = __setup_cpu_be,

View File

@ -1,6 +1,4 @@
/*
* arch/ppc64/kernel/firmware.c
*
* Extracted from cputable.c
*
* Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)

View File

@ -41,20 +41,20 @@ _GLOBAL(load_up_fpu)
#ifndef CONFIG_SMP
LOADBASE(r3, last_task_used_math)
toreal(r3)
LDL r4,OFF(last_task_used_math)(r3)
CMPI 0,r4,0
PPC_LL r4,OFF(last_task_used_math)(r3)
PPC_LCMPI 0,r4,0
beq 1f
toreal(r4)
addi r4,r4,THREAD /* want last_task_used_math->thread */
SAVE_32FPRS(0, r4)
mffs fr0
stfd fr0,THREAD_FPSCR(r4)
LDL r5,PT_REGS(r4)
PPC_LL r5,PT_REGS(r4)
toreal(r5)
LDL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
PPC_LL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
li r10,MSR_FP|MSR_FE0|MSR_FE1
andc r4,r4,r10 /* disable FP for previous task */
STL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
PPC_STL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
/* enable use of FP after return */
@ -77,7 +77,7 @@ _GLOBAL(load_up_fpu)
#ifndef CONFIG_SMP
subi r4,r5,THREAD
fromreal(r4)
STL r4,OFF(last_task_used_math)(r3)
PPC_STL r4,OFF(last_task_used_math)(r3)
#endif /* CONFIG_SMP */
/* restore registers and return */
/* we haven't used ctr or xer or lr */
@ -97,24 +97,24 @@ _GLOBAL(giveup_fpu)
MTMSRD(r5) /* enable use of fpu now */
SYNC_601
isync
CMPI 0,r3,0
PPC_LCMPI 0,r3,0
beqlr- /* if no previous owner, done */
addi r3,r3,THREAD /* want THREAD of task */
LDL r5,PT_REGS(r3)
CMPI 0,r5,0
PPC_LL r5,PT_REGS(r3)
PPC_LCMPI 0,r5,0
SAVE_32FPRS(0, r3)
mffs fr0
stfd fr0,THREAD_FPSCR(r3)
beq 1f
LDL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
PPC_LL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
li r3,MSR_FP|MSR_FE0|MSR_FE1
andc r4,r4,r3 /* disable FP for previous task */
STL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
PPC_STL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
li r5,0
LOADBASE(r4,last_task_used_math)
STL r5,OFF(last_task_used_math)(r4)
PPC_STL r5,OFF(last_task_used_math)(r4)
#endif /* CONFIG_SMP */
blr

View File

@ -28,7 +28,6 @@
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/systemcfg.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/bug.h>
@ -1697,25 +1696,14 @@ _GLOBAL(pmac_secondary_start)
* SPRG3 = paca virtual address
*/
_GLOBAL(__secondary_start)
/* Set thread priority to MEDIUM */
HMT_MEDIUM
HMT_MEDIUM /* Set thread priority to MEDIUM */
/* Load TOC */
ld r2,PACATOC(r13)
li r6,0
stb r6,PACAPROCENABLED(r13)
#ifndef CONFIG_PPC_ISERIES
/* Initialize the page table pointer register. */
LOADADDR(r6,_SDR1)
ld r6,0(r6) /* get the value of _SDR1 */
mtspr SPRN_SDR1,r6 /* set the htab location */
#endif
/* Initialize the first segment table (or SLB) entry */
ld r3,PACASTABVIRT(r13) /* get addr of segment table */
BEGIN_FTR_SECTION
bl .stab_initialize
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
bl .slb_initialize
/* Do early setup for that CPU (stab, slb, hash table pointer) */
bl .early_setup_secondary
/* Initialize the kernel stack. Just a repeat for iSeries. */
LOADADDR(r3,current_set)
@ -1724,37 +1712,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
std r1,PACAKSAVE(r13)
ld r3,PACASTABREAL(r13) /* get raddr of segment table */
ori r4,r3,1 /* turn on valid bit */
#ifdef CONFIG_PPC_ISERIES
li r0,-1 /* hypervisor call */
li r3,1
sldi r3,r3,63 /* 0x8000000000000000 */
ori r3,r3,4 /* 0x8000000000000004 */
sc /* HvCall_setASR */
#else
/* set the ASR */
ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */
ld r3,0(r3)
lwz r3,PLATFORM(r3) /* r3 = platform flags */
andi. r3,r3,PLATFORM_LPAR /* Test if bit 0 is set (LPAR bit) */
beq 98f /* branch if result is 0 */
mfspr r3,SPRN_PVR
srwi r3,r3,16
cmpwi r3,0x37 /* SStar */
beq 97f
cmpwi r3,0x36 /* IStar */
beq 97f
cmpwi r3,0x34 /* Pulsar */
bne 98f
97: li r3,H_SET_ASR /* hcall = H_SET_ASR */
HVSC /* Invoking hcall */
b 99f
98: /* !(rpa hypervisor) || !(star) */
mtasr r4 /* set the stab location */
99:
#endif
/* Clear backchain so we get nice backtraces */
li r7,0
mtlr r7
@ -1777,6 +1735,7 @@ _GLOBAL(start_secondary_prolog)
li r3,0
std r3,0(r1) /* Zero the stack frame pointer */
bl .start_secondary
b .
#endif
/*
@ -1896,40 +1855,6 @@ _STATIC(start_here_multiplatform)
mr r3,r31
bl .early_setup
/* set the ASR */
ld r3,PACASTABREAL(r13)
ori r4,r3,1 /* turn on valid bit */
ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */
ld r3,0(r3)
lwz r3,PLATFORM(r3) /* r3 = platform flags */
andi. r3,r3,PLATFORM_LPAR /* Test if bit 0 is set (LPAR bit) */
beq 98f /* branch if result is 0 */
mfspr r3,SPRN_PVR
srwi r3,r3,16
cmpwi r3,0x37 /* SStar */
beq 97f
cmpwi r3,0x36 /* IStar */
beq 97f
cmpwi r3,0x34 /* Pulsar */
bne 98f
97: li r3,H_SET_ASR /* hcall = H_SET_ASR */
HVSC /* Invoking hcall */
b 99f
98: /* !(rpa hypervisor) || !(star) */
mtasr r4 /* set the stab location */
99:
/* Set SDR1 (hash table pointer) */
ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */
ld r3,0(r3)
lwz r3,PLATFORM(r3) /* r3 = platform flags */
/* Test if bit 0 is set (LPAR bit) */
andi. r3,r3,PLATFORM_LPAR
bne 98f /* branch if result is !0 */
LOADADDR(r6,_SDR1) /* Only if NOT LPAR */
add r6,r6,r26
ld r6,0(r6) /* get the value of _SDR1 */
mtspr SPRN_SDR1,r6 /* set the htab location */
98:
LOADADDR(r3,.start_here_common)
SET_REG_TO_CONST(r4, MSR_KERNEL)
mtspr SPRN_SRR0,r3

View File

@ -5,8 +5,8 @@
* Copyright (C) 1992 Linus Torvalds
* Adapted from arch/i386 by Gary Thomas
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
* Updated and modified by Cort Dougan (cort@cs.nmt.edu)
* Copyright (C) 1996 Cort Dougan
* Updated and modified by Cort Dougan <cort@fsmlabs.com>
* Copyright (C) 1996-2001 Cort Dougan
* Adapted for Power Macintosh by Paul Mackerras
* Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
* Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
@ -21,6 +21,14 @@
* instead of just grabbing them. Thus setups with different IRQ numbers
* shouldn't result in any weird surprises, and installing new handlers
* should be easier.
*
* The MPC8xx has an interrupt mask in the SIU. If a bit is set, the
* interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit
* mask register (of which only 16 are defined), hence the weird shifting
* and complement of the cached_irq_mask. I want to be able to stuff
* this right into the SIU SMASK register.
* Many of the prep/chrp functions are conditional compiled on CONFIG_8xx
* to reduce code space and undefined function references.
*/
#include <linux/errno.h>
@ -29,6 +37,7 @@
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
@ -40,9 +49,13 @@
#include <linux/irq.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#ifdef CONFIG_PPC64
#include <linux/kallsyms.h>
#endif
#include <asm/uaccess.h>
#include <asm/system.h>
@ -52,21 +65,42 @@
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/iseries/it_lp_queue.h>
#include <asm/machdep.h>
#ifdef CONFIG_PPC64
#include <asm/iseries/it_lp_queue.h>
#include <asm/paca.h>
#endif
#ifdef CONFIG_SMP
static int ppc_spurious_interrupts;
#if defined(CONFIG_PPC_ISERIES) && defined(CONFIG_SMP)
extern void iSeries_smp_message_recv(struct pt_regs *);
#endif
extern irq_desc_t irq_desc[NR_IRQS];
#ifdef CONFIG_PPC32
#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
atomic_t ppc_n_lost_interrupts;
#ifdef CONFIG_TAU_INT
extern int tau_initialized;
extern int tau_interrupts(int);
#endif
#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE)
extern atomic_t ipi_recv;
extern atomic_t ipi_sent;
#endif
#endif /* CONFIG_PPC32 */
#ifdef CONFIG_PPC64
EXPORT_SYMBOL(irq_desc);
int distribute_irqs = 1;
int __irq_offset_value;
int ppc_spurious_interrupts;
u64 ppc64_interrupt_controller;
#endif /* CONFIG_PPC64 */
int show_interrupts(struct seq_file *p, void *v)
{
@ -76,11 +110,9 @@ int show_interrupts(struct seq_file *p, void *v)
unsigned long flags;
if (i == 0) {
seq_printf(p, " ");
for (j=0; j<NR_CPUS; j++) {
if (cpu_online(j))
seq_puts(p, " ");
for_each_online_cpu(j)
seq_printf(p, "CPU%d ", j);
}
seq_putc(p, '\n');
}
@ -92,17 +124,15 @@ int show_interrupts(struct seq_file *p, void *v)
goto skip;
seq_printf(p, "%3d: ", i);
#ifdef CONFIG_SMP
for (j = 0; j < NR_CPUS; j++) {
if (cpu_online(j))
for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
}
#else
seq_printf(p, "%10u ", kstat_irqs(i));
#endif /* CONFIG_SMP */
if (desc->handler)
seq_printf(p, " %s ", desc->handler->typename);
else
seq_printf(p, " None ");
seq_puts(p, " None ");
seq_printf(p, "%s", (desc->status & IRQ_LEVEL) ? "Level " : "Edge ");
seq_printf(p, " %s", action->name);
for (action = action->next; action; action = action->next)
@ -110,8 +140,25 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
skip:
spin_unlock_irqrestore(&desc->lock, flags);
} else if (i == NR_IRQS)
} else if (i == NR_IRQS) {
#ifdef CONFIG_PPC32
#ifdef CONFIG_TAU_INT
if (tau_initialized){
seq_puts(p, "TAU: ");
for (j = 0; j < NR_CPUS; j++)
if (cpu_online(j))
seq_printf(p, "%10u ", tau_interrupts(j));
seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n");
}
#endif
#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE)
/* should this be per processor send/receive? */
seq_printf(p, "IPI (recv/sent): %10u/%u\n",
atomic_read(&ipi_recv), atomic_read(&ipi_sent));
#endif
#endif /* CONFIG_PPC32 */
seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts);
}
return 0;
}
@ -144,126 +191,6 @@ void fixup_irqs(cpumask_t map)
}
#endif
extern int noirqdebug;
/*
* Eventually, this should take an array of interrupts and an array size
* so it can dispatch multiple interrupts.
*/
void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq)
{
int status;
struct irqaction *action;
int cpu = smp_processor_id();
irq_desc_t *desc = get_irq_desc(irq);
irqreturn_t action_ret;
#ifdef CONFIG_IRQSTACKS
struct thread_info *curtp, *irqtp;
#endif
kstat_cpu(cpu).irqs[irq]++;
if (desc->status & IRQ_PER_CPU) {
/* no locking required for CPU-local interrupts: */
ack_irq(irq);
action_ret = handle_IRQ_event(irq, regs, desc->action);
desc->handler->end(irq);
return;
}
spin_lock(&desc->lock);
ack_irq(irq);
/*
REPLAY is when Linux resends an IRQ that was dropped earlier
WAITING is used by probe to mark irqs that are being tested
*/
status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
status |= IRQ_PENDING; /* we _want_ to handle it */
/*
* If the IRQ is disabled for whatever reason, we cannot
* use the action we have.
*/
action = NULL;
if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
action = desc->action;
if (!action || !action->handler) {
ppc_spurious_interrupts++;
printk(KERN_DEBUG "Unhandled interrupt %x, disabled\n", irq);
/* We can't call disable_irq here, it would deadlock */
if (!desc->depth)
desc->depth = 1;
desc->status |= IRQ_DISABLED;
/* This is not a real spurrious interrupt, we
* have to eoi it, so we jump to out
*/
mask_irq(irq);
goto out;
}
status &= ~IRQ_PENDING; /* we commit to handling */
status |= IRQ_INPROGRESS; /* we are handling it */
}
desc->status = status;
/*
* If there is no IRQ handler or it was disabled, exit early.
Since we set PENDING, if another processor is handling
a different instance of this same irq, the other processor
will take care of it.
*/
if (unlikely(!action))
goto out;
/*
* Edge triggered interrupts need to remember
* pending events.
* This applies to any hw interrupts that allow a second
* instance of the same irq to arrive while we are in do_IRQ
* or in the handler. But the code here only handles the _second_
* instance of the irq, not the third or fourth. So it is mostly
* useful for irq hardware that does not mask cleanly in an
* SMP environment.
*/
for (;;) {
spin_unlock(&desc->lock);
#ifdef CONFIG_IRQSTACKS
/* Switch to the irq stack to handle this */
curtp = current_thread_info();
irqtp = hardirq_ctx[smp_processor_id()];
if (curtp != irqtp) {
irqtp->task = curtp->task;
irqtp->flags = 0;
action_ret = call_handle_IRQ_event(irq, regs, action, irqtp);
irqtp->task = NULL;
if (irqtp->flags)
set_bits(irqtp->flags, &curtp->flags);
} else
#endif
action_ret = handle_IRQ_event(irq, regs, action);
spin_lock(&desc->lock);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret, regs);
if (likely(!(desc->status & IRQ_PENDING)))
break;
desc->status &= ~IRQ_PENDING;
}
out:
desc->status &= ~IRQ_INPROGRESS;
/*
* The ->end() handler has to deal with interrupts which got
* disabled while the handler was running.
*/
if (desc->handler) {
if (desc->handler->end)
desc->handler->end(irq);
else if (desc->handler->enable)
desc->handler->enable(irq);
}
spin_unlock(&desc->lock);
}
#ifdef CONFIG_PPC_ISERIES
void do_IRQ(struct pt_regs *regs)
{
@ -310,6 +237,9 @@ void do_IRQ(struct pt_regs *regs)
void do_IRQ(struct pt_regs *regs)
{
int irq;
#ifdef CONFIG_IRQSTACKS
struct thread_info *curtp, *irqtp;
#endif
irq_enter();
@ -328,20 +258,44 @@ void do_IRQ(struct pt_regs *regs)
}
#endif
/*
* Every platform is required to implement ppc_md.get_irq.
* This function will either return an irq number or -1 to
* indicate there are no more pending.
* The value -2 is for buggy hardware and means that this IRQ
* has already been handled. -- Tom
*/
irq = ppc_md.get_irq(regs);
if (irq >= 0)
ppc_irq_dispatch_handler(regs, irq);
else
if (irq >= 0) {
#ifdef CONFIG_IRQSTACKS
/* Switch to the irq stack to handle this */
curtp = current_thread_info();
irqtp = hardirq_ctx[smp_processor_id()];
if (curtp != irqtp) {
irqtp->task = curtp->task;
irqtp->flags = 0;
call___do_IRQ(irq, regs, irqtp);
irqtp->task = NULL;
if (irqtp->flags)
set_bits(irqtp->flags, &curtp->flags);
} else
#endif
__do_IRQ(irq, regs);
} else
#ifdef CONFIG_PPC32
if (irq != -2)
#endif
/* That's not SMP safe ... but who cares ? */
ppc_spurious_interrupts++;
irq_exit();
}
#endif /* CONFIG_PPC_ISERIES */
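As the comment in do_IRQ above spells out, every platform supplies ppc_md.get_irq, which returns a Linux irq number, -1 when nothing is pending, or -2 for an event that has already been handled. A minimal sketch of a platform hook honouring that contract might look as follows; the example_pic_* helpers and the EXAMPLE_PIC_NO_IRQ sentinel are invented for illustration and are not part of this commit or of any real interrupt controller driver.

        /* Illustrative only: helpers and sentinel below are assumptions. */
        static int example_pic_get_irq(struct pt_regs *regs)
        {
                unsigned int vec = example_pic_ack();   /* read/ack pending vector */

                if (vec == EXAMPLE_PIC_NO_IRQ)          /* controller reports nothing pending */
                        return -1;
                return example_pic_vec_to_irq(vec);     /* translate hw vector to Linux irq */
        }

        /* wired up at platform init time, e.g.:  ppc_md.get_irq = example_pic_get_irq; */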
void __init init_IRQ(void)
{
#ifdef CONFIG_PPC64
static int once = 0;
if (once)
@ -349,10 +303,14 @@ void __init init_IRQ(void)
once++;
#endif
ppc_md.init_IRQ();
#ifdef CONFIG_PPC64
irq_ctx_init();
#endif
}
#ifdef CONFIG_PPC64
#ifndef CONFIG_PPC_ISERIES
/*
* Virtual IRQ mapping code, used on systems with XICS interrupt controllers.
@ -517,3 +475,4 @@ static int __init setup_noirqdistrib(char *str)
}
__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */

View File

@ -35,6 +35,7 @@
#include <asm/time.h>
#include <asm/iseries/it_exp_vpd_panel.h>
#include <asm/prom.h>
#include <asm/systemcfg.h>
#define MODULE_VERS "1.6"
#define MODULE_NAME "lparcfg"
@ -371,7 +372,7 @@ static int lparcfg_data(struct seq_file *m, void *v)
lrdrp = (int *)get_property(rtas_node, "ibm,lrdr-capacity", NULL);
if (lrdrp == NULL) {
partition_potential_processors = systemcfg->processorCount;
partition_potential_processors = _systemcfg->processorCount;
} else {
partition_potential_processors = *(lrdrp + 4);
}

View File

@ -519,7 +519,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
*
* flush_icache_range(unsigned long start, unsigned long stop)
*/
_GLOBAL(flush_icache_range)
_GLOBAL(__flush_icache_range)
BEGIN_FTR_SECTION
blr /* for 601, do nothing */
END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
@ -607,27 +607,6 @@ _GLOBAL(invalidate_dcache_range)
sync /* wait for dcbi's to get to ram */
blr
#ifdef CONFIG_NOT_COHERENT_CACHE
/*
* 40x cores have 8K or 16K dcache and 32 byte line size.
* 44x has a 32K dcache and 32 byte line size.
* 8xx has 1, 2, 4, 8K variants.
* For now, cover the worst case of the 44x.
* Must be called with external interrupts disabled.
*/
#define CACHE_NWAYS 64
#define CACHE_NLINES 16
_GLOBAL(flush_dcache_all)
li r4, (2 * CACHE_NWAYS * CACHE_NLINES)
mtctr r4
lis r5, KERNELBASE@h
1: lwz r3, 0(r5) /* Load one word from every line */
addi r5, r5, L1_CACHE_BYTES
bdnz 1b
blr
#endif /* CONFIG_NOT_COHERENT_CACHE */
/*
* Flush a particular page from the data cache to RAM.
* Note: this is necessary because the instruction cache does *not*

View File

@ -89,12 +89,12 @@ _GLOBAL(call_do_softirq)
mtlr r0
blr
_GLOBAL(call_handle_IRQ_event)
_GLOBAL(call___do_IRQ)
mflr r0
std r0,16(r1)
stdu r1,THREAD_SIZE-112(r6)
mr r1,r6
bl .handle_IRQ_event
stdu r1,THREAD_SIZE-112(r5)
mr r1,r5
bl .__do_IRQ
ld r1,0(r1)
ld r0,16(r1)
mtlr r0

View File

@ -15,7 +15,7 @@
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/page.h>
#include <asm/systemcfg.h>
#include <asm/lppaca.h>
#include <asm/iseries/it_lp_queue.h>
#include <asm/paca.h>
@ -24,8 +24,7 @@ static union {
struct systemcfg data;
u8 page[PAGE_SIZE];
} systemcfg_store __attribute__((__section__(".data.page.aligned")));
struct systemcfg *systemcfg = &systemcfg_store.data;
EXPORT_SYMBOL(systemcfg);
struct systemcfg *_systemcfg = &systemcfg_store.data;
/* This symbol is provided by the linker - let it fill in the paca

View File

@ -44,6 +44,7 @@
#include <asm/cputable.h>
#include <asm/btext.h>
#include <asm/div64.h>
#include <asm/signal.h>
#ifdef CONFIG_8xx
#include <asm/commproc.h>
@ -56,7 +57,6 @@ extern void machine_check_exception(struct pt_regs *regs);
extern void alignment_exception(struct pt_regs *regs);
extern void program_check_exception(struct pt_regs *regs);
extern void single_step_exception(struct pt_regs *regs);
extern int do_signal(sigset_t *, struct pt_regs *);
extern int pmac_newworld;
extern int sys_sigreturn(struct pt_regs *regs);
@ -188,9 +188,6 @@ EXPORT_SYMBOL(adb_try_handler_change);
EXPORT_SYMBOL(cuda_request);
EXPORT_SYMBOL(cuda_poll);
#endif /* CONFIG_ADB_CUDA */
#if defined(CONFIG_PPC_MULTIPLATFORM) && defined(CONFIG_PPC32)
EXPORT_SYMBOL(_machine);
#endif
#ifdef CONFIG_PPC_PMAC
EXPORT_SYMBOL(sys_ctrler);
#endif

View File

@ -1,6 +1,4 @@
/*
* arch/ppc64/kernel/proc_ppc64.c
*
* Copyright (C) 2001 Mike Corrigan & Dave Engebretsen IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
@ -53,7 +51,7 @@ static int __init proc_ppc64_create(void)
if (!root)
return 1;
if (!(systemcfg->platform & (PLATFORM_PSERIES | PLATFORM_CELL)))
if (!(platform_is_pseries() || _machine == PLATFORM_CELL))
return 0;
if (!proc_mkdir("rtas", root))
@ -74,7 +72,7 @@ static int __init proc_ppc64_init(void)
if (!pde)
return 1;
pde->nlink = 1;
pde->data = systemcfg;
pde->data = _systemcfg;
pde->size = PAGE_SIZE;
pde->proc_fops = &page_map_fops;

View File

@ -48,9 +48,6 @@
#include <asm/machdep.h>
#include <asm/pSeries_reconfig.h>
#include <asm/pci-bridge.h>
#ifdef CONFIG_PPC64
#include <asm/systemcfg.h>
#endif
#ifdef DEBUG
#define DBG(fmt...) printk(KERN_ERR fmt)
@ -74,10 +71,6 @@ struct isa_reg_property {
typedef int interpret_func(struct device_node *, unsigned long *,
int, int, int);
extern struct rtas_t rtas;
extern struct lmb lmb;
extern unsigned long klimit;
static int __initdata dt_root_addr_cells;
static int __initdata dt_root_size_cells;
@ -391,7 +384,7 @@ static int __devinit finish_node_interrupts(struct device_node *np,
#ifdef CONFIG_PPC64
/* We offset irq numbers for the u3 MPIC by 128 in PowerMac */
if (systemcfg->platform == PLATFORM_POWERMAC && ic && ic->parent) {
if (_machine == PLATFORM_POWERMAC && ic && ic->parent) {
char *name = get_property(ic->parent, "name", NULL);
if (name && !strcmp(name, "u3"))
np->intrs[intrcount].line += 128;
@ -1087,9 +1080,9 @@ void __init unflatten_device_tree(void)
static int __init early_init_dt_scan_cpus(unsigned long node,
const char *uname, int depth, void *data)
{
char *type = of_get_flat_dt_prop(node, "device_type", NULL);
u32 *prop;
unsigned long size = 0;
unsigned long size;
char *type = of_get_flat_dt_prop(node, "device_type", &size);
/* We are scanning "cpu" nodes only */
if (type == NULL || strcmp(type, "cpu") != 0)
@ -1115,7 +1108,7 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
#ifdef CONFIG_ALTIVEC
/* Check if we have a VMX and eventually update CPU features */
prop = (u32 *)of_get_flat_dt_prop(node, "ibm,vmx", &size);
prop = (u32 *)of_get_flat_dt_prop(node, "ibm,vmx", NULL);
if (prop && (*prop) > 0) {
cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
@ -1161,13 +1154,9 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
prop = (u32 *)of_get_flat_dt_prop(node, "linux,platform", NULL);
if (prop == NULL)
return 0;
#ifdef CONFIG_PPC64
systemcfg->platform = *prop;
#else
#ifdef CONFIG_PPC_MULTIPLATFORM
_machine = *prop;
#endif
#endif
#ifdef CONFIG_PPC64
/* check if iommu is forced on or off */
@ -1264,7 +1253,14 @@ static int __init early_init_dt_scan_memory(unsigned long node,
unsigned long l;
/* We are scanning "memory" nodes only */
if (type == NULL || strcmp(type, "memory") != 0)
if (type == NULL) {
/*
* The longtrail doesn't have a device_type on the
* /memory node, so look for the node called /memory@0.
*/
if (depth != 1 || strcmp(uname, "memory@0") != 0)
return 0;
} else if (strcmp(type, "memory") != 0)
return 0;
reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l);
@ -1339,9 +1335,6 @@ void __init early_init_devtree(void *params)
of_scan_flat_dt(early_init_dt_scan_memory, NULL);
lmb_enforce_memory_limit(memory_limit);
lmb_analyze();
#ifdef CONFIG_PPC64
systemcfg->physicalMemorySize = lmb_phys_mem_size();
#endif
lmb_reserve(0, __pa(klimit));
DBG("Phys. mem: %lx\n", lmb_phys_mem_size());
@ -1908,7 +1901,7 @@ static int of_finish_dynamic_node(struct device_node *node,
/* We don't support that function on PowerMac, at least
* not yet
*/
if (systemcfg->platform == PLATFORM_POWERMAC)
if (_machine == PLATFORM_POWERMAC)
return -ENODEV;
/* fix up new node's linux_phandle field */
@ -1992,9 +1985,11 @@ int prom_add_property(struct device_node* np, struct property* prop)
*next = prop;
write_unlock(&devtree_lock);
#ifdef CONFIG_PROC_DEVICETREE
/* try to add to proc as well if it was initialized */
if (np->pde)
proc_device_tree_add_prop(np->pde, prop);
#endif /* CONFIG_PROC_DEVICETREE */
return 0;
}

View File

@ -94,11 +94,17 @@ extern const struct linux_logo logo_linux_clut224;
#ifdef CONFIG_PPC64
#define RELOC(x) (*PTRRELOC(&(x)))
#define ADDR(x) (u32) add_reloc_offset((unsigned long)(x))
#define OF_WORKAROUNDS 0
#else
#define RELOC(x) (x)
#define ADDR(x) (u32) (x)
#define OF_WORKAROUNDS of_workarounds
int of_workarounds;
#endif
#define OF_WA_CLAIM 1 /* do phys/virt claim separately, then map */
#define OF_WA_LONGTRAIL 2 /* work around longtrail bugs */
#define PROM_BUG() do { \
prom_printf("kernel BUG at %s line 0x%x!\n", \
RELOC(__FILE__), __LINE__); \
@ -111,11 +117,6 @@ extern const struct linux_logo logo_linux_clut224;
#define prom_debug(x...)
#endif
#ifdef CONFIG_PPC32
#define PLATFORM_POWERMAC _MACH_Pmac
#define PLATFORM_CHRP _MACH_chrp
#endif
typedef u32 prom_arg_t;
@ -128,10 +129,11 @@ struct prom_args {
struct prom_t {
ihandle root;
ihandle chosen;
phandle chosen;
int cpu;
ihandle stdout;
ihandle mmumap;
ihandle memory;
};
struct mem_map_entry {
@ -360,16 +362,36 @@ static void __init prom_printf(const char *format, ...)
static unsigned int __init prom_claim(unsigned long virt, unsigned long size,
unsigned long align)
{
int ret;
struct prom_t *_prom = &RELOC(prom);
ret = call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size,
(prom_arg_t)align);
if (ret != -1 && _prom->mmumap != 0)
/* old pmacs need us to map as well */
if (align == 0 && (OF_WORKAROUNDS & OF_WA_CLAIM)) {
/*
* Old OF requires we claim physical and virtual separately
* and then map explicitly (assuming virtual mode)
*/
int ret;
prom_arg_t result;
ret = call_prom_ret("call-method", 5, 2, &result,
ADDR("claim"), _prom->memory,
align, size, virt);
if (ret != 0 || result == -1)
return -1;
ret = call_prom_ret("call-method", 5, 2, &result,
ADDR("claim"), _prom->mmumap,
align, size, virt);
if (ret != 0) {
call_prom("call-method", 4, 1, ADDR("release"),
_prom->memory, size, virt);
return -1;
}
/* the 0x12 is M (coherence) + PP == read/write */
call_prom("call-method", 6, 1,
ADDR("map"), _prom->mmumap, 0, size, virt, virt);
return ret;
ADDR("map"), _prom->mmumap, 0x12, size, virt, virt);
return virt;
}
return call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size,
(prom_arg_t)align);
}
static void __init __attribute__((noreturn)) prom_panic(const char *reason)
@ -415,11 +437,52 @@ static int inline prom_getproplen(phandle node, const char *pname)
return call_prom("getproplen", 2, 1, node, ADDR(pname));
}
static int inline prom_setprop(phandle node, const char *pname,
void *value, size_t valuelen)
static void add_string(char **str, const char *q)
{
char *p = *str;
while (*q)
*p++ = *q++;
*p++ = ' ';
*str = p;
}
static char *tohex(unsigned int x)
{
static char digits[] = "0123456789abcdef";
static char result[9];
int i;
result[8] = 0;
i = 8;
do {
--i;
result[i] = digits[x & 0xf];
x >>= 4;
} while (x != 0 && i > 0);
return &result[i];
}
static int __init prom_setprop(phandle node, const char *nodename,
const char *pname, void *value, size_t valuelen)
{
char cmd[256], *p;
if (!(OF_WORKAROUNDS & OF_WA_LONGTRAIL))
return call_prom("setprop", 4, 1, node, ADDR(pname),
(u32)(unsigned long) value, (u32) valuelen);
/* gah... setprop doesn't work on longtrail, have to use interpret */
p = cmd;
add_string(&p, "dev");
add_string(&p, nodename);
add_string(&p, tohex((u32)(unsigned long) value));
add_string(&p, tohex(valuelen));
add_string(&p, tohex(ADDR(pname)));
add_string(&p, tohex(strlen(RELOC(pname))));
add_string(&p, "property");
*p = 0;
return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd);
}
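To make the longtrail workaround above concrete: the buffer handed to "interpret" is an Open Firmware Forth fragment that selects the node, pushes the value address/length and the property-name address/length, and then executes the word property. For a call such as prom_setprop(node, "/chosen", "linux,initrd-start", &val, 8), the constructed command would read roughly

        dev /chosen 341000 8 342f80 12 property

where 341000 and 342f80 stand in for whatever addresses the value and the name string happen to occupy (both invented here), 8 is the value length, and 12 is the hex length of "linux,initrd-start" (18 bytes).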
/* We can't use the standard versions because of RELOC headaches. */
@ -980,7 +1043,7 @@ static void __init prom_instantiate_rtas(void)
rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
if (!IHANDLE_VALID(rtas_inst)) {
prom_printf("opening rtas package failed");
prom_printf("opening rtas package failed (%x)\n", rtas_inst);
return;
}
@ -988,7 +1051,7 @@ static void __init prom_instantiate_rtas(void)
if (call_prom_ret("call-method", 3, 2, &entry,
ADDR("instantiate-rtas"),
rtas_inst, base) == PROM_ERROR
rtas_inst, base) != 0
|| entry == 0) {
prom_printf(" failed\n");
return;
@ -997,8 +1060,10 @@ static void __init prom_instantiate_rtas(void)
reserve_mem(base, size);
prom_setprop(rtas_node, "linux,rtas-base", &base, sizeof(base));
prom_setprop(rtas_node, "linux,rtas-entry", &entry, sizeof(entry));
prom_setprop(rtas_node, "/rtas", "linux,rtas-base",
&base, sizeof(base));
prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
&entry, sizeof(entry));
prom_debug("rtas base = 0x%x\n", base);
prom_debug("rtas entry = 0x%x\n", entry);
@ -1089,10 +1154,6 @@ static void __init prom_initialize_tce_table(void)
if (base < local_alloc_bottom)
local_alloc_bottom = base;
/* Save away the TCE table attributes for later use. */
prom_setprop(node, "linux,tce-base", &base, sizeof(base));
prom_setprop(node, "linux,tce-size", &minsize, sizeof(minsize));
/* It seems OF doesn't null-terminate the path :-( */
memset(path, 0, sizeof(path));
/* Call OF to setup the TCE hardware */
@ -1101,6 +1162,10 @@ static void __init prom_initialize_tce_table(void)
prom_printf("package-to-path failed\n");
}
/* Save away the TCE table attributes for later use. */
prom_setprop(node, path, "linux,tce-base", &base, sizeof(base));
prom_setprop(node, path, "linux,tce-size", &minsize, sizeof(minsize));
prom_debug("TCE table: %s\n", path);
prom_debug("\tnode = 0x%x\n", node);
prom_debug("\tbase = 0x%x\n", base);
@ -1342,6 +1407,7 @@ static void __init prom_init_client_services(unsigned long pp)
/*
* For really old powermacs, we need to map things we claim.
* For that, we need the ihandle of the mmu.
* Also, on the longtrail, we need to work around other bugs.
*/
static void __init prom_find_mmu(void)
{
@ -1355,12 +1421,19 @@ static void __init prom_find_mmu(void)
if (prom_getprop(oprom, "model", version, sizeof(version)) <= 0)
return;
version[sizeof(version) - 1] = 0;
prom_printf("OF version is '%s'\n", version);
/* XXX might need to add other versions here */
if (strcmp(version, "Open Firmware, 1.0.5") != 0)
if (strcmp(version, "Open Firmware, 1.0.5") == 0)
of_workarounds = OF_WA_CLAIM;
else if (strncmp(version, "FirmWorks,3.", 12) == 0) {
of_workarounds = OF_WA_CLAIM | OF_WA_LONGTRAIL;
call_prom("interpret", 1, 1, "dev /memory 0 to allow-reclaim");
} else
return;
_prom->memory = call_prom("open", 1, 1, ADDR("/memory"));
prom_getprop(_prom->chosen, "mmu", &_prom->mmumap,
sizeof(_prom->mmumap));
if (!IHANDLE_VALID(_prom->memory) || !IHANDLE_VALID(_prom->mmumap))
of_workarounds &= ~OF_WA_CLAIM; /* hmmm */
}
#else
#define prom_find_mmu()
@ -1382,16 +1455,17 @@ static void __init prom_init_stdout(void)
memset(path, 0, 256);
call_prom("instance-to-path", 3, 1, _prom->stdout, path, 255);
val = call_prom("instance-to-package", 1, 1, _prom->stdout);
prom_setprop(_prom->chosen, "linux,stdout-package", &val, sizeof(val));
prom_setprop(_prom->chosen, "/chosen", "linux,stdout-package",
&val, sizeof(val));
prom_printf("OF stdout device is: %s\n", RELOC(of_stdout_device));
prom_setprop(_prom->chosen, "linux,stdout-path",
RELOC(of_stdout_device), strlen(RELOC(of_stdout_device))+1);
prom_setprop(_prom->chosen, "/chosen", "linux,stdout-path",
path, strlen(path) + 1);
/* If it's a display, note it */
memset(type, 0, sizeof(type));
prom_getprop(val, "device_type", type, sizeof(type));
if (strcmp(type, RELOC("display")) == 0)
prom_setprop(val, "linux,boot-display", NULL, 0);
prom_setprop(val, path, "linux,boot-display", NULL, 0);
}
static void __init prom_close_stdin(void)
@ -1514,7 +1588,7 @@ static void __init prom_check_displays(void)
/* Success */
prom_printf("done\n");
prom_setprop(node, "linux,opened", NULL, 0);
prom_setprop(node, path, "linux,opened", NULL, 0);
/* Setup a usable color table when the appropriate
* method is available. Should update this to set-colors */
@ -1884,9 +1958,11 @@ static void __init fixup_device_tree(void)
/* interrupt on this revision of u3 is number 0 and level */
interrupts[0] = 0;
interrupts[1] = 1;
prom_setprop(i2c, "interrupts", &interrupts, sizeof(interrupts));
prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupts",
&interrupts, sizeof(interrupts));
parent = (u32)mpic;
prom_setprop(i2c, "interrupt-parent", &parent, sizeof(parent));
prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupt-parent",
&parent, sizeof(parent));
#endif
}
@ -1922,11 +1998,11 @@ static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
RELOC(prom_initrd_end) = RELOC(prom_initrd_start) + r4;
val = RELOC(prom_initrd_start);
prom_setprop(_prom->chosen, "linux,initrd-start", &val,
sizeof(val));
prom_setprop(_prom->chosen, "/chosen", "linux,initrd-start",
&val, sizeof(val));
val = RELOC(prom_initrd_end);
prom_setprop(_prom->chosen, "linux,initrd-end", &val,
sizeof(val));
prom_setprop(_prom->chosen, "/chosen", "linux,initrd-end",
&val, sizeof(val));
reserve_mem(RELOC(prom_initrd_start),
RELOC(prom_initrd_end) - RELOC(prom_initrd_start));
@ -1968,16 +2044,17 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
*/
prom_init_client_services(pp);
/*
* See if this OF is old enough that we need to do explicit maps
* and other workarounds
*/
prom_find_mmu();
/*
* Init prom stdout device
*/
prom_init_stdout();
/*
* See if this OF is old enough that we need to do explicit maps
*/
prom_find_mmu();
/*
* Check for an initrd
*/
@ -1989,14 +2066,15 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
*/
RELOC(of_platform) = prom_find_machine_type();
getprop_rval = RELOC(of_platform);
prom_setprop(_prom->chosen, "linux,platform",
prom_setprop(_prom->chosen, "/chosen", "linux,platform",
&getprop_rval, sizeof(getprop_rval));
#ifdef CONFIG_PPC_PSERIES
/*
* On pSeries, inform the firmware about our capabilities
*/
if (RELOC(of_platform) & PLATFORM_PSERIES)
if (RELOC(of_platform) == PLATFORM_PSERIES ||
RELOC(of_platform) == PLATFORM_PSERIES_LPAR)
prom_send_capabilities();
#endif
@ -2050,21 +2128,23 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
* Fill in some infos for use by the kernel later on
*/
if (RELOC(prom_memory_limit))
prom_setprop(_prom->chosen, "linux,memory-limit",
prom_setprop(_prom->chosen, "/chosen", "linux,memory-limit",
&RELOC(prom_memory_limit),
sizeof(prom_memory_limit));
#ifdef CONFIG_PPC64
if (RELOC(ppc64_iommu_off))
prom_setprop(_prom->chosen, "linux,iommu-off", NULL, 0);
prom_setprop(_prom->chosen, "/chosen", "linux,iommu-off",
NULL, 0);
if (RELOC(iommu_force_on))
prom_setprop(_prom->chosen, "linux,iommu-force-on", NULL, 0);
prom_setprop(_prom->chosen, "/chosen", "linux,iommu-force-on",
NULL, 0);
if (RELOC(prom_tce_alloc_start)) {
prom_setprop(_prom->chosen, "linux,tce-alloc-start",
prom_setprop(_prom->chosen, "/chosen", "linux,tce-alloc-start",
&RELOC(prom_tce_alloc_start),
sizeof(prom_tce_alloc_start));
prom_setprop(_prom->chosen, "linux,tce-alloc-end",
prom_setprop(_prom->chosen, "/chosen", "linux,tce-alloc-end",
&RELOC(prom_tce_alloc_end),
sizeof(prom_tce_alloc_end));
}
@ -2081,7 +2161,12 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
prom_printf("copying OF device tree ...\n");
flatten_device_tree();
/* in case stdin is USB and still active on IBM machines... */
/*
* in case stdin is USB and still active on IBM machines...
* Unfortunately quiesce crashes on some powermacs if we have
* closed stdin already (in particular the powerbook 101).
*/
if (RELOC(of_platform) != PLATFORM_POWERMAC)
prom_close_stdin();
/*

View File

@ -259,7 +259,7 @@ static int __init proc_rtas_init(void)
{
struct proc_dir_entry *entry;
if (!(systemcfg->platform & PLATFORM_PSERIES))
if (_machine != PLATFORM_PSERIES && _machine != PLATFORM_PSERIES_LPAR)
return 1;
rtas_node = of_find_node_by_name(NULL, "rtas");

View File

@ -29,9 +29,6 @@
#include <asm/delay.h>
#include <asm/uaccess.h>
#include <asm/lmb.h>
#ifdef CONFIG_PPC64
#include <asm/systemcfg.h>
#endif
struct rtas_t rtas = {
.lock = SPIN_LOCK_UNLOCKED
@ -671,7 +668,7 @@ void __init rtas_initialize(void)
* the stop-self token if any
*/
#ifdef CONFIG_PPC64
if (systemcfg->platform == PLATFORM_PSERIES_LPAR)
if (_machine == PLATFORM_PSERIES_LPAR)
rtas_region = min(lmb.rmo_size, RTAS_INSTANTIATE_MAX);
#endif
rtas_rmo_buf = lmb_alloc_base(RTAS_RMOBUF_MAX, PAGE_SIZE, rtas_region);

View File

@ -47,7 +47,7 @@ static int write_pci_config;
static int ibm_read_pci_config;
static int ibm_write_pci_config;
static int config_access_valid(struct pci_dn *dn, int where)
static inline int config_access_valid(struct pci_dn *dn, int where)
{
if (where < 256)
return 1;
@ -72,16 +72,14 @@ static int of_device_available(struct device_node * dn)
return 0;
}
static int rtas_read_config(struct device_node *dn, int where, int size, u32 *val)
static int rtas_read_config(struct pci_dn *pdn, int where, int size, u32 *val)
{
int returnval = -1;
unsigned long buid, addr;
int ret;
struct pci_dn *pdn;
if (!dn || !dn->data)
if (!pdn)
return PCIBIOS_DEVICE_NOT_FOUND;
pdn = dn->data;
if (!config_access_valid(pdn, where))
return PCIBIOS_BAD_REGISTER_NUMBER;
@ -90,7 +88,7 @@ static int rtas_read_config(struct device_node *dn, int where, int size, u32 *va
buid = pdn->phb->buid;
if (buid) {
ret = rtas_call(ibm_read_pci_config, 4, 2, &returnval,
addr, buid >> 32, buid & 0xffffffff, size);
addr, BUID_HI(buid), BUID_LO(buid), size);
} else {
ret = rtas_call(read_pci_config, 2, 2, &returnval, addr, size);
}
@ -100,7 +98,7 @@ static int rtas_read_config(struct device_node *dn, int where, int size, u32 *va
return PCIBIOS_DEVICE_NOT_FOUND;
if (returnval == EEH_IO_ERROR_VALUE(size) &&
eeh_dn_check_failure (dn, NULL))
eeh_dn_check_failure (pdn->node, NULL))
return PCIBIOS_DEVICE_NOT_FOUND;
return PCIBIOS_SUCCESSFUL;
@ -118,23 +116,23 @@ static int rtas_pci_read_config(struct pci_bus *bus,
busdn = bus->sysdata; /* must be a phb */
/* Search only direct children of the bus */
for (dn = busdn->child; dn; dn = dn->sibling)
if (dn->data && PCI_DN(dn)->devfn == devfn
for (dn = busdn->child; dn; dn = dn->sibling) {
struct pci_dn *pdn = PCI_DN(dn);
if (pdn && pdn->devfn == devfn
&& of_device_available(dn))
return rtas_read_config(dn, where, size, val);
return rtas_read_config(pdn, where, size, val);
}
return PCIBIOS_DEVICE_NOT_FOUND;
}
int rtas_write_config(struct device_node *dn, int where, int size, u32 val)
int rtas_write_config(struct pci_dn *pdn, int where, int size, u32 val)
{
unsigned long buid, addr;
int ret;
struct pci_dn *pdn;
if (!dn || !dn->data)
if (!pdn)
return PCIBIOS_DEVICE_NOT_FOUND;
pdn = dn->data;
if (!config_access_valid(pdn, where))
return PCIBIOS_BAD_REGISTER_NUMBER;
@ -142,7 +140,8 @@ int rtas_write_config(struct device_node *dn, int where, int size, u32 val)
(pdn->devfn << 8) | (where & 0xff);
buid = pdn->phb->buid;
if (buid) {
ret = rtas_call(ibm_write_pci_config, 5, 1, NULL, addr, buid >> 32, buid & 0xffffffff, size, (ulong) val);
ret = rtas_call(ibm_write_pci_config, 5, 1, NULL, addr,
BUID_HI(buid), BUID_LO(buid), size, (ulong) val);
} else {
ret = rtas_call(write_pci_config, 3, 1, NULL, addr, size, (ulong)val);
}
@ -165,10 +164,12 @@ static int rtas_pci_write_config(struct pci_bus *bus,
busdn = bus->sysdata; /* must be a phb */
/* Search only direct children of the bus */
for (dn = busdn->child; dn; dn = dn->sibling)
if (dn->data && PCI_DN(dn)->devfn == devfn
for (dn = busdn->child; dn; dn = dn->sibling) {
struct pci_dn *pdn = PCI_DN(dn);
if (pdn && pdn->devfn == devfn
&& of_device_available(dn))
return rtas_write_config(dn, where, size, val);
return rtas_write_config(pdn, where, size, val);
}
return PCIBIOS_DEVICE_NOT_FOUND;
}

View File

@ -33,6 +33,7 @@
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/systemcfg.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/elf.h>
@ -51,6 +52,9 @@
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/lmb.h>
#include <asm/xmon.h>
#include "setup.h"
#undef DEBUG
@ -60,6 +64,13 @@
#define DBG(fmt...)
#endif
#ifdef CONFIG_PPC_MULTIPLATFORM
int _machine = 0;
EXPORT_SYMBOL(_machine);
#endif
unsigned long klimit = (unsigned long) _end;
/*
* This still seems to be needed... -- paulus
*/
@ -510,7 +521,7 @@ void __init smp_setup_cpu_maps(void)
* On pSeries LPAR, we need to know how many cpus
* could possibly be added to this partition.
*/
if (systemcfg->platform == PLATFORM_PSERIES_LPAR &&
if (_machine == PLATFORM_PSERIES_LPAR &&
(dn = of_find_node_by_path("/rtas"))) {
int num_addr_cell, num_size_cell, maxcpus;
unsigned int *ireg;
@ -555,7 +566,27 @@ void __init smp_setup_cpu_maps(void)
cpu_set(cpu ^ 0x1, cpu_sibling_map[cpu]);
}
systemcfg->processorCount = num_present_cpus();
_systemcfg->processorCount = num_present_cpus();
#endif /* CONFIG_PPC64 */
}
#endif /* CONFIG_SMP */
#ifdef CONFIG_XMON
static int __init early_xmon(char *p)
{
/* ensure xmon is enabled */
if (p) {
if (strncmp(p, "on", 2) == 0)
xmon_init(1);
if (strncmp(p, "off", 3) == 0)
xmon_init(0);
if (strncmp(p, "early", 5) != 0)
return 0;
}
xmon_init(1);
debugger(NULL);
return 0;
}
early_param("xmon", early_xmon);
#endif

View File

@ -0,0 +1,6 @@
#ifndef _POWERPC_KERNEL_SETUP_H
#define _POWERPC_KERNEL_SETUP_H
void check_for_initrd(void);
#endif /* _POWERPC_KERNEL_SETUP_H */

View File

@ -40,6 +40,8 @@
#include <asm/xmon.h>
#include <asm/time.h>
#include "setup.h"
#define DBG(fmt...)
#if defined CONFIG_KGDB
@ -70,8 +72,6 @@ unsigned int DMA_MODE_WRITE;
int have_of = 1;
#ifdef CONFIG_PPC_MULTIPLATFORM
int _machine = 0;
extern void prep_init(void);
extern void pmac_init(void);
extern void chrp_init(void);
@ -279,7 +279,6 @@ arch_initcall(ppc_init);
/* Warning, IO base is not yet inited */
void __init setup_arch(char **cmdline_p)
{
extern char *klimit;
extern void do_init_bootmem(void);
/* so udelay does something sensible, assume <= 1000 bogomips */
@ -303,14 +302,9 @@ void __init setup_arch(char **cmdline_p)
pmac_feature_init(); /* New cool way */
#endif
#ifdef CONFIG_XMON
xmon_map_scc();
if (strstr(cmd_line, "xmon")) {
#ifdef CONFIG_XMON_DEFAULT
xmon_init(1);
debugger(NULL);
}
#endif /* CONFIG_XMON */
if ( ppc_md.progress ) ppc_md.progress("setup_arch: enter", 0x3eab);
#endif
#if defined(CONFIG_KGDB)
if (ppc_md.kgdb_map_scc)
@ -343,7 +337,7 @@ void __init setup_arch(char **cmdline_p)
init_mm.start_code = PAGE_OFFSET;
init_mm.end_code = (unsigned long) _etext;
init_mm.end_data = (unsigned long) _edata;
init_mm.brk = (unsigned long) klimit;
init_mm.brk = klimit;
/* Save unparsed command line copy for /proc/cmdline */
strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);

View File

@ -61,6 +61,8 @@
#include <asm/xmon.h>
#include <asm/udbg.h>
#include "setup.h"
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
@ -94,15 +96,6 @@ extern void udbg_init_maple_realmode(void);
do { udbg_putc = call_rtas_display_status_delay; } while(0)
#endif
/* extern void *stab; */
extern unsigned long klimit;
extern void mm_init_ppc64(void);
extern void stab_initialize(unsigned long stab);
extern void htab_initialize(void);
extern void early_init_devtree(void *flat_dt);
extern void unflatten_device_tree(void);
int have_of = 1;
int boot_cpuid = 0;
int boot_cpuid_phys = 0;
@ -254,11 +247,10 @@ void __init early_setup(unsigned long dt_ptr)
* Iterate all ppc_md structures until we find the proper
* one for the current machine type
*/
DBG("Probing machine type for platform %x...\n",
systemcfg->platform);
DBG("Probing machine type for platform %x...\n", _machine);
for (mach = machines; *mach; mach++) {
if ((*mach)->probe(systemcfg->platform))
if ((*mach)->probe(_machine))
break;
}
/* What can we do if we didn't find ? */
@ -290,6 +282,28 @@ void __init early_setup(unsigned long dt_ptr)
DBG(" <- early_setup()\n");
}
#ifdef CONFIG_SMP
void early_setup_secondary(void)
{
struct paca_struct *lpaca = get_paca();
/* Mark enabled in PACA */
lpaca->proc_enabled = 0;
/* Initialize hash table for that CPU */
htab_initialize_secondary();
/* Initialize STAB/SLB. We use a virtual address as it works
* in real mode on pSeries and we want a virutal address on
* iSeries anyway
*/
if (cpu_has_feature(CPU_FTR_SLB))
slb_initialize();
else
stab_initialize(lpaca->stab_addr);
}
#endif /* CONFIG_SMP */
#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
void smp_release_cpus(void)
@ -315,7 +329,8 @@ void smp_release_cpus(void)
#endif /* CONFIG_SMP || CONFIG_KEXEC */
/*
* Initialize some remaining members of the ppc64_caches and systemcfg structures
* Initialize some remaining members of the ppc64_caches and systemcfg
* structures
* (at least until we get rid of them completely). This is mostly some
* cache informations about the CPU that will be used by cache flush
* routines and/or provided to userland
@ -340,7 +355,7 @@ static void __init initialize_cache_info(void)
const char *dc, *ic;
/* Then read cache informations */
if (systemcfg->platform == PLATFORM_POWERMAC) {
if (_machine == PLATFORM_POWERMAC) {
dc = "d-cache-block-size";
ic = "i-cache-block-size";
} else {
@ -360,8 +375,8 @@ static void __init initialize_cache_info(void)
DBG("Argh, can't find dcache properties ! "
"sizep: %p, lsizep: %p\n", sizep, lsizep);
systemcfg->dcache_size = ppc64_caches.dsize = size;
systemcfg->dcache_line_size =
_systemcfg->dcache_size = ppc64_caches.dsize = size;
_systemcfg->dcache_line_size =
ppc64_caches.dline_size = lsize;
ppc64_caches.log_dline_size = __ilog2(lsize);
ppc64_caches.dlines_per_page = PAGE_SIZE / lsize;
@ -378,8 +393,8 @@ static void __init initialize_cache_info(void)
DBG("Argh, can't find icache properties ! "
"sizep: %p, lsizep: %p\n", sizep, lsizep);
systemcfg->icache_size = ppc64_caches.isize = size;
systemcfg->icache_line_size =
_systemcfg->icache_size = ppc64_caches.isize = size;
_systemcfg->icache_line_size =
ppc64_caches.iline_size = lsize;
ppc64_caches.log_iline_size = __ilog2(lsize);
ppc64_caches.ilines_per_page = PAGE_SIZE / lsize;
@ -387,10 +402,12 @@ static void __init initialize_cache_info(void)
}
/* Add an eye catcher and the systemcfg layout version number */
strcpy(systemcfg->eye_catcher, "SYSTEMCFG:PPC64");
systemcfg->version.major = SYSTEMCFG_MAJOR;
systemcfg->version.minor = SYSTEMCFG_MINOR;
systemcfg->processor = mfspr(SPRN_PVR);
strcpy(_systemcfg->eye_catcher, "SYSTEMCFG:PPC64");
_systemcfg->version.major = SYSTEMCFG_MAJOR;
_systemcfg->version.minor = SYSTEMCFG_MINOR;
_systemcfg->processor = mfspr(SPRN_PVR);
_systemcfg->platform = _machine;
_systemcfg->physicalMemorySize = lmb_phys_mem_size();
DBG(" <- initialize_cache_info()\n");
}
@ -479,10 +496,10 @@ void __init setup_system(void)
printk("-----------------------------------------------------\n");
printk("ppc64_pft_size = 0x%lx\n", ppc64_pft_size);
printk("ppc64_interrupt_controller = 0x%ld\n", ppc64_interrupt_controller);
printk("systemcfg = 0x%p\n", systemcfg);
printk("systemcfg->platform = 0x%x\n", systemcfg->platform);
printk("systemcfg->processorCount = 0x%lx\n", systemcfg->processorCount);
printk("systemcfg->physicalMemorySize = 0x%lx\n", systemcfg->physicalMemorySize);
printk("systemcfg = 0x%p\n", _systemcfg);
printk("systemcfg->platform = 0x%x\n", _systemcfg->platform);
printk("systemcfg->processorCount = 0x%lx\n", _systemcfg->processorCount);
printk("systemcfg->physicalMemorySize = 0x%lx\n", _systemcfg->physicalMemorySize);
printk("ppc64_caches.dcache_line_size = 0x%x\n",
ppc64_caches.dline_size);
printk("ppc64_caches.icache_line_size = 0x%x\n",
@ -564,12 +581,12 @@ void __init setup_syscall_map(void)
for (i = 0; i < __NR_syscalls; i++) {
if (sys_call_table[i*2] != sys_ni_syscall) {
count64++;
systemcfg->syscall_map_64[i >> 5] |=
_systemcfg->syscall_map_64[i >> 5] |=
0x80000000UL >> (i & 0x1f);
}
if (sys_call_table[i*2+1] != sys_ni_syscall) {
count32++;
systemcfg->syscall_map_32[i >> 5] |=
_systemcfg->syscall_map_32[i >> 5] |=
0x80000000UL >> (i & 0x1f);
}
}
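A quick worked example of the bit packing above (illustrative arithmetic only): for syscall number i = 37, the word index is 37 >> 5 = 1 and the mask is 0x80000000UL >> (37 & 0x1f) = 0x80000000 >> 5 = 0x04000000, so syscall 37 sets bit 26 of syscall_map_64[1] (and likewise for the 32-bit map). Each map thus stores one availability bit per syscall, most-significant bit first, in the systemcfg page that userland can consult.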
@ -858,26 +875,6 @@ int check_legacy_ioport(unsigned long base_port)
}
EXPORT_SYMBOL(check_legacy_ioport);
#ifdef CONFIG_XMON
static int __init early_xmon(char *p)
{
/* ensure xmon is enabled */
if (p) {
if (strncmp(p, "on", 2) == 0)
xmon_init(1);
if (strncmp(p, "off", 3) == 0)
xmon_init(0);
if (strncmp(p, "early", 5) != 0)
return 0;
}
xmon_init(1);
debugger(NULL);
return 0;
}
early_param("xmon", early_xmon);
#endif
void cpu_die(void)
{
if (ppc_md.cpu_die)

View File

@ -42,6 +42,7 @@
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/sigcontext.h>
#ifdef CONFIG_PPC64
#include "ppc32.h"
#include <asm/unistd.h>

View File

@ -44,6 +44,7 @@
#include <asm/cputable.h>
#include <asm/system.h>
#include <asm/mpic.h>
#include <asm/systemcfg.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif
@ -368,9 +369,11 @@ int generic_cpu_disable(void)
if (cpu == boot_cpuid)
return -EBUSY;
systemcfg->processorCount--;
cpu_clear(cpu, cpu_online_map);
#ifdef CONFIG_PPC64
_systemcfg->processorCount--;
fixup_irqs(cpu_online_map);
#endif
return 0;
}
@ -388,9 +391,11 @@ int generic_cpu_enable(unsigned int cpu)
while (!cpu_online(cpu))
cpu_relax();
#ifdef CONFIG_PPC64
fixup_irqs(cpu_online_map);
/* counter the irq disable in fixup_irqs */
local_irq_enable();
#endif
return 0;
}
@ -419,7 +424,9 @@ void generic_mach_cpu_die(void)
while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
cpu_relax();
#ifdef CONFIG_PPC64
flush_tlb_pending();
#endif
cpu_set(cpu, cpu_online_map);
local_irq_enable();
}

View File

@ -52,7 +52,6 @@
#include <asm/semaphore.h>
#include <asm/time.h>
#include <asm/mmu_context.h>
#include <asm/systemcfg.h>
#include <asm/ppc-pci.h>
/* readdir & getdents */

View File

@ -271,13 +271,13 @@ static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
* tb_to_xs and stamp_xsec values are consistent. If not, then it
* loops back and reads them again until this criteria is met.
*/
++(systemcfg->tb_update_count);
++(_systemcfg->tb_update_count);
smp_wmb();
systemcfg->tb_orig_stamp = new_tb_stamp;
systemcfg->stamp_xsec = new_stamp_xsec;
systemcfg->tb_to_xs = new_tb_to_xs;
_systemcfg->tb_orig_stamp = new_tb_stamp;
_systemcfg->stamp_xsec = new_stamp_xsec;
_systemcfg->tb_to_xs = new_tb_to_xs;
smp_wmb();
++(systemcfg->tb_update_count);
++(_systemcfg->tb_update_count);
#endif
}
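The comment above describes a seqlock-style publication scheme: the writer bumps tb_update_count before and after storing the new time fields, so a consumer that catches the counter mid-update (odd value) or sees it change across its reads must retry. A reader-side sketch of that protocol follows; the helper itself is an assumption and not part of this commit, only the field names come from struct systemcfg.

        /* Illustrative reader loop for the tb_update_count protocol above. */
        static void read_gtod_snapshot(u64 *orig_stamp, u64 *stamp_xsec, u64 *tb_to_xs)
        {
                u64 count;

                do {
                        count = _systemcfg->tb_update_count;
                        smp_rmb();                      /* pairs with the writer's smp_wmb() */
                        *orig_stamp = _systemcfg->tb_orig_stamp;
                        *stamp_xsec = _systemcfg->stamp_xsec;
                        *tb_to_xs   = _systemcfg->tb_to_xs;
                        smp_rmb();
                        /* retry if an update was in progress or completed meanwhile */
                } while ((count & 1) || count != _systemcfg->tb_update_count);
        }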
@ -357,8 +357,9 @@ static void iSeries_tb_recal(void)
do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
tb_to_xs = divres.result_low;
do_gtod.varp->tb_to_xs = tb_to_xs;
systemcfg->tb_ticks_per_sec = tb_ticks_per_sec;
systemcfg->tb_to_xs = tb_to_xs;
_systemcfg->tb_ticks_per_sec =
tb_ticks_per_sec;
_systemcfg->tb_to_xs = tb_to_xs;
}
else {
printk( "Titan recalibrate: FAILED (difference > 4 percent)\n"
@ -483,6 +484,8 @@ void __init smp_space_timers(unsigned int max_cpus)
unsigned long offset = tb_ticks_per_jiffy / max_cpus;
unsigned long previous_tb = per_cpu(last_jiffy, boot_cpuid);
/* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */
previous_tb -= tb_ticks_per_jiffy;
for_each_cpu(i) {
if (i != boot_cpuid) {
previous_tb += offset;
@ -559,8 +562,8 @@ int do_settimeofday(struct timespec *tv)
update_gtod(tb_last_jiffy, new_xsec, do_gtod.varp->tb_to_xs);
#ifdef CONFIG_PPC64
systemcfg->tz_minuteswest = sys_tz.tz_minuteswest;
systemcfg->tz_dsttime = sys_tz.tz_dsttime;
_systemcfg->tz_minuteswest = sys_tz.tz_minuteswest;
_systemcfg->tz_dsttime = sys_tz.tz_dsttime;
#endif
write_sequnlock_irqrestore(&xtime_lock, flags);
@ -711,11 +714,11 @@ void __init time_init(void)
do_gtod.varp->tb_to_xs = tb_to_xs;
do_gtod.tb_to_us = tb_to_us;
#ifdef CONFIG_PPC64
systemcfg->tb_orig_stamp = tb_last_jiffy;
systemcfg->tb_update_count = 0;
systemcfg->tb_ticks_per_sec = tb_ticks_per_sec;
systemcfg->stamp_xsec = xtime.tv_sec * XSEC_PER_SEC;
systemcfg->tb_to_xs = tb_to_xs;
_systemcfg->tb_orig_stamp = tb_last_jiffy;
_systemcfg->tb_update_count = 0;
_systemcfg->tb_ticks_per_sec = tb_ticks_per_sec;
_systemcfg->stamp_xsec = xtime.tv_sec * XSEC_PER_SEC;
_systemcfg->tb_to_xs = tb_to_xs;
#endif
time_freq = 0;

View File

@ -129,7 +129,7 @@ int die(const char *str, struct pt_regs *regs, long err)
nl = 1;
#endif
#ifdef CONFIG_PPC64
switch (systemcfg->platform) {
switch (_machine) {
case PLATFORM_PSERIES:
printk("PSERIES ");
nl = 1;

View File

@ -41,7 +41,7 @@ unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
tmp = *p;
found_first:
tmp &= (~0UL >> (64 - size));
tmp &= (~0UL >> (BITS_PER_LONG - size));
if (tmp == 0UL) /* Are any bits set? */
return result + size; /* Nope. */
found_middle:

View File

@ -84,10 +84,11 @@
extern unsigned long dart_tablebase;
#endif /* CONFIG_U3_DART */
static unsigned long _SDR1;
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
hpte_t *htab_address;
unsigned long htab_hash_mask;
unsigned long _SDR1;
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
int mmu_linear_psize = MMU_PAGE_4K;
int mmu_virtual_psize = MMU_PAGE_4K;
#ifdef CONFIG_HUGETLB_PAGE
@ -165,7 +166,7 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
* normal insert callback here.
*/
#ifdef CONFIG_PPC_ISERIES
if (systemcfg->platform == PLATFORM_ISERIES_LPAR)
if (_machine == PLATFORM_ISERIES_LPAR)
ret = iSeries_hpte_insert(hpteg, va,
virt_to_abs(paddr),
tmp_mode,
@ -174,7 +175,7 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
else
#endif
#ifdef CONFIG_PPC_PSERIES
if (systemcfg->platform & PLATFORM_LPAR)
if (_machine & PLATFORM_LPAR)
ret = pSeries_lpar_hpte_insert(hpteg, va,
virt_to_abs(paddr),
tmp_mode,
@ -293,7 +294,7 @@ static void __init htab_init_page_sizes(void)
* Not in the device-tree, let's fallback on known size
* list for 16M capable GP & GR
*/
if ((systemcfg->platform != PLATFORM_ISERIES_LPAR) &&
if ((_machine != PLATFORM_ISERIES_LPAR) &&
cpu_has_feature(CPU_FTR_16M_PAGE))
memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
sizeof(mmu_psize_defaults_gp));
@ -364,7 +365,7 @@ static int __init htab_dt_scan_pftsize(unsigned long node,
static unsigned long __init htab_get_table_size(void)
{
unsigned long rnd_mem_size, pteg_count;
unsigned long mem_size, rnd_mem_size, pteg_count;
/* If hash size isn't already provided by the platform, we try to
* retreive it from the device-tree. If it's not there neither, we
@ -376,8 +377,9 @@ static unsigned long __init htab_get_table_size(void)
return 1UL << ppc64_pft_size;
/* round mem_size up to next power of 2 */
rnd_mem_size = 1UL << __ilog2(systemcfg->physicalMemorySize);
if (rnd_mem_size < systemcfg->physicalMemorySize)
mem_size = lmb_phys_mem_size();
rnd_mem_size = 1UL << __ilog2(mem_size);
if (rnd_mem_size < mem_size)
rnd_mem_size <<= 1;
/* # pages / 2 */
@ -386,6 +388,15 @@ static unsigned long __init htab_get_table_size(void)
return pteg_count << 7;
}
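For reference, a simplified userspace sketch of the sizing rule this function applies (names invented for the sketch; any minimum the kernel additionally enforces on the PTEG count is ignored here): round the memory size up to a power of two, allocate one PTEG per two 4K pages, and charge 128 bytes per PTEG.

#include <stdio.h>

static unsigned long ilog2_ul(unsigned long x)
{
        unsigned long r = 0;

        while (x >>= 1)
                r++;
        return r;
}

static unsigned long htab_bytes(unsigned long mem_size)
{
        unsigned long rnd = 1UL << ilog2_ul(mem_size);

        if (rnd < mem_size)             /* round up to the next power of 2 */
                rnd <<= 1;
        return (rnd >> 13) << 7;        /* (#4K pages / 2) PTEGs, 128 bytes each */
}

int main(void)
{
        unsigned long gb = 1UL << 30;

        printf("1 GB -> %lu MB hash table\n", htab_bytes(1 * gb) >> 20);
        printf("3 GB -> %lu MB hash table\n", htab_bytes(3 * gb) >> 20);
        return 0;
}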
#ifdef CONFIG_MEMORY_HOTPLUG
void create_section_mapping(unsigned long start, unsigned long end)
{
BUG_ON(htab_bolt_mapping(start, end, start,
_PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX,
mmu_linear_psize));
}
#endif /* CONFIG_MEMORY_HOTPLUG */
void __init htab_initialize(void)
{
unsigned long table, htab_size_bytes;
@ -410,7 +421,7 @@ void __init htab_initialize(void)
htab_hash_mask = pteg_count - 1;
if (systemcfg->platform & PLATFORM_LPAR) {
if (platform_is_lpar()) {
/* Using a hypervisor which owns the htab */
htab_address = NULL;
_SDR1 = 0;
@ -431,6 +442,9 @@ void __init htab_initialize(void)
/* Initialize the HPT with no entries */
memset((void *)table, 0, htab_size_bytes);
/* Set SDR1 */
mtspr(SPRN_SDR1, _SDR1);
}
mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;
@ -500,6 +514,12 @@ void __init htab_initialize(void)
#undef KB
#undef MB
void __init htab_initialize_secondary(void)
{
if (!platform_is_lpar())
mtspr(SPRN_SDR1, _SDR1);
}
/*
* Called by asm hashtable.S for doing lazy icache flush
*/

View File

@ -84,9 +84,6 @@ void MMU_init(void);
/* XXX should be in current.h -- paulus */
extern struct task_struct *current_set[NR_CPUS];
char *klimit = _end;
struct device_node *memory_node;
extern int init_bootmem_done;
/*

View File

@ -20,6 +20,8 @@
*
*/
#undef DEBUG
#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
@ -64,6 +66,12 @@
#include <asm/vdso.h>
#include <asm/imalloc.h>
#ifdef DEBUG
#define DBG(fmt...) printk(fmt)
#else
#define DBG(fmt...)
#endif
#if PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif
@ -72,8 +80,6 @@
#warning TASK_SIZE is smaller than it needs to be.
#endif
unsigned long klimit = (unsigned long)_end;
/* max amount of RAM to use */
unsigned long __max_memory;
@ -188,14 +194,14 @@ static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
}
#ifdef CONFIG_PPC_64K_PAGES
static const int pgtable_cache_size[2] = {
PTE_TABLE_SIZE, PGD_TABLE_SIZE
static const unsigned int pgtable_cache_size[3] = {
PTE_TABLE_SIZE, PMD_TABLE_SIZE, PGD_TABLE_SIZE
};
static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
"pte_pmd_cache", "pgd_cache",
"pte_pmd_cache", "pmd_cache", "pgd_cache",
};
#else
static const int pgtable_cache_size[2] = {
static const unsigned int pgtable_cache_size[2] = {
PTE_TABLE_SIZE, PMD_TABLE_SIZE
};
static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
@ -213,6 +219,8 @@ void pgtable_cache_init(void)
int size = pgtable_cache_size[i];
const char *name = pgtable_cache_name[i];
DBG("Allocating page table cache %s (#%d) "
"for size: %08x...\n", name, i, size);
pgtable_cache[i] = kmem_cache_create(name,
size, size,
SLAB_HWCACHE_ALIGN |

View File

@ -110,6 +110,7 @@ EXPORT_SYMBOL(phys_mem_access_prot);
void online_page(struct page *page)
{
ClearPageReserved(page);
set_page_count(page, 0);
free_cold_page(page);
totalram_pages++;
num_physpages++;
@ -127,6 +128,9 @@ int __devinit add_memory(u64 start, u64 size)
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
start += KERNELBASE;
create_section_mapping(start, start + size);
/* this should work for most non-highmem platforms */
zone = pgdata->node_zones;

View File

@ -122,8 +122,11 @@ static int map_io_page(unsigned long ea, unsigned long pa, int flags)
*
*/
if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
mmu_virtual_psize))
panic("Can't map bolted IO mapping");
mmu_virtual_psize)) {
printk(KERN_ERR "Failed to do bolted mapping IO "
"memory at %016lx !\n", pa);
return -ENOMEM;
}
}
return 0;
}

View File

@ -20,6 +20,7 @@
#include <asm/cputable.h>
#include <asm/lmb.h>
#include <asm/abs_addr.h>
#include <asm/firmware.h>
struct stab_entry {
unsigned long esid_data;
@ -256,7 +257,7 @@ void stabs_alloc(void)
paca[cpu].stab_addr = newstab;
paca[cpu].stab_real = virt_to_abs(newstab);
printk(KERN_DEBUG "Segment table for CPU %d at 0x%lx "
printk(KERN_INFO "Segment table for CPU %d at 0x%lx "
"virtual, 0x%lx absolute\n",
cpu, paca[cpu].stab_addr, paca[cpu].stab_real);
}
@ -270,10 +271,28 @@ void stabs_alloc(void)
void stab_initialize(unsigned long stab)
{
unsigned long vsid = get_kernel_vsid(KERNELBASE);
unsigned long stabreal;
asm volatile("isync; slbia; isync":::"memory");
make_ste(stab, GET_ESID(KERNELBASE), vsid);
/* Order update */
asm volatile("sync":::"memory");
/* Set ASR */
stabreal = get_paca()->stab_real | 0x1ul;
#ifdef CONFIG_PPC_ISERIES
if (firmware_has_feature(FW_FEATURE_ISERIES)) {
HvCall1(HvCallBaseSetASR, stabreal);
return;
}
#endif /* CONFIG_PPC_ISERIES */
#ifdef CONFIG_PPC_PSERIES
if (platform_is_lpar()) {
plpar_hcall_norets(H_SET_ASR, stabreal);
return;
}
#endif
mtspr(SPRN_ASR, stabreal);
}

View File

@ -233,8 +233,7 @@ static unsigned long get_pc(struct pt_regs *regs)
mmcra = mfspr(SPRN_MMCRA);
/* Were we in the hypervisor? */
if ((systemcfg->platform == PLATFORM_PSERIES_LPAR) &&
(mmcra & MMCRA_SIHV))
if (platform_is_lpar() && (mmcra & MMCRA_SIHV))
/* function descriptor madness */
return *((unsigned long *)hypervisor_bucket);

View File

@ -361,7 +361,9 @@ static void __init chrp_find_openpic(void)
printk(KERN_INFO "OpenPIC at %lx\n", opaddr);
irq_count = NR_IRQS - NUM_ISA_INTERRUPTS - 4; /* leave room for IPIs */
prom_get_irq_senses(init_senses, NUM_8259_INTERRUPTS, NR_IRQS - 4);
prom_get_irq_senses(init_senses, NUM_ISA_INTERRUPTS, NR_IRQS - 4);
/* i8259 cascade is always positive level */
init_senses[0] = IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE;
iranges = (unsigned int *) get_property(np, "interrupt-ranges", &len);
if (iranges == NULL)

View File

@ -103,6 +103,9 @@ static void intReceived(struct XmPciLpEvent *eventParm,
struct pt_regs *regsParm)
{
int irq;
#ifdef CONFIG_IRQSTACKS
struct thread_info *curtp, *irqtp;
#endif
++Pci_Interrupt_Count;
@ -110,7 +113,20 @@ static void intReceived(struct XmPciLpEvent *eventParm,
case XmPciLpEvent_SlotInterrupt:
irq = eventParm->hvLpEvent.xCorrelationToken;
/* Dispatch the interrupt handlers for this irq */
ppc_irq_dispatch_handler(regsParm, irq);
#ifdef CONFIG_IRQSTACKS
/* Switch to the irq stack to handle this */
curtp = current_thread_info();
irqtp = hardirq_ctx[smp_processor_id()];
if (curtp != irqtp) {
irqtp->task = curtp->task;
irqtp->flags = 0;
call___do_IRQ(irq, regsParm, irqtp);
irqtp->task = NULL;
if (irqtp->flags)
set_bits(irqtp->flags, &curtp->flags);
} else
#endif
__do_IRQ(irq, regsParm);
HvCallPci_eoi(eventParm->eventData.slotInterrupt.busNumber,
eventParm->eventData.slotInterrupt.subBusNumber,
eventParm->eventData.slotInterrupt.deviceId);
@ -310,10 +326,8 @@ static void iSeries_disable_IRQ(unsigned int irq)
}
/*
* Need to define this so ppc_irq_dispatch_handler will NOT call
* enable_IRQ at the end of interrupt handling. However, this does
* nothing because there is not enough information provided to do
* the EOI HvCall. This is done by XmPciLpEvent.c
* This does nothing because there is not enough information
* provided to do the EOI HvCall. This is done by XmPciLpEvent.c
*/
static void iSeries_end_IRQ(unsigned int irq)
{

View File

@ -15,6 +15,7 @@
#include <asm/processor.h>
#include <asm/asm-offsets.h>
#include <asm/ppc_asm.h>
.text

View File

@ -39,7 +39,8 @@
#include <asm/sections.h>
#include <asm/iommu.h>
#include <asm/firmware.h>
#include <asm/systemcfg.h>
#include <asm/system.h>
#include <asm/time.h>
#include <asm/paca.h>
#include <asm/cache.h>
@ -71,7 +72,7 @@ extern void hvlog(char *fmt, ...);
#endif
/* Function Prototypes */
static void build_iSeries_Memory_Map(void);
static unsigned long build_iSeries_Memory_Map(void);
static void iseries_shared_idle(void);
static void iseries_dedicated_idle(void);
#ifdef CONFIG_PCI
@ -84,7 +85,6 @@ static void iSeries_pci_final_fixup(void) { }
int piranha_simulator;
extern int rd_size; /* Defined in drivers/block/rd.c */
extern unsigned long klimit;
extern unsigned long embedded_sysmap_start;
extern unsigned long embedded_sysmap_end;
@ -403,9 +403,11 @@ void mschunks_alloc(unsigned long num_chunks)
* a table used to translate Linux's physical addresses to these
* absolute addresses. Absolute addresses are needed when
* communicating with the hypervisor (e.g. to build HPT entries)
*
* Returns the physical memory size
*/
static void __init build_iSeries_Memory_Map(void)
static unsigned long __init build_iSeries_Memory_Map(void)
{
u32 loadAreaFirstChunk, loadAreaLastChunk, loadAreaSize;
u32 nextPhysChunk;
@ -538,7 +540,7 @@ static void __init build_iSeries_Memory_Map(void)
* which should be equal to
* nextPhysChunk
*/
systemcfg->physicalMemorySize = chunk_to_addr(nextPhysChunk);
return chunk_to_addr(nextPhysChunk);
}
/*
@ -564,8 +566,8 @@ static void __init iSeries_setup_arch(void)
printk("Max physical processors = %d\n",
itVpdAreas.xSlicMaxPhysicalProcs);
systemcfg->processor = xIoHriProcessorVpd[procIx].xPVR;
printk("Processor version = %x\n", systemcfg->processor);
_systemcfg->processor = xIoHriProcessorVpd[procIx].xPVR;
printk("Processor version = %x\n", _systemcfg->processor);
}
static void iSeries_show_cpuinfo(struct seq_file *m)
@ -702,7 +704,6 @@ static void iseries_shared_idle(void)
static void iseries_dedicated_idle(void)
{
long oldval;
set_thread_flag(TIF_POLLING_NRFLAG);
while (1) {
@ -929,7 +930,7 @@ void dt_cpus(struct iseries_flat_dt *dt)
dt_end_node(dt);
}
void build_flat_dt(struct iseries_flat_dt *dt)
void build_flat_dt(struct iseries_flat_dt *dt, unsigned long phys_mem_size)
{
u64 tmp[2];
@ -945,7 +946,7 @@ void build_flat_dt(struct iseries_flat_dt *dt)
dt_prop_str(dt, "name", "memory");
dt_prop_str(dt, "device_type", "memory");
tmp[0] = 0;
tmp[1] = systemcfg->physicalMemorySize;
tmp[1] = phys_mem_size;
dt_prop_u64_list(dt, "reg", tmp, 2);
dt_end_node(dt);
@ -965,13 +966,15 @@ void build_flat_dt(struct iseries_flat_dt *dt)
void * __init iSeries_early_setup(void)
{
unsigned long phys_mem_size;
iSeries_fixup_klimit();
/*
* Initialize the table which translate Linux physical addresses to
* AS/400 absolute addresses
*/
build_iSeries_Memory_Map();
phys_mem_size = build_iSeries_Memory_Map();
iSeries_get_cmdline();
@ -981,7 +984,7 @@ void * __init iSeries_early_setup(void)
/* Parse early parameters, in particular mem=x */
parse_early_param();
build_flat_dt(&iseries_dt);
build_flat_dt(&iseries_dt, phys_mem_size);
return (void *) __pa(&iseries_dt);
}

View File

@ -380,9 +380,6 @@ void __init maple_pcibios_fixup(void)
for_each_pci_dev(dev)
pci_read_irq_line(dev);
/* Do the mapping of the IO space */
phbs_remap_io();
DBG(" <- maple_pcibios_fixup\n");
}

View File

@ -918,9 +918,6 @@ void __init pmac_pci_init(void)
PCI_DN(np)->busno = 0xf0;
}
/* map in PCI I/O space */
phbs_remap_io();
/* pmac_check_ht_link(); */
/* Tell pci.c to not use the common resource allocation mechanism */

View File

@ -74,6 +74,9 @@ static DEFINE_SPINLOCK(pmac_pic_lock);
#define GATWICK_IRQ_POOL_SIZE 10
static struct interrupt_info gatwick_int_pool[GATWICK_IRQ_POOL_SIZE];
#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
static unsigned long ppc_lost_interrupts[NR_MASK_WORDS];
/*
* Mark an irq as "lost". This is only used on the pmac
* since it can lose interrupts (see pmac_set_irq_mask).

View File

@ -305,9 +305,19 @@ static int __init smp_psurge_probe(void)
psurge_start = ioremap(PSURGE_START, 4);
psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4);
/* this is not actually strictly necessary -- paulus. */
for (i = 1; i < ncpus; ++i)
smp_hw_index[i] = i;
/*
* This is necessary because OF doesn't know about the
* secondary cpu(s), and thus there aren't nodes in the
* device tree for them, and smp_setup_cpu_maps hasn't
* set their bits in cpu_possible_map and cpu_present_map.
*/
if (ncpus > NR_CPUS)
ncpus = NR_CPUS;
for (i = 1; i < ncpus ; ++i) {
cpu_set(i, cpu_present_map);
cpu_set(i, cpu_possible_map);
set_hard_smp_processor_id(i, i);
}
if (ppc_md.progress) ppc_md.progress("smp_psurge_probe - done", 0x352);
@ -348,6 +358,7 @@ static void __init psurge_dual_sync_tb(int cpu_nr)
int t;
set_dec(tb_ticks_per_jiffy);
/* XXX fixme */
set_tb(0, 0);
last_jiffy_stamp(cpu_nr) = 0;
@ -363,8 +374,6 @@ static void __init psurge_dual_sync_tb(int cpu_nr)
/* now interrupt the secondary, starting both TBs */
psurge_set_ipi(1);
smp_tb_synchronized = 1;
}
static struct irqaction psurge_irqaction = {
@ -625,9 +634,8 @@ void smp_core99_give_timebase(void)
for (t = 100000; t > 0 && sec_tb_reset; --t)
udelay(10);
if (sec_tb_reset)
/* XXX BUG_ON here? */
printk(KERN_WARNING "Timeout waiting sync(2) on second CPU\n");
else
smp_tb_synchronized = 1;
/* Now, restart the timebase by leaving the GPIO to an open collector */
pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 0);
@ -810,19 +818,9 @@ static void __devinit smp_core99_setup_cpu(int cpu_nr)
}
/* Core99 Macs (dual G4s and G5s) */
struct smp_ops_t core99_smp_ops = {
.message_pass = smp_mpic_message_pass,
.probe = smp_core99_probe,
.kick_cpu = smp_core99_kick_cpu,
.setup_cpu = smp_core99_setup_cpu,
.give_timebase = smp_core99_give_timebase,
.take_timebase = smp_core99_take_timebase,
};
#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32)
int __cpu_disable(void)
int smp_core99_cpu_disable(void)
{
cpu_clear(smp_processor_id(), cpu_online_map);
@ -846,7 +844,7 @@ void cpu_die(void)
low_cpu_die();
}
void __cpu_die(unsigned int cpu)
void smp_core99_cpu_die(unsigned int cpu)
{
int timeout;
@ -858,8 +856,21 @@ void __cpu_die(unsigned int cpu)
}
msleep(1);
}
cpu_callin_map[cpu] = 0;
cpu_dead[cpu] = 0;
}
#endif
/* Core99 Macs (dual G4s and G5s) */
struct smp_ops_t core99_smp_ops = {
.message_pass = smp_mpic_message_pass,
.probe = smp_core99_probe,
.kick_cpu = smp_core99_kick_cpu,
.setup_cpu = smp_core99_setup_cpu,
.give_timebase = smp_core99_give_timebase,
.take_timebase = smp_core99_take_timebase,
#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32)
.cpu_disable = smp_core99_cpu_disable,
.cpu_die = smp_core99_cpu_die,
#endif
};

View File

@ -3,3 +3,5 @@ obj-y := pci.o lpar.o hvCall.o nvram.o reconfig.o \
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_IBMVIO) += vio.o
obj-$(CONFIG_XICS) += xics.o
obj-$(CONFIG_SCANLOG) += scanlog.o
obj-$(CONFIG_EEH) += eeh.o eeh_event.o

View File

@ -17,23 +17,21 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/bootmem.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/atomic.h>
#include <asm/systemcfg.h>
#include <asm/ppc-pci.h>
#include <asm/rtas.h>
#undef DEBUG
@ -49,8 +47,8 @@
* were "empty": all reads return 0xff's and all writes are silently
* ignored. EEH slot isolation events can be triggered by parity
* errors on the address or data busses (e.g. during posted writes),
* which in turn might be caused by dust, vibration, humidity,
* radioactivity or plain-old failed hardware.
* which in turn might be caused by low voltage on the bus, dust,
* vibration, humidity, radioactivity or plain-old failed hardware.
*
* Note, however, that one of the leading causes of EEH slot
* freeze events are buggy device drivers, buggy device microcode,
@ -71,26 +69,15 @@
* and sent out for processing.
*/
/** Bus Unit ID macros; get low and hi 32-bits of the 64-bit BUID */
#define BUID_HI(buid) ((buid) >> 32)
#define BUID_LO(buid) ((buid) & 0xffffffff)
/* EEH event workqueue setup. */
static DEFINE_SPINLOCK(eeh_eventlist_lock);
LIST_HEAD(eeh_eventlist);
static void eeh_event_handler(void *);
DECLARE_WORK(eeh_event_wq, eeh_event_handler, NULL);
static struct notifier_block *eeh_notifier_chain;
/*
* If a device driver keeps reading an MMIO register in an interrupt
/* If a device driver keeps reading an MMIO register in an interrupt
* handler after a slot isolation event has occurred, we assume it
* is broken and panic. This sets the threshold for how many read
* attempts we allow before panicking.
*/
#define EEH_MAX_FAILS 1000
static atomic_t eeh_fail_count;
#define EEH_MAX_FAILS 100000
/* Misc forward declarations */
static void eeh_save_bars(struct pci_dev * pdev, struct pci_dn *pdn);
/* RTAS tokens */
static int ibm_set_eeh_option;
@ -101,12 +88,19 @@ static int ibm_slot_error_detail;
static int eeh_subsystem_enabled;
/* Lock to avoid races due to multiple reports of an error */
static DEFINE_SPINLOCK(confirm_error_lock);
/* Buffer for reporting slot-error-detail rtas calls */
static unsigned char slot_errbuf[RTAS_ERROR_LOG_MAX];
static DEFINE_SPINLOCK(slot_errbuf_lock);
static int eeh_error_buf_size;
/* System monitoring statistics */
static DEFINE_PER_CPU(unsigned long, no_device);
static DEFINE_PER_CPU(unsigned long, no_dn);
static DEFINE_PER_CPU(unsigned long, no_cfg_addr);
static DEFINE_PER_CPU(unsigned long, ignored_check);
static DEFINE_PER_CPU(unsigned long, total_mmio_ffs);
static DEFINE_PER_CPU(unsigned long, false_positives);
static DEFINE_PER_CPU(unsigned long, ignored_failures);
@ -224,9 +218,9 @@ pci_addr_cache_insert(struct pci_dev *dev, unsigned long alo,
while (*p) {
parent = *p;
piar = rb_entry(parent, struct pci_io_addr_range, rb_node);
if (alo < piar->addr_lo) {
if (ahi < piar->addr_lo) {
p = &parent->rb_left;
} else if (ahi > piar->addr_hi) {
} else if (alo > piar->addr_hi) {
p = &parent->rb_right;
} else {
if (dev != piar->pcidev ||
@ -245,6 +239,11 @@ pci_addr_cache_insert(struct pci_dev *dev, unsigned long alo,
piar->pcidev = dev;
piar->flags = flags;
#ifdef DEBUG
printk(KERN_DEBUG "PIAR: insert range=[%lx:%lx] dev=%s\n",
alo, ahi, pci_name (dev));
#endif
rb_link_node(&piar->rb_node, parent, p);
rb_insert_color(&piar->rb_node, &pci_io_addr_cache_root.rb_root);
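The comparison fixed in this hunk is the usual test for keeping disjoint [lo,hi] ranges in an ordered tree; a small standalone sketch (illustration only, invented names, not the kernel's rb-tree code) shows why descending on the range ends, rather than the starts, is what catches overlaps:

#include <stdio.h>

enum cmp { GO_LEFT, GO_RIGHT, OVERLAP };

static enum cmp range_cmp(unsigned long alo, unsigned long ahi,
                          unsigned long blo, unsigned long bhi)
{
        if (ahi < blo)                  /* new range ends before existing starts */
                return GO_LEFT;
        if (alo > bhi)                  /* new range starts after existing ends */
                return GO_RIGHT;
        return OVERLAP;                 /* anything else intersects */
}

int main(void)
{
        /* Starts below the existing range but overlaps it: the old test
         * (alo < blo, so go left) would have filed this as disjoint. */
        printf("%d\n", range_cmp(0x1000, 0x2fff, 0x2000, 0x3fff)); /* OVERLAP  (2) */
        printf("%d\n", range_cmp(0x0000, 0x0fff, 0x2000, 0x3fff)); /* GO_LEFT  (0) */
        printf("%d\n", range_cmp(0x4000, 0x4fff, 0x2000, 0x3fff)); /* GO_RIGHT (1) */
        return 0;
}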
@ -260,18 +259,17 @@ static void __pci_addr_cache_insert_device(struct pci_dev *dev)
dn = pci_device_to_OF_node(dev);
if (!dn) {
printk(KERN_WARNING "PCI: no pci dn found for dev=%s\n",
pci_name(dev));
printk(KERN_WARNING "PCI: no pci dn found for dev=%s\n", pci_name(dev));
return;
}
/* Skip any devices for which EEH is not enabled. */
pdn = dn->data;
pdn = PCI_DN(dn);
if (!(pdn->eeh_mode & EEH_MODE_SUPPORTED) ||
pdn->eeh_mode & EEH_MODE_NOCHECK) {
#ifdef DEBUG
printk(KERN_INFO "PCI: skip building address cache for=%s\n",
pci_name(dev));
printk(KERN_INFO "PCI: skip building address cache for=%s - %s\n",
pci_name(dev), pdn->node->full_name);
#endif
return;
}
@ -307,7 +305,7 @@ static void __pci_addr_cache_insert_device(struct pci_dev *dev)
* we maintain a cache of devices that can be quickly searched.
* This routine adds a device to that cache.
*/
void pci_addr_cache_insert_device(struct pci_dev *dev)
static void pci_addr_cache_insert_device(struct pci_dev *dev)
{
unsigned long flags;
@ -350,7 +348,7 @@ restart:
* the tree multiple times (once per resource).
* But so what; device removal doesn't need to be that fast.
*/
void pci_addr_cache_remove_device(struct pci_dev *dev)
static void pci_addr_cache_remove_device(struct pci_dev *dev)
{
unsigned long flags;
@ -370,8 +368,12 @@ void pci_addr_cache_remove_device(struct pci_dev *dev)
*/
void __init pci_addr_cache_build(void)
{
struct device_node *dn;
struct pci_dev *dev = NULL;
if (!eeh_subsystem_enabled)
return;
spin_lock_init(&pci_io_addr_cache_root.piar_lock);
while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
@ -380,6 +382,10 @@ void __init pci_addr_cache_build(void)
continue;
}
pci_addr_cache_insert_device(dev);
/* Save the BAR's; firmware doesn't restore these after EEH reset */
dn = pci_device_to_OF_node(dev);
eeh_save_bars(dev, PCI_DN(dn));
}
#ifdef DEBUG
@ -391,22 +397,26 @@ void __init pci_addr_cache_build(void)
/* --------------------------------------------------------------- */
/* Above lies the PCI Address Cache. Below lies the EEH event infrastructure */
/**
* eeh_register_notifier - Register to find out about EEH events.
* @nb: notifier block to callback on events
*/
int eeh_register_notifier(struct notifier_block *nb)
void eeh_slot_error_detail (struct pci_dn *pdn, int severity)
{
return notifier_chain_register(&eeh_notifier_chain, nb);
}
unsigned long flags;
int rc;
/**
* eeh_unregister_notifier - Unregister to an EEH event notifier.
* @nb: notifier block to callback on events
*/
int eeh_unregister_notifier(struct notifier_block *nb)
{
return notifier_chain_unregister(&eeh_notifier_chain, nb);
/* Log the error with the rtas logger */
spin_lock_irqsave(&slot_errbuf_lock, flags);
memset(slot_errbuf, 0, eeh_error_buf_size);
rc = rtas_call(ibm_slot_error_detail,
8, 1, NULL, pdn->eeh_config_addr,
BUID_HI(pdn->phb->buid),
BUID_LO(pdn->phb->buid), NULL, 0,
virt_to_phys(slot_errbuf),
eeh_error_buf_size,
severity);
if (rc == 0)
log_error(slot_errbuf, ERR_TYPE_RTAS_LOG, 0);
spin_unlock_irqrestore(&slot_errbuf_lock, flags);
}
/**
@ -414,16 +424,16 @@ int eeh_unregister_notifier(struct notifier_block *nb)
* @dn: device node to read
* @rets: array to return results in
*/
static int read_slot_reset_state(struct device_node *dn, int rets[])
static int read_slot_reset_state(struct pci_dn *pdn, int rets[])
{
int token, outputs;
struct pci_dn *pdn = dn->data;
if (ibm_read_slot_reset_state2 != RTAS_UNKNOWN_SERVICE) {
token = ibm_read_slot_reset_state2;
outputs = 4;
} else {
token = ibm_read_slot_reset_state;
rets[2] = 0; /* fake PE Unavailable info */
outputs = 3;
}
@ -431,76 +441,9 @@ static int read_slot_reset_state(struct device_node *dn, int rets[])
BUID_HI(pdn->phb->buid), BUID_LO(pdn->phb->buid));
}
/**
* eeh_panic - call panic() for an eeh event that cannot be handled.
* The philosophy of this routine is that it is better to panic and
* halt the OS than it is to risk possible data corruption by
* oblivious device drivers that don't know better.
*
* @dev pci device that had an eeh event
* @reset_state current reset state of the device slot
*/
static void eeh_panic(struct pci_dev *dev, int reset_state)
{
/*
* XXX We should create a separate sysctl for this.
*
* Since the panic_on_oops sysctl is used to halt the system
* in light of potential corruption, we can use it here.
*/
if (panic_on_oops)
panic("EEH: MMIO failure (%d) on device:%s\n", reset_state,
pci_name(dev));
else {
__get_cpu_var(ignored_failures)++;
printk(KERN_INFO "EEH: Ignored MMIO failure (%d) on device:%s\n",
reset_state, pci_name(dev));
}
}
/**
* eeh_event_handler - dispatch EEH events. The detection of a frozen
* slot can occur inside an interrupt, where it can be hard to do
* anything about it. The goal of this routine is to pull these
* detection events out of the context of the interrupt handler, and
* re-dispatch them for processing at a later time in a normal context.
*
* @dummy - unused
*/
static void eeh_event_handler(void *dummy)
{
unsigned long flags;
struct eeh_event *event;
while (1) {
spin_lock_irqsave(&eeh_eventlist_lock, flags);
event = NULL;
if (!list_empty(&eeh_eventlist)) {
event = list_entry(eeh_eventlist.next, struct eeh_event, list);
list_del(&event->list);
}
spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
if (event == NULL)
break;
printk(KERN_INFO "EEH: MMIO failure (%d), notifiying device "
"%s\n", event->reset_state,
pci_name(event->dev));
atomic_set(&eeh_fail_count, 0);
notifier_call_chain (&eeh_notifier_chain,
EEH_NOTIFY_FREEZE, event);
__get_cpu_var(slot_resets)++;
pci_dev_put(event->dev);
kfree(event);
}
}
/**
* eeh_token_to_phys - convert EEH address token to phys address
* @token i/o token, should be address in the form 0xE....
* @token i/o token, should be address in the form 0xA....
*/
static inline unsigned long eeh_token_to_phys(unsigned long token)
{
@ -515,6 +458,70 @@ static inline unsigned long eeh_token_to_phys(unsigned long token)
return pa | (token & (PAGE_SIZE-1));
}
/**
* Return the "partitionable endpoint" (pe) under which this device lies
*/
static struct device_node * find_device_pe(struct device_node *dn)
{
while ((dn->parent) && PCI_DN(dn->parent) &&
(PCI_DN(dn->parent)->eeh_mode & EEH_MODE_SUPPORTED)) {
dn = dn->parent;
}
return dn;
}
/** Mark all devices that are peers of this device as failed.
* Mark the device driver too, so that it can see the failure
* immediately; this is critical, since some drivers poll
* status registers in interrupts ... If a driver is polling,
* and the slot is frozen, then the driver can deadlock in
* an interrupt context, which is bad.
*/
static void __eeh_mark_slot (struct device_node *dn, int mode_flag)
{
while (dn) {
if (PCI_DN(dn)) {
PCI_DN(dn)->eeh_mode |= mode_flag;
if (dn->child)
__eeh_mark_slot (dn->child, mode_flag);
}
dn = dn->sibling;
}
}
void eeh_mark_slot (struct device_node *dn, int mode_flag)
{
dn = find_device_pe (dn);
PCI_DN(dn)->eeh_mode |= mode_flag;
__eeh_mark_slot (dn->child, mode_flag);
}
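A standalone sketch of the traversal used here (invented names; the kernel walks struct device_node through its child and sibling pointers in the same first-child/next-sibling fashion):

#include <stdio.h>

struct node {
        const char *name;
        int flags;
        struct node *child, *sibling;
};

static void mark_below(struct node *n, int flag)
{
        for (; n; n = n->sibling) {     /* every peer at this level ... */
                n->flags |= flag;
                if (n->child)           /* ... and everything beneath it */
                        mark_below(n->child, flag);
        }
}

int main(void)
{
        struct node fn1    = { "fn1",    0, NULL, NULL };
        struct node fn0    = { "fn0",    0, NULL, &fn1 };
        struct node bridge = { "bridge", 0, &fn0, NULL };

        bridge.flags |= 1;              /* mark the PE node itself ... */
        mark_below(bridge.child, 1);    /* ... then all functions under it */
        printf("%s=%d %s=%d %s=%d\n", bridge.name, bridge.flags,
               fn0.name, fn0.flags, fn1.name, fn1.flags);
        return 0;
}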
static void __eeh_clear_slot (struct device_node *dn, int mode_flag)
{
while (dn) {
if (PCI_DN(dn)) {
PCI_DN(dn)->eeh_mode &= ~mode_flag;
PCI_DN(dn)->eeh_check_count = 0;
if (dn->child)
__eeh_clear_slot (dn->child, mode_flag);
}
dn = dn->sibling;
}
}
void eeh_clear_slot (struct device_node *dn, int mode_flag)
{
unsigned long flags;
spin_lock_irqsave(&confirm_error_lock, flags);
dn = find_device_pe (dn);
PCI_DN(dn)->eeh_mode &= ~mode_flag;
PCI_DN(dn)->eeh_check_count = 0;
__eeh_clear_slot (dn->child, mode_flag);
spin_unlock_irqrestore(&confirm_error_lock, flags);
}
/**
* eeh_dn_check_failure - check if all 1's data is due to EEH slot freeze
* @dn device node
@ -526,7 +533,7 @@ static inline unsigned long eeh_token_to_phys(unsigned long token)
* will query firmware for the EEH status.
*
* Returns 0 if there has not been an EEH error; otherwise returns
* a non-zero value and queues up a solt isolation event notification.
* a non-zero value and queues up a slot isolation event notification.
*
* It is safe to call this routine in an interrupt context.
*/
@ -535,42 +542,59 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
int ret;
int rets[3];
unsigned long flags;
int rc, reset_state;
struct eeh_event *event;
struct pci_dn *pdn;
int rc = 0;
__get_cpu_var(total_mmio_ffs)++;
if (!eeh_subsystem_enabled)
return 0;
if (!dn)
if (!dn) {
__get_cpu_var(no_dn)++;
return 0;
pdn = dn->data;
}
pdn = PCI_DN(dn);
/* Access to IO BARs might get this far and still not want checking. */
if (!pdn->eeh_capable || !(pdn->eeh_mode & EEH_MODE_SUPPORTED) ||
if (!(pdn->eeh_mode & EEH_MODE_SUPPORTED) ||
pdn->eeh_mode & EEH_MODE_NOCHECK) {
__get_cpu_var(ignored_check)++;
#ifdef DEBUG
printk ("EEH:ignored check (%x) for %s %s\n",
pdn->eeh_mode, pci_name (dev), dn->full_name);
#endif
return 0;
}
if (!pdn->eeh_config_addr) {
__get_cpu_var(no_cfg_addr)++;
return 0;
}
/*
* If we already have a pending isolation event for this
* slot, we know it's bad already, we don't need to check...
/* If we already have a pending isolation event for this
* slot, we know it's bad already, we don't need to check.
* Do this checking under a lock; as multiple PCI devices
* in one slot might report errors simultaneously, and we
* only want one error recovery routine running.
*/
spin_lock_irqsave(&confirm_error_lock, flags);
rc = 1;
if (pdn->eeh_mode & EEH_MODE_ISOLATED) {
atomic_inc(&eeh_fail_count);
if (atomic_read(&eeh_fail_count) >= EEH_MAX_FAILS) {
pdn->eeh_check_count ++;
if (pdn->eeh_check_count >= EEH_MAX_FAILS) {
printk (KERN_ERR "EEH: Device driver ignored %d bad reads, panicing\n",
pdn->eeh_check_count);
dump_stack();
/* re-read the slot reset state */
if (read_slot_reset_state(dn, rets) != 0)
if (read_slot_reset_state(pdn, rets) != 0)
rets[0] = -1; /* reset state unknown */
eeh_panic(dev, rets[0]);
/* If we are here, then we hit an infinite loop. Stop. */
panic("EEH: MMIO halt (%d) on device:%s\n", rets[0], pci_name(dev));
}
return 0;
goto dn_unlock;
}
/*
@ -580,66 +604,69 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
* function zero of a multi-function device.
* In any case they must share a common PHB.
*/
ret = read_slot_reset_state(dn, rets);
if (!(ret == 0 && rets[1] == 1 && (rets[0] == 2 || rets[0] == 4))) {
ret = read_slot_reset_state(pdn, rets);
/* If the call to firmware failed, punt */
if (ret != 0) {
printk(KERN_WARNING "EEH: read_slot_reset_state() failed; rc=%d dn=%s\n",
ret, dn->full_name);
__get_cpu_var(false_positives)++;
return 0;
rc = 0;
goto dn_unlock;
}
/* prevent repeated reports of this failure */
pdn->eeh_mode |= EEH_MODE_ISOLATED;
reset_state = rets[0];
spin_lock_irqsave(&slot_errbuf_lock, flags);
memset(slot_errbuf, 0, eeh_error_buf_size);
rc = rtas_call(ibm_slot_error_detail,
8, 1, NULL, pdn->eeh_config_addr,
BUID_HI(pdn->phb->buid),
BUID_LO(pdn->phb->buid), NULL, 0,
virt_to_phys(slot_errbuf),
eeh_error_buf_size,
1 /* Temporary Error */);
if (rc == 0)
log_error(slot_errbuf, ERR_TYPE_RTAS_LOG, 0);
spin_unlock_irqrestore(&slot_errbuf_lock, flags);
printk(KERN_INFO "EEH: MMIO failure (%d) on device: %s %s\n",
rets[0], dn->name, dn->full_name);
event = kmalloc(sizeof(*event), GFP_ATOMIC);
if (event == NULL) {
eeh_panic(dev, reset_state);
return 1;
/* If EEH is not supported on this device, punt. */
if (rets[1] != 1) {
printk(KERN_WARNING "EEH: event on unsupported device, rc=%d dn=%s\n",
ret, dn->full_name);
__get_cpu_var(false_positives)++;
rc = 0;
goto dn_unlock;
}
event->dev = dev;
event->dn = dn;
event->reset_state = reset_state;
/* If not the kind of error we know about, punt. */
if (rets[0] != 2 && rets[0] != 4 && rets[0] != 5) {
__get_cpu_var(false_positives)++;
rc = 0;
goto dn_unlock;
}
/* We may or may not be called in an interrupt context */
spin_lock_irqsave(&eeh_eventlist_lock, flags);
list_add(&event->list, &eeh_eventlist);
spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
/* Note that config-io to empty slots may fail;
* we recognize empty because they don't have children. */
if ((rets[0] == 5) && (dn->child == NULL)) {
__get_cpu_var(false_positives)++;
rc = 0;
goto dn_unlock;
}
__get_cpu_var(slot_resets)++;
/* Avoid repeated reports of this failure, including problems
* with other functions on this device, and functions under
* bridges. */
eeh_mark_slot (dn, EEH_MODE_ISOLATED);
spin_unlock_irqrestore(&confirm_error_lock, flags);
eeh_send_failure_event (dn, dev, rets[0], rets[2]);
/* Most EEH events are due to device driver bugs. Having
* a stack trace will help the device-driver authors figure
* out what happened. So print that out. */
dump_stack();
schedule_work(&eeh_event_wq);
if (rets[0] != 5) dump_stack();
return 1;
return 0;
dn_unlock:
spin_unlock_irqrestore(&confirm_error_lock, flags);
return rc;
}
EXPORT_SYMBOL(eeh_dn_check_failure);
EXPORT_SYMBOL_GPL(eeh_dn_check_failure);
/**
* eeh_check_failure - check if all 1's data is due to EEH slot freeze
* @token i/o token, should be address in the form 0xA....
* @val value, should be all 1's (XXX why do we need this arg??)
*
* Check for an eeh failure at the given token address.
* Check for an EEH failure at the given token address. Call this
* routine if the result of a read was all 0xff's and you want to
* find out if this is due to an EEH slot freeze event. This routine
@ -656,8 +683,10 @@ unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned lon
/* Finding the phys addr + pci device; this is pretty quick. */
addr = eeh_token_to_phys((unsigned long __force) token);
dev = pci_get_device_by_addr(addr);
if (!dev)
if (!dev) {
__get_cpu_var(no_device)++;
return val;
}
dn = pci_device_to_OF_node(dev);
eeh_dn_check_failure (dn, dev);
@ -668,6 +697,217 @@ unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned lon
EXPORT_SYMBOL(eeh_check_failure);
/* ------------------------------------------------------------- */
/* The code below deals with error recovery */
/** Return negative value if a permanent error, else return
* a number of milliseconds to wait until the PCI slot is
* ready to be used.
*/
static int
eeh_slot_availability(struct pci_dn *pdn)
{
int rc;
int rets[3];
rc = read_slot_reset_state(pdn, rets);
if (rc) return rc;
if (rets[1] == 0) return -1; /* EEH is not supported */
if (rets[0] == 0) return 0; /* Oll Korrect */
if (rets[0] == 5) {
if (rets[2] == 0) return -1; /* permanently unavailable */
return rets[2]; /* number of millisecs to wait */
}
return -1;
}
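One plausible way for a caller to consume this tri-state return (negative: give up, zero: ready, positive: suggested wait in milliseconds) is sketched below with an invented stand-in for the RTAS query; the real retry loop appears in rtas_set_slot_reset() further down.

#include <stdio.h>

static int fake_slot_availability(int attempt)  /* stand-in for the RTAS call */
{
        return attempt < 3 ? 200 : 0;   /* "wait 200 ms" three times, then ready */
}

int main(void)
{
        int attempt, rc = -1;

        for (attempt = 0; attempt < 10; attempt++) {
                rc = fake_slot_availability(attempt);
                if (rc <= 0)            /* ready, or permanently unavailable */
                        break;
                printf("slot not ready, would sleep %d ms\n", rc + 100);
        }
        printf(rc == 0 ? "slot usable\n" : "permanent failure\n");
        return 0;
}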
/** rtas_pci_slot_reset raises/lowers the pci #RST line
* state: 1/0 to raise/lower the #RST
*
* Clear the EEH-frozen condition on a slot. This routine
* asserts the PCI #RST line if the 'state' argument is '1',
* and drops the #RST line if 'state' is '0'. This routine is
* safe to call in an interrupt context.
*
*/
static void
rtas_pci_slot_reset(struct pci_dn *pdn, int state)
{
int rc;
BUG_ON (pdn==NULL);
if (!pdn->phb) {
printk (KERN_WARNING "EEH: in slot reset, device node %s has no phb\n",
pdn->node->full_name);
return;
}
rc = rtas_call(ibm_set_slot_reset,4,1, NULL,
pdn->eeh_config_addr,
BUID_HI(pdn->phb->buid),
BUID_LO(pdn->phb->buid),
state);
if (rc) {
printk (KERN_WARNING "EEH: Unable to reset the failed slot, (%d) #RST=%d dn=%s\n",
rc, state, pdn->node->full_name);
return;
}
}
/** rtas_set_slot_reset -- assert the pci #RST line for 1/4 second
* dn -- device node to be reset.
*/
void
rtas_set_slot_reset(struct pci_dn *pdn)
{
int i, rc;
rtas_pci_slot_reset (pdn, 1);
/* The PCI bus requires that the reset be held high for at least
* 100 milliseconds. We wait a bit longer 'just in case'. */
#define PCI_BUS_RST_HOLD_TIME_MSEC 250
msleep (PCI_BUS_RST_HOLD_TIME_MSEC);
/* We might get hit with another EEH freeze as soon as the
* pci slot reset line is dropped. Make sure we don't miss
* these, and clear the flag now. */
eeh_clear_slot (pdn->node, EEH_MODE_ISOLATED);
rtas_pci_slot_reset (pdn, 0);
/* After a PCI slot has been reset, the PCI Express spec requires
* a 1.5 second idle time for the bus to stabilize, before starting
* up traffic. */
#define PCI_BUS_SETTLE_TIME_MSEC 1800
msleep (PCI_BUS_SETTLE_TIME_MSEC);
/* Now double check with the firmware to make sure the device is
* ready to be used; if not, wait for recovery. */
for (i=0; i<10; i++) {
rc = eeh_slot_availability (pdn);
if (rc <= 0) break;
msleep (rc+100);
}
}
/* ------------------------------------------------------- */
/** Save and restore of PCI BARs
*
* Although firmware will set up BARs during boot, it doesn't
* set up device BAR's after a device reset, although it will,
* if requested, set up bridge configuration. Thus, we need to
* configure the PCI devices ourselves.
*/
/**
* __restore_bars - Restore the Base Address Registers
* Loads the PCI configuration space base address registers,
* the expansion ROM base address, the latency timer, etc.
* from the saved values in the device node.
*/
static inline void __restore_bars (struct pci_dn *pdn)
{
int i;
if (NULL==pdn->phb) return;
for (i=4; i<10; i++) {
rtas_write_config(pdn, i*4, 4, pdn->config_space[i]);
}
/* 12 == Expansion ROM Address */
rtas_write_config(pdn, 12*4, 4, pdn->config_space[12]);
#define BYTE_SWAP(OFF) (8*((OFF)/4)+3-(OFF))
#define SAVED_BYTE(OFF) (((u8 *)(pdn->config_space))[BYTE_SWAP(OFF)])
rtas_write_config (pdn, PCI_CACHE_LINE_SIZE, 1,
SAVED_BYTE(PCI_CACHE_LINE_SIZE));
rtas_write_config (pdn, PCI_LATENCY_TIMER, 1,
SAVED_BYTE(PCI_LATENCY_TIMER));
/* max latency, min grant, interrupt pin and line */
rtas_write_config(pdn, 15*4, 4, pdn->config_space[15]);
}
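The BYTE_SWAP() index above is worth a worked example: the config space was saved with 32-bit config reads, so on a big-endian host a single config-space byte sits at the mirrored position within its saved dword. A standalone check of the mapping (illustration only):

#include <stdio.h>

#define BYTE_SWAP(OFF)  (8 * ((OFF) / 4) + 3 - (OFF))

int main(void)
{
        int off;

        /* Bytes 12..15 (cache line size, latency timer, header type, BIST)
         * come back reversed within their saved dword. */
        for (off = 12; off < 16; off++)
                printf("config offset %2d -> saved byte index %2d\n",
                       off, BYTE_SWAP(off));
        /* prints 12->15, 13->14, 14->13, 15->12 */
        return 0;
}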
/**
* eeh_restore_bars - restore the PCI config space info
*
* This routine performs a recursive walk to the children
* of this device as well.
*/
void eeh_restore_bars(struct pci_dn *pdn)
{
struct device_node *dn;
if (!pdn)
return;
if (! pdn->eeh_is_bridge)
__restore_bars (pdn);
dn = pdn->node->child;
while (dn) {
eeh_restore_bars (PCI_DN(dn));
dn = dn->sibling;
}
}
/**
* eeh_save_bars - save device bars
*
* Save the values of the device bars. Unlike the restore
* routine, this routine is *not* recursive. This is because
* PCI devices are added individually; but, for the restore,
* an entire slot is reset at a time.
*/
static void eeh_save_bars(struct pci_dev * pdev, struct pci_dn *pdn)
{
int i;
if (!pdev || !pdn )
return;
for (i = 0; i < 16; i++)
pci_read_config_dword(pdev, i * 4, &pdn->config_space[i]);
if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
pdn->eeh_is_bridge = 1;
}
void
rtas_configure_bridge(struct pci_dn *pdn)
{
int token = rtas_token ("ibm,configure-bridge");
int rc;
if (token == RTAS_UNKNOWN_SERVICE)
return;
rc = rtas_call(token,3,1, NULL,
pdn->eeh_config_addr,
BUID_HI(pdn->phb->buid),
BUID_LO(pdn->phb->buid));
if (rc) {
printk (KERN_WARNING "EEH: Unable to configure device bridge (%d) for %s\n",
rc, pdn->node->full_name);
}
}
/* ------------------------------------------------------------- */
/* The code below deals with enabling EEH for devices during the
* early boot sequence. EEH must be enabled before any PCI probing
* can be done.
*/
#define EEH_ENABLE 1
struct eeh_early_enable_info {
unsigned int buid_hi;
unsigned int buid_lo;
@ -684,9 +924,11 @@ static void *early_enable_eeh(struct device_node *dn, void *data)
u32 *device_id = (u32 *)get_property(dn, "device-id", NULL);
u32 *regs;
int enable;
struct pci_dn *pdn = dn->data;
struct pci_dn *pdn = PCI_DN(dn);
pdn->eeh_mode = 0;
pdn->eeh_check_count = 0;
pdn->eeh_freeze_count = 0;
if (status && strcmp(status, "ok") != 0)
return NULL; /* ignore devices with bad status */
@ -725,6 +967,7 @@ static void *early_enable_eeh(struct device_node *dn, void *data)
ret = rtas_call(ibm_set_eeh_option, 4, 1, NULL,
regs[0], info->buid_hi, info->buid_lo,
EEH_ENABLE);
if (ret == 0) {
eeh_subsystem_enabled = 1;
pdn->eeh_mode |= EEH_MODE_SUPPORTED;
@ -736,7 +979,7 @@ static void *early_enable_eeh(struct device_node *dn, void *data)
/* This device doesn't support EEH, but it may have an
* EEH parent, in which case we mark it as supported. */
if (dn->parent && dn->parent->data
if (dn->parent && PCI_DN(dn->parent)
&& (PCI_DN(dn->parent)->eeh_mode & EEH_MODE_SUPPORTED)) {
/* Parent supports EEH. */
pdn->eeh_mode |= EEH_MODE_SUPPORTED;
@ -770,6 +1013,9 @@ void __init eeh_init(void)
struct device_node *phb, *np;
struct eeh_early_enable_info info;
spin_lock_init(&confirm_error_lock);
spin_lock_init(&slot_errbuf_lock);
np = of_find_node_by_path("/rtas");
if (np == NULL)
return;
@ -797,13 +1043,11 @@ void __init eeh_init(void)
for (phb = of_find_node_by_name(NULL, "pci"); phb;
phb = of_find_node_by_name(phb, "pci")) {
unsigned long buid;
struct pci_dn *pci;
buid = get_phb_buid(phb);
if (buid == 0 || phb->data == NULL)
if (buid == 0 || PCI_DN(phb) == NULL)
continue;
pci = phb->data;
info.buid_lo = BUID_LO(buid);
info.buid_hi = BUID_HI(buid);
traverse_pci_devices(phb, early_enable_eeh, &info);
@ -832,11 +1076,13 @@ void eeh_add_device_early(struct device_node *dn)
struct pci_controller *phb;
struct eeh_early_enable_info info;
if (!dn || !dn->data)
if (!dn || !PCI_DN(dn))
return;
phb = PCI_DN(dn)->phb;
if (NULL == phb || 0 == phb->buid) {
printk(KERN_WARNING "EEH: Expected buid but found none\n");
printk(KERN_WARNING "EEH: Expected buid but found none for %s\n",
dn->full_name);
dump_stack();
return;
}
@ -844,7 +1090,7 @@ void eeh_add_device_early(struct device_node *dn)
info.buid_lo = BUID_LO(phb->buid);
early_enable_eeh(dn, &info);
}
EXPORT_SYMBOL(eeh_add_device_early);
EXPORT_SYMBOL_GPL(eeh_add_device_early);
/**
* eeh_add_device_late - perform EEH initialization for the indicated pci device
@ -855,6 +1101,9 @@ EXPORT_SYMBOL(eeh_add_device_early);
*/
void eeh_add_device_late(struct pci_dev *dev)
{
struct device_node *dn;
struct pci_dn *pdn;
if (!dev || !eeh_subsystem_enabled)
return;
@ -862,9 +1111,15 @@ void eeh_add_device_late(struct pci_dev *dev)
printk(KERN_DEBUG "EEH: adding device %s\n", pci_name(dev));
#endif
pci_dev_get (dev);
dn = pci_device_to_OF_node(dev);
pdn = PCI_DN(dn);
pdn->pcidev = dev;
pci_addr_cache_insert_device (dev);
eeh_save_bars(dev, pdn);
}
EXPORT_SYMBOL(eeh_add_device_late);
EXPORT_SYMBOL_GPL(eeh_add_device_late);
/**
* eeh_remove_device - undo EEH setup for the indicated pci device
@ -875,6 +1130,7 @@ EXPORT_SYMBOL(eeh_add_device_late);
*/
void eeh_remove_device(struct pci_dev *dev)
{
struct device_node *dn;
if (!dev || !eeh_subsystem_enabled)
return;
@ -883,20 +1139,29 @@ void eeh_remove_device(struct pci_dev *dev)
printk(KERN_DEBUG "EEH: remove device %s\n", pci_name(dev));
#endif
pci_addr_cache_remove_device(dev);
dn = pci_device_to_OF_node(dev);
PCI_DN(dn)->pcidev = NULL;
pci_dev_put (dev);
}
EXPORT_SYMBOL(eeh_remove_device);
EXPORT_SYMBOL_GPL(eeh_remove_device);
static int proc_eeh_show(struct seq_file *m, void *v)
{
unsigned int cpu;
unsigned long ffs = 0, positives = 0, failures = 0;
unsigned long resets = 0;
unsigned long no_dev = 0, no_dn = 0, no_cfg = 0, no_check = 0;
for_each_cpu(cpu) {
ffs += per_cpu(total_mmio_ffs, cpu);
positives += per_cpu(false_positives, cpu);
failures += per_cpu(ignored_failures, cpu);
resets += per_cpu(slot_resets, cpu);
no_dev += per_cpu(no_device, cpu);
no_dn += per_cpu(no_dn, cpu);
no_cfg += per_cpu(no_cfg_addr, cpu);
no_check += per_cpu(ignored_check, cpu);
}
if (0 == eeh_subsystem_enabled) {
@ -904,13 +1169,17 @@ static int proc_eeh_show(struct seq_file *m, void *v)
seq_printf(m, "eeh_total_mmio_ffs=%ld\n", ffs);
} else {
seq_printf(m, "EEH Subsystem is enabled\n");
seq_printf(m, "eeh_total_mmio_ffs=%ld\n"
seq_printf(m,
"no device=%ld\n"
"no device node=%ld\n"
"no config address=%ld\n"
"check not wanted=%ld\n"
"eeh_total_mmio_ffs=%ld\n"
"eeh_false_positives=%ld\n"
"eeh_ignored_failures=%ld\n"
"eeh_slot_resets=%ld\n"
"eeh_fail_count=%d\n",
ffs, positives, failures, resets,
eeh_fail_count.counter);
"eeh_slot_resets=%ld\n",
no_dev, no_dn, no_cfg, no_check,
ffs, positives, failures, resets);
}
return 0;
@ -932,7 +1201,7 @@ static int __init eeh_init_proc(void)
{
struct proc_dir_entry *e;
if (systemcfg->platform & PLATFORM_PSERIES) {
if (platform_is_pseries()) {
e = create_proc_entry("ppc64/eeh", 0, NULL);
if (e)
e->proc_fops = &proc_eeh_operations;

View File

@ -0,0 +1,155 @@
/*
* eeh_event.c
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Copyright (c) 2005 Linas Vepstas <linas@linas.org>
*/
#include <linux/list.h>
#include <linux/pci.h>
#include <asm/eeh_event.h>
/** Overview:
* EEH error states may be detected within exception handlers;
* however, the recovery processing needs to occur asynchronously
* in a normal kernel context and not an interrupt context.
* This pair of routines creates an event and queues it onto a
* work-queue, where a worker thread can drive recovery.
*/
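A userspace sketch of the same deferral pattern (pthreads, invented names; the kernel uses a workqueue and a kernel thread instead): the producer, which may run in an awkward context, only appends to a locked list, and a separate worker drains the list and does the slow handling.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct event {
        int state;
        struct event *next;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static struct event *list;

static void send_event(int state)       /* cheap; the "interrupt time" side */
{
        struct event *ev = malloc(sizeof(*ev));

        ev->state = state;
        pthread_mutex_lock(&lock);
        ev->next = list;
        list = ev;
        pthread_mutex_unlock(&lock);
        pthread_cond_signal(&cond);
}

static void *worker(void *arg)          /* slow recovery happens here */
{
        (void)arg;
        for (;;) {
                struct event *ev;

                pthread_mutex_lock(&lock);
                while (!list)
                        pthread_cond_wait(&cond, &lock);
                ev = list;
                list = ev->next;
                pthread_mutex_unlock(&lock);
                printf("handling event, state=%d\n", ev->state);
                free(ev);
        }
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, worker, NULL);
        send_event(2);
        send_event(5);
        sleep(1);                       /* let the worker drain the list */
        return 0;
}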
/* EEH event workqueue setup. */
static spinlock_t eeh_eventlist_lock = SPIN_LOCK_UNLOCKED;
LIST_HEAD(eeh_eventlist);
static void eeh_thread_launcher(void *);
DECLARE_WORK(eeh_event_wq, eeh_thread_launcher, NULL);
/**
* eeh_panic - call panic() for an eeh event that cannot be handled.
* The philosophy of this routine is that it is better to panic and
* halt the OS than it is to risk possible data corruption by
* oblivious device drivers that don't know better.
*
* @dev pci device that had an eeh event
* @reset_state current reset state of the device slot
*/
static void eeh_panic(struct pci_dev *dev, int reset_state)
{
/*
* Since the panic_on_oops sysctl is used to halt the system
* in light of potential corruption, we can use it here.
*/
if (panic_on_oops) {
panic("EEH: MMIO failure (%d) on device:%s\n", reset_state,
pci_name(dev));
}
else {
printk(KERN_INFO "EEH: Ignored MMIO failure (%d) on device:%s\n",
reset_state, pci_name(dev));
}
}
/**
* eeh_event_handler - dispatch EEH events. The detection of a frozen
* slot can occur inside an interrupt, where it can be hard to do
* anything about it. The goal of this routine is to pull these
* detection events out of the context of the interrupt handler, and
* re-dispatch them for processing at a later time in a normal context.
*
* @dummy - unused
*/
static int eeh_event_handler(void * dummy)
{
unsigned long flags;
struct eeh_event *event;
daemonize ("eehd");
while (1) {
set_current_state(TASK_INTERRUPTIBLE);
spin_lock_irqsave(&eeh_eventlist_lock, flags);
event = NULL;
if (!list_empty(&eeh_eventlist)) {
event = list_entry(eeh_eventlist.next, struct eeh_event, list);
list_del(&event->list);
}
spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
if (event == NULL)
break;
printk(KERN_INFO "EEH: Detected PCI bus error on device %s\n",
pci_name(event->dev));
eeh_panic (event->dev, event->state);
kfree(event);
}
return 0;
}
/**
* eeh_thread_launcher
*
* @dummy - unused
*/
static void eeh_thread_launcher(void *dummy)
{
if (kernel_thread(eeh_event_handler, NULL, CLONE_KERNEL) < 0)
printk(KERN_ERR "Failed to start EEH daemon\n");
}
/**
* eeh_send_failure_event - generate a PCI error event
* @dev pci device
*
* This routine can be called within an interrupt context;
* the actual event will be delivered in a normal context
* (from a workqueue).
*/
int eeh_send_failure_event (struct device_node *dn,
struct pci_dev *dev,
int state,
int time_unavail)
{
unsigned long flags;
struct eeh_event *event;
event = kmalloc(sizeof(*event), GFP_ATOMIC);
if (event == NULL) {
printk (KERN_ERR "EEH: out of memory, event not handled\n");
return 1;
}
if (dev)
pci_dev_get(dev);
event->dn = dn;
event->dev = dev;
event->state = state;
event->time_unavail = time_unavail;
/* We may or may not be called in an interrupt context */
spin_lock_irqsave(&eeh_eventlist_lock, flags);
list_add(&event->list, &eeh_eventlist);
spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
schedule_work(&eeh_event_wq);
return 0;
}
/********************** END OF FILE ******************************/

View File

@ -42,7 +42,6 @@
#include <asm/machdep.h>
#include <asm/abs_addr.h>
#include <asm/pSeries_reconfig.h>
#include <asm/systemcfg.h>
#include <asm/firmware.h>
#include <asm/tce.h>
#include <asm/ppc-pci.h>
@ -582,7 +581,7 @@ void iommu_init_early_pSeries(void)
return;
}
if (systemcfg->platform & PLATFORM_LPAR) {
if (platform_is_lpar()) {
if (firmware_has_feature(FW_FEATURE_MULTITCE)) {
ppc_md.tce_build = tce_buildmulti_pSeriesLP;
ppc_md.tce_free = tce_freemulti_pSeriesLP;

View File

@ -107,7 +107,6 @@ static void __init pSeries_request_regions(void)
void __init pSeries_final_fixup(void)
{
phbs_remap_io();
pSeries_request_regions();
pci_addr_cache_build();
@ -123,7 +122,7 @@ static void fixup_winbond_82c105(struct pci_dev* dev)
int i;
unsigned int reg;
if (!(systemcfg->platform & PLATFORM_PSERIES))
if (!platform_is_pseries())
return;
printk("Using INTC for W82c105 IDE controller.\n");

View File

@ -408,7 +408,7 @@ static int proc_ppc64_create_ofdt(void)
{
struct proc_dir_entry *ent;
if (!(systemcfg->platform & PLATFORM_PSERIES))
if (!platform_is_pseries())
return 0;
ent = create_proc_entry("ppc64/ofdt", S_IWUSR, NULL);

View File

@ -482,9 +482,11 @@ static int __init rtas_init(void)
{
struct proc_dir_entry *entry;
/* No RTAS, only warn if we are on a pSeries box */
if (!platform_is_pseries())
return 0;
/* No RTAS */
if (rtas_token("event-scan") == RTAS_UNKNOWN_SERVICE) {
if (systemcfg->platform & PLATFORM_PSERIES)
printk(KERN_INFO "rtasd: no event-scan on system\n");
return 1;
}

View File

@ -249,7 +249,7 @@ static void __init pSeries_setup_arch(void)
ppc_md.idle_loop = default_idle;
}
if (systemcfg->platform & PLATFORM_LPAR)
if (platform_is_lpar())
ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
else
ppc_md.enable_pmcs = power4_enable_pmcs;
@ -378,7 +378,7 @@ static void __init pSeries_init_early(void)
fw_feature_init();
if (systemcfg->platform & PLATFORM_LPAR)
if (platform_is_lpar())
hpte_init_lpar();
else {
hpte_init_native();
@ -388,7 +388,7 @@ static void __init pSeries_init_early(void)
generic_find_legacy_serial_ports(&physport, &default_speed);
if (systemcfg->platform & PLATFORM_LPAR)
if (platform_is_lpar())
find_udbg_vterm();
else if (physport) {
/* Map the uart for udbg. */
@ -592,7 +592,7 @@ static void pseries_shared_idle(void)
static int pSeries_pci_probe_mode(struct pci_bus *bus)
{
if (systemcfg->platform & PLATFORM_LPAR)
if (platform_is_lpar())
return PCI_PROBE_DEVTREE;
return PCI_PROBE_NORMAL;
}

View File

@ -46,6 +46,7 @@
#include <asm/rtas.h>
#include <asm/pSeries_reconfig.h>
#include <asm/mpic.h>
#include <asm/systemcfg.h>
#include "plpar_wrappers.h"
@ -96,7 +97,7 @@ int pSeries_cpu_disable(void)
int cpu = smp_processor_id();
cpu_clear(cpu, cpu_online_map);
systemcfg->processorCount--;
_systemcfg->processorCount--;
/*fix boot_cpuid here*/
if (cpu == boot_cpuid)
@ -441,7 +442,7 @@ void __init smp_init_pSeries(void)
smp_ops->cpu_die = pSeries_cpu_die;
/* Processors can be added/removed only on LPAR */
if (systemcfg->platform == PLATFORM_PSERIES_LPAR)
if (platform_is_lpar())
pSeries_reconfig_notifier_register(&pSeries_smp_nb);
#endif

View File

@ -545,7 +545,9 @@ nextnode:
of_node_put(np);
}
if (systemcfg->platform == PLATFORM_PSERIES) {
if (platform_is_lpar())
ops = &pSeriesLP_ops;
else {
#ifdef CONFIG_SMP
for_each_cpu(i) {
int hard_id;
@ -561,12 +563,11 @@ nextnode:
#else
xics_per_cpu[0] = ioremap(intr_base, intr_size);
#endif /* CONFIG_SMP */
} else if (systemcfg->platform == PLATFORM_PSERIES_LPAR) {
ops = &pSeriesLP_ops;
}
xics_8259_pic.enable = i8259_pic.enable;
xics_8259_pic.disable = i8259_pic.disable;
xics_8259_pic.end = i8259_pic.end;
for (i = 0; i < 16; ++i)
get_irq_desc(i)->handler = &xics_8259_pic;
for (; i < NR_IRQS; ++i)

View File

@ -226,7 +226,7 @@ static void iommu_table_u3_setup(void)
iommu_table_u3.it_busno = 0;
iommu_table_u3.it_offset = 0;
/* it_size is in number of entries */
iommu_table_u3.it_size = dart_tablesize / sizeof(u32);
iommu_table_u3.it_size = (dart_tablesize / sizeof(u32)) >> DART_PAGE_FACTOR;
/* Initialize the common IOMMU code */
iommu_table_u3.it_base = (unsigned long)dart_vbase;

View File

@ -8,4 +8,4 @@ obj-$(CONFIG_8xx) += start_8xx.o
obj-$(CONFIG_6xx) += start_32.o
obj-$(CONFIG_4xx) += start_32.o
obj-$(CONFIG_PPC64) += start_64.o
obj-y += xmon.o ppc-dis.o ppc-opc.o subr_prf.o setjmp.o
obj-y += xmon.o ppc-dis.o ppc-opc.o setjmp.o nonstdio.o

View File

@ -0,0 +1,134 @@
/*
* Copyright (C) 1996-2005 Paul Mackerras.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/string.h>
#include <asm/time.h>
#include "nonstdio.h"
int xmon_putchar(int c)
{
char ch = c;
if (c == '\n')
xmon_putchar('\r');
return xmon_write(&ch, 1) == 1? c: -1;
}
static char line[256];
static char *lineptr;
static int lineleft;
int xmon_expect(const char *str, unsigned long timeout)
{
int c;
unsigned long t0;
/* assume 25MHz default timebase if tb_ticks_per_sec not set yet */
timeout *= tb_ticks_per_sec? tb_ticks_per_sec: 25000000;
t0 = get_tbl();
do {
lineptr = line;
for (;;) {
c = xmon_read_poll();
if (c == -1) {
if (get_tbl() - t0 > timeout)
return 0;
continue;
}
if (c == '\n')
break;
if (c != '\r' && lineptr < &line[sizeof(line) - 1])
*lineptr++ = c;
}
*lineptr = 0;
} while (strstr(line, str) == NULL);
return 1;
}
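The timeout test above relies on unsigned subtraction of the timebase; a minimal sketch of the same deadline pattern (an invented ticks() stand-in for get_tbl(), clocked off CLOCK_MONOTONIC):

#include <stdio.h>
#include <time.h>

static unsigned long ticks(void)        /* stand-in for get_tbl() */
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (unsigned long)ts.tv_sec * 1000000000UL + ts.tv_nsec;
}

int main(void)
{
        unsigned long budget = 100UL * 1000000UL;       /* 100 ms of "ticks" */
        unsigned long t0 = ticks();
        unsigned long polls = 0;

        /* 'now - start > budget' stays correct even if the counter wraps,
         * as long as the elapsed time itself fits in an unsigned long. */
        while (ticks() - t0 <= budget)
                polls++;
        printf("polled %lu times within the budget\n", polls);
        return 0;
}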
int xmon_getchar(void)
{
int c;
if (lineleft == 0) {
lineptr = line;
for (;;) {
c = xmon_readchar();
if (c == -1 || c == 4)
break;
if (c == '\r' || c == '\n') {
*lineptr++ = '\n';
xmon_putchar('\n');
break;
}
switch (c) {
case 0177:
case '\b':
if (lineptr > line) {
xmon_putchar('\b');
xmon_putchar(' ');
xmon_putchar('\b');
--lineptr;
}
break;
case 'U' & 0x1F:
while (lineptr > line) {
xmon_putchar('\b');
xmon_putchar(' ');
xmon_putchar('\b');
--lineptr;
}
break;
default:
if (lineptr >= &line[sizeof(line) - 1])
xmon_putchar('\a');
else {
xmon_putchar(c);
*lineptr++ = c;
}
}
}
lineleft = lineptr - line;
lineptr = line;
}
if (lineleft == 0)
return -1;
--lineleft;
return *lineptr++;
}
char *xmon_gets(char *str, int nb)
{
char *p;
int c;
for (p = str; p < str + nb - 1; ) {
c = xmon_getchar();
if (c == -1) {
if (p == str)
return NULL;
break;
}
*p++ = c;
if (c == '\n')
break;
}
*p = 0;
return str;
}
void xmon_printf(const char *format, ...)
{
va_list args;
int n;
static char xmon_outbuf[1024];
va_start(args, format);
n = vsnprintf(xmon_outbuf, sizeof(xmon_outbuf), format, args);
va_end(args);
xmon_write(xmon_outbuf, n);
}

View File

@ -1,22 +1,14 @@
typedef int FILE;
extern FILE *xmon_stdin, *xmon_stdout;
#define EOF (-1)
#define stdin xmon_stdin
#define stdout xmon_stdout
#define printf xmon_printf
#define fprintf xmon_fprintf
#define fputs xmon_fputs
#define fgets xmon_fgets
#define putchar xmon_putchar
#define getchar xmon_getchar
#define putc xmon_putc
#define getc xmon_getc
#define fopen(n, m) NULL
#define fflush(f) do {} while (0)
#define fclose(f) do {} while (0)
extern char *fgets(char *, int, void *);
extern void xmon_printf(const char *, ...);
extern void xmon_fprintf(void *, const char *, ...);
extern void xmon_sprintf(char *, const char *, ...);
#define perror(s) printf("%s: no files!\n", (s))
#define printf xmon_printf
#define putchar xmon_putchar
extern int xmon_putchar(int c);
extern int xmon_getchar(void);
extern char *xmon_gets(char *, int);
extern void xmon_printf(const char *, ...);
extern void xmon_map_scc(void);
extern int xmon_expect(const char *str, unsigned long timeout);
extern int xmon_write(void *ptr, int nb);
extern int xmon_readchar(void);
extern int xmon_read_poll(void);

View File

@ -14,61 +14,61 @@
_GLOBAL(xmon_setjmp)
mflr r0
STL r0,0(r3)
STL r1,SZL(r3)
STL r2,2*SZL(r3)
PPC_STL r0,0(r3)
PPC_STL r1,SZL(r3)
PPC_STL r2,2*SZL(r3)
mfcr r0
STL r0,3*SZL(r3)
STL r13,4*SZL(r3)
STL r14,5*SZL(r3)
STL r15,6*SZL(r3)
STL r16,7*SZL(r3)
STL r17,8*SZL(r3)
STL r18,9*SZL(r3)
STL r19,10*SZL(r3)
STL r20,11*SZL(r3)
STL r21,12*SZL(r3)
STL r22,13*SZL(r3)
STL r23,14*SZL(r3)
STL r24,15*SZL(r3)
STL r25,16*SZL(r3)
STL r26,17*SZL(r3)
STL r27,18*SZL(r3)
STL r28,19*SZL(r3)
STL r29,20*SZL(r3)
STL r30,21*SZL(r3)
STL r31,22*SZL(r3)
PPC_STL r0,3*SZL(r3)
PPC_STL r13,4*SZL(r3)
PPC_STL r14,5*SZL(r3)
PPC_STL r15,6*SZL(r3)
PPC_STL r16,7*SZL(r3)
PPC_STL r17,8*SZL(r3)
PPC_STL r18,9*SZL(r3)
PPC_STL r19,10*SZL(r3)
PPC_STL r20,11*SZL(r3)
PPC_STL r21,12*SZL(r3)
PPC_STL r22,13*SZL(r3)
PPC_STL r23,14*SZL(r3)
PPC_STL r24,15*SZL(r3)
PPC_STL r25,16*SZL(r3)
PPC_STL r26,17*SZL(r3)
PPC_STL r27,18*SZL(r3)
PPC_STL r28,19*SZL(r3)
PPC_STL r29,20*SZL(r3)
PPC_STL r30,21*SZL(r3)
PPC_STL r31,22*SZL(r3)
li r3,0
blr
_GLOBAL(xmon_longjmp)
CMPI r4,0
PPC_LCMPI r4,0
bne 1f
li r4,1
1: LDL r13,4*SZL(r3)
LDL r14,5*SZL(r3)
LDL r15,6*SZL(r3)
LDL r16,7*SZL(r3)
LDL r17,8*SZL(r3)
LDL r18,9*SZL(r3)
LDL r19,10*SZL(r3)
LDL r20,11*SZL(r3)
LDL r21,12*SZL(r3)
LDL r22,13*SZL(r3)
LDL r23,14*SZL(r3)
LDL r24,15*SZL(r3)
LDL r25,16*SZL(r3)
LDL r26,17*SZL(r3)
LDL r27,18*SZL(r3)
LDL r28,19*SZL(r3)
LDL r29,20*SZL(r3)
LDL r30,21*SZL(r3)
LDL r31,22*SZL(r3)
LDL r0,3*SZL(r3)
1: PPC_LL r13,4*SZL(r3)
PPC_LL r14,5*SZL(r3)
PPC_LL r15,6*SZL(r3)
PPC_LL r16,7*SZL(r3)
PPC_LL r17,8*SZL(r3)
PPC_LL r18,9*SZL(r3)
PPC_LL r19,10*SZL(r3)
PPC_LL r20,11*SZL(r3)
PPC_LL r21,12*SZL(r3)
PPC_LL r22,13*SZL(r3)
PPC_LL r23,14*SZL(r3)
PPC_LL r24,15*SZL(r3)
PPC_LL r25,16*SZL(r3)
PPC_LL r26,17*SZL(r3)
PPC_LL r27,18*SZL(r3)
PPC_LL r28,19*SZL(r3)
PPC_LL r29,20*SZL(r3)
PPC_LL r30,21*SZL(r3)
PPC_LL r31,22*SZL(r3)
PPC_LL r0,3*SZL(r3)
mtcrf 0x38,r0
LDL r0,0(r3)
LDL r1,SZL(r3)
LDL r2,2*SZL(r3)
PPC_LL r0,0(r3)
PPC_LL r1,SZL(r3)
PPC_LL r2,2*SZL(r3)
mtlr r0
mr r3,r4
blr
@ -84,52 +84,52 @@ _GLOBAL(xmon_longjmp)
* different ABIs, though).
*/
_GLOBAL(xmon_save_regs)
STL r0,0*SZL(r3)
STL r2,2*SZL(r3)
STL r3,3*SZL(r3)
STL r4,4*SZL(r3)
STL r5,5*SZL(r3)
STL r6,6*SZL(r3)
STL r7,7*SZL(r3)
STL r8,8*SZL(r3)
STL r9,9*SZL(r3)
STL r10,10*SZL(r3)
STL r11,11*SZL(r3)
STL r12,12*SZL(r3)
STL r13,13*SZL(r3)
STL r14,14*SZL(r3)
STL r15,15*SZL(r3)
STL r16,16*SZL(r3)
STL r17,17*SZL(r3)
STL r18,18*SZL(r3)
STL r19,19*SZL(r3)
STL r20,20*SZL(r3)
STL r21,21*SZL(r3)
STL r22,22*SZL(r3)
STL r23,23*SZL(r3)
STL r24,24*SZL(r3)
STL r25,25*SZL(r3)
STL r26,26*SZL(r3)
STL r27,27*SZL(r3)
STL r28,28*SZL(r3)
STL r29,29*SZL(r3)
STL r30,30*SZL(r3)
STL r31,31*SZL(r3)
PPC_STL r0,0*SZL(r3)
PPC_STL r2,2*SZL(r3)
PPC_STL r3,3*SZL(r3)
PPC_STL r4,4*SZL(r3)
PPC_STL r5,5*SZL(r3)
PPC_STL r6,6*SZL(r3)
PPC_STL r7,7*SZL(r3)
PPC_STL r8,8*SZL(r3)
PPC_STL r9,9*SZL(r3)
PPC_STL r10,10*SZL(r3)
PPC_STL r11,11*SZL(r3)
PPC_STL r12,12*SZL(r3)
PPC_STL r13,13*SZL(r3)
PPC_STL r14,14*SZL(r3)
PPC_STL r15,15*SZL(r3)
PPC_STL r16,16*SZL(r3)
PPC_STL r17,17*SZL(r3)
PPC_STL r18,18*SZL(r3)
PPC_STL r19,19*SZL(r3)
PPC_STL r20,20*SZL(r3)
PPC_STL r21,21*SZL(r3)
PPC_STL r22,22*SZL(r3)
PPC_STL r23,23*SZL(r3)
PPC_STL r24,24*SZL(r3)
PPC_STL r25,25*SZL(r3)
PPC_STL r26,26*SZL(r3)
PPC_STL r27,27*SZL(r3)
PPC_STL r28,28*SZL(r3)
PPC_STL r29,29*SZL(r3)
PPC_STL r30,30*SZL(r3)
PPC_STL r31,31*SZL(r3)
/* go up one stack frame for SP */
LDL r4,0(r1)
STL r4,1*SZL(r3)
PPC_LL r4,0(r1)
PPC_STL r4,1*SZL(r3)
/* get caller's LR */
LDL r0,LRSAVE(r4)
STL r0,_NIP-STACK_FRAME_OVERHEAD(r3)
STL r0,_LINK-STACK_FRAME_OVERHEAD(r3)
PPC_LL r0,LRSAVE(r4)
PPC_STL r0,_NIP-STACK_FRAME_OVERHEAD(r3)
PPC_STL r0,_LINK-STACK_FRAME_OVERHEAD(r3)
mfmsr r0
STL r0,_MSR-STACK_FRAME_OVERHEAD(r3)
PPC_STL r0,_MSR-STACK_FRAME_OVERHEAD(r3)
mfctr r0
STL r0,_CTR-STACK_FRAME_OVERHEAD(r3)
PPC_STL r0,_CTR-STACK_FRAME_OVERHEAD(r3)
mfxer r0
STL r0,_XER-STACK_FRAME_OVERHEAD(r3)
PPC_STL r0,_XER-STACK_FRAME_OVERHEAD(r3)
mfcr r0
STL r0,_CCR-STACK_FRAME_OVERHEAD(r3)
PPC_STL r0,_CCR-STACK_FRAME_OVERHEAD(r3)
li r0,0
STL r0,_TRAP-STACK_FRAME_OVERHEAD(r3)
PPC_STL r0,_TRAP-STACK_FRAME_OVERHEAD(r3)
blr
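For reference while reading the STL/LDL to PPC_STL/PPC_LL conversion above, here is a minimal sketch of how such width-agnostic macros can be defined so the same xmon setjmp/longjmp source assembles on both 32-bit and 64-bit; this is an illustrative assumption, not the exact kernel header contents:
/* sketch only -- real definitions live in the ppc asm headers */
#ifdef __powerpc64__
#define PPC_LL		ld	/* load a register-sized (8-byte) value */
#define PPC_STL		std	/* store a register-sized value */
#define PPC_LCMPI	cmpdi	/* compare immediate, doubleword */
#define SZL		8	/* bytes per saved register slot */
#else
#define PPC_LL		lwz	/* load a register-sized (4-byte) value */
#define PPC_STL		stw	/* store a register-sized value */
#define PPC_LCMPI	cmpwi	/* compare immediate, word */
#define SZL		4
#endif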

View File

@ -11,7 +11,6 @@
#include <linux/cuda.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/sysrq.h>
#include <linux/bitops.h>
#include <asm/xmon.h>
#include <asm/prom.h>
@ -22,10 +21,11 @@
#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/btext.h>
#include <asm/time.h>
#include "nonstdio.h"
static volatile unsigned char __iomem *sccc, *sccd;
unsigned int TXRDY, RXRDY, DLAB;
static int xmon_expect(const char *str, unsigned int timeout);
static int use_serial;
static int use_screen;
@ -33,16 +33,6 @@ static int via_modem;
static int xmon_use_sccb;
static struct device_node *channel_node;
#define TB_SPEED 25000000
static inline unsigned int readtb(void)
{
unsigned int ret;
asm volatile("mftb %0" : "=r" (ret) :);
return ret;
}
void buf_access(void)
{
if (DLAB)
@ -91,23 +81,7 @@ static unsigned long chrp_find_phys_io_base(void)
}
#endif /* CONFIG_PPC_CHRP */
#ifdef CONFIG_MAGIC_SYSRQ
static void sysrq_handle_xmon(int key, struct pt_regs *regs,
struct tty_struct *tty)
{
xmon(regs);
}
static struct sysrq_key_op sysrq_xmon_op =
{
.handler = sysrq_handle_xmon,
.help_msg = "Xmon",
.action_msg = "Entering xmon",
};
#endif
void
xmon_map_scc(void)
void xmon_map_scc(void)
{
#ifdef CONFIG_PPC_MULTIPLATFORM
volatile unsigned char __iomem *base;
@ -217,8 +191,6 @@ xmon_map_scc(void)
RXRDY = 1;
DLAB = 0x80;
#endif /* platform */
register_sysrq_key('x', &sysrq_xmon_op);
}
static int scc_initialized = 0;
@ -238,8 +210,7 @@ static inline void do_poll_adb(void)
#endif /* CONFIG_ADB_CUDA */
}
int
xmon_write(void *handle, void *ptr, int nb)
int xmon_write(void *ptr, int nb)
{
char *p = ptr;
int i, c, ct;
@ -311,8 +282,7 @@ static unsigned char xmon_shift_keytab[128] =
"\0.\0*\0+\0\0\0\0\0/\r\0-\0" /* 0x40 - 0x4f */
"\0\0000123456789\0\0\0"; /* 0x50 - 0x5f */
static int
xmon_get_adb_key(void)
static int xmon_get_adb_key(void)
{
int k, t, on;
@ -350,32 +320,21 @@ xmon_get_adb_key(void)
}
#endif /* CONFIG_BOOTX_TEXT */
int
xmon_read(void *handle, void *ptr, int nb)
int xmon_readchar(void)
{
char *p = ptr;
int i;
#ifdef CONFIG_BOOTX_TEXT
if (use_screen) {
for (i = 0; i < nb; ++i)
*p++ = xmon_get_adb_key();
return i;
}
if (use_screen)
return xmon_get_adb_key();
#endif
if (!scc_initialized)
xmon_init_scc();
for (i = 0; i < nb; ++i) {
while ((*sccc & RXRDY) == 0)
do_poll_adb();
buf_access();
*p++ = *sccd;
}
return i;
return *sccd;
}
int
xmon_read_poll(void)
int xmon_read_poll(void)
{
if ((*sccc & RXRDY) == 0) {
do_poll_adb();
@ -395,8 +354,7 @@ static unsigned char scc_inittab[] = {
3, 0xc1, /* rx enable, 8 bits */
};
void
xmon_init_scc(void)
void xmon_init_scc(void)
{
if ( _machine == _MACH_chrp )
{
@ -410,6 +368,7 @@ xmon_init_scc(void)
else if ( _machine == _MACH_Pmac )
{
int i, x;
unsigned long timeout;
if (channel_node != 0)
pmac_call_feature(
@ -424,8 +383,12 @@ xmon_init_scc(void)
PMAC_FTR_MODEM_ENABLE,
channel_node, 0, 1);
printk(KERN_INFO "Modem powered up by debugger !\n");
t0 = readtb();
while (readtb() - t0 < 3*TB_SPEED)
t0 = get_tbl();
timeout = 3 * tb_ticks_per_sec;
if (timeout == 0)
/* assume 25MHz if tb_ticks_per_sec not set */
timeout = 75000000;
while (get_tbl() - t0 < timeout)
eieio();
}
/* use the B channel if requested */
@ -447,164 +410,19 @@ xmon_init_scc(void)
scc_initialized = 1;
if (via_modem) {
for (;;) {
xmon_write(NULL, "ATE1V1\r", 7);
xmon_write("ATE1V1\r", 7);
if (xmon_expect("OK", 5)) {
xmon_write(NULL, "ATA\r", 4);
xmon_write("ATA\r", 4);
if (xmon_expect("CONNECT", 40))
break;
}
xmon_write(NULL, "+++", 3);
xmon_write("+++", 3);
xmon_expect("OK", 3);
}
}
}
void *xmon_stdin;
void *xmon_stdout;
void *xmon_stderr;
int xmon_putc(int c, void *f)
{
char ch = c;
if (c == '\n')
xmon_putc('\r', f);
return xmon_write(f, &ch, 1) == 1? c: -1;
}
int xmon_putchar(int c)
{
return xmon_putc(c, xmon_stdout);
}
int xmon_fputs(char *str, void *f)
{
int n = strlen(str);
return xmon_write(f, str, n) == n? 0: -1;
}
int
xmon_readchar(void)
{
char ch;
for (;;) {
switch (xmon_read(xmon_stdin, &ch, 1)) {
case 1:
return ch;
case -1:
xmon_printf("read(stdin) returned -1\r\n", 0, 0);
return -1;
}
}
}
static char line[256];
static char *lineptr;
static int lineleft;
int xmon_expect(const char *str, unsigned int timeout)
{
int c;
unsigned int t0;
timeout *= TB_SPEED;
t0 = readtb();
do {
lineptr = line;
for (;;) {
c = xmon_read_poll();
if (c == -1) {
if (readtb() - t0 > timeout)
return 0;
continue;
}
if (c == '\n')
break;
if (c != '\r' && lineptr < &line[sizeof(line) - 1])
*lineptr++ = c;
}
*lineptr = 0;
} while (strstr(line, str) == NULL);
return 1;
}
int
xmon_getchar(void)
{
int c;
if (lineleft == 0) {
lineptr = line;
for (;;) {
c = xmon_readchar();
if (c == -1 || c == 4)
break;
if (c == '\r' || c == '\n') {
*lineptr++ = '\n';
xmon_putchar('\n');
break;
}
switch (c) {
case 0177:
case '\b':
if (lineptr > line) {
xmon_putchar('\b');
xmon_putchar(' ');
xmon_putchar('\b');
--lineptr;
}
break;
case 'U' & 0x1F:
while (lineptr > line) {
xmon_putchar('\b');
xmon_putchar(' ');
xmon_putchar('\b');
--lineptr;
}
break;
default:
if (lineptr >= &line[sizeof(line) - 1])
xmon_putchar('\a');
else {
xmon_putchar(c);
*lineptr++ = c;
}
}
}
lineleft = lineptr - line;
lineptr = line;
}
if (lineleft == 0)
return -1;
--lineleft;
return *lineptr++;
}
char *
xmon_fgets(char *str, int nb, void *f)
{
char *p;
int c;
for (p = str; p < str + nb - 1; ) {
c = xmon_getchar();
if (c == -1) {
if (p == str)
return NULL;
break;
}
*p++ = c;
if (c == '\n')
break;
}
*p = 0;
return str;
}
void
xmon_enter(void)
void xmon_enter(void)
{
#ifdef CONFIG_ADB_PMU
if (_machine == _MACH_Pmac) {
@ -613,8 +431,7 @@ xmon_enter(void)
#endif
}
void
xmon_leave(void)
void xmon_leave(void)
{
#ifdef CONFIG_ADB_PMU
if (_machine == _MACH_Pmac) {

View File

@ -6,182 +6,29 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/config.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/sysrq.h>
#include <linux/init.h>
#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/udbg.h>
#include <asm/system.h>
#include "nonstdio.h"
#ifdef CONFIG_MAGIC_SYSRQ
static void sysrq_handle_xmon(int key, struct pt_regs *pt_regs,
struct tty_struct *tty)
void xmon_map_scc(void)
{
/* ensure xmon is enabled */
xmon_init(1);
debugger(pt_regs);
}
static struct sysrq_key_op sysrq_xmon_op =
{
.handler = sysrq_handle_xmon,
.help_msg = "Xmon",
.action_msg = "Entering xmon",
};
static int __init setup_xmon_sysrq(void)
{
register_sysrq_key('x', &sysrq_xmon_op);
return 0;
}
__initcall(setup_xmon_sysrq);
#endif /* CONFIG_MAGIC_SYSRQ */
int
xmon_write(void *handle, void *ptr, int nb)
int xmon_write(void *ptr, int nb)
{
return udbg_write(ptr, nb);
}
int
xmon_read(void *handle, void *ptr, int nb)
int xmon_readchar(void)
{
return udbg_read(ptr, nb);
if (udbg_getc)
return udbg_getc();
return -1;
}
int
xmon_read_poll(void)
int xmon_read_poll(void)
{
if (udbg_getc_poll)
return udbg_getc_poll();
return -1;
}
FILE *xmon_stdin;
FILE *xmon_stdout;
int
xmon_putc(int c, void *f)
{
char ch = c;
if (c == '\n')
xmon_putc('\r', f);
return xmon_write(f, &ch, 1) == 1? c: -1;
}
int
xmon_putchar(int c)
{
return xmon_putc(c, xmon_stdout);
}
int
xmon_fputs(char *str, void *f)
{
int n = strlen(str);
return xmon_write(f, str, n) == n? 0: -1;
}
int
xmon_readchar(void)
{
char ch;
for (;;) {
switch (xmon_read(xmon_stdin, &ch, 1)) {
case 1:
return ch;
case -1:
xmon_printf("read(stdin) returned -1\r\n", 0, 0);
return -1;
}
}
}
static char line[256];
static char *lineptr;
static int lineleft;
int
xmon_getchar(void)
{
int c;
if (lineleft == 0) {
lineptr = line;
for (;;) {
c = xmon_readchar();
if (c == -1 || c == 4)
break;
if (c == '\r' || c == '\n') {
*lineptr++ = '\n';
xmon_putchar('\n');
break;
}
switch (c) {
case 0177:
case '\b':
if (lineptr > line) {
xmon_putchar('\b');
xmon_putchar(' ');
xmon_putchar('\b');
--lineptr;
}
break;
case 'U' & 0x1F:
while (lineptr > line) {
xmon_putchar('\b');
xmon_putchar(' ');
xmon_putchar('\b');
--lineptr;
}
break;
default:
if (lineptr >= &line[sizeof(line) - 1])
xmon_putchar('\a');
else {
xmon_putchar(c);
*lineptr++ = c;
}
}
}
lineleft = lineptr - line;
lineptr = line;
}
if (lineleft == 0)
return -1;
--lineleft;
return *lineptr++;
}
char *
xmon_fgets(char *str, int nb, void *f)
{
char *p;
int c;
for (p = str; p < str + nb - 1; ) {
c = xmon_getchar();
if (c == -1) {
if (p == str)
return NULL;
break;
}
*p++ = c;
if (c == '\n')
break;
}
*p = 0;
return str;
}

View File

@ -15,273 +15,30 @@
#include <asm/8xx_immap.h>
#include <asm/mpc8xx.h>
#include <asm/commproc.h>
#include "nonstdio.h"
extern void xmon_printf(const char *fmt, ...);
extern int xmon_8xx_write(char *str, int nb);
extern int xmon_8xx_read_poll(void);
extern int xmon_8xx_read_char(void);
void prom_drawhex(uint);
void prom_drawstring(const char *str);
static int use_screen = 1; /* default */
#define TB_SPEED 25000000
static inline unsigned int readtb(void)
void xmon_map_scc(void)
{
unsigned int ret;
asm volatile("mftb %0" : "=r" (ret) :);
return ret;
}
void buf_access(void)
{
}
void
xmon_map_scc(void)
{
cpmp = (cpm8xx_t *)&(((immap_t *)IMAP_ADDR)->im_cpm);
use_screen = 0;
prom_drawstring("xmon uses serial port\n");
}
static int scc_initialized = 0;
void xmon_init_scc(void);
int
xmon_write(void *handle, void *ptr, int nb)
int xmon_write(void *ptr, int nb)
{
char *p = ptr;
int i, c, ct;
if (!scc_initialized)
xmon_init_scc();
return(xmon_8xx_write(ptr, nb));
}
int xmon_wants_key;
int
xmon_read(void *handle, void *ptr, int nb)
int xmon_readchar(void)
{
char *p = ptr;
int i;
if (!scc_initialized)
xmon_init_scc();
for (i = 0; i < nb; ++i) {
*p++ = xmon_8xx_read_char();
}
return i;
return xmon_8xx_read_char();
}
int
xmon_read_poll(void)
int xmon_read_poll(void)
{
return(xmon_8xx_read_poll());
}
void
xmon_init_scc()
{
scc_initialized = 1;
}
#if 0
extern int (*prom_entry)(void *);
int
xmon_exit(void)
{
struct prom_args {
char *service;
} args;
for (;;) {
args.service = "exit";
(*prom_entry)(&args);
}
}
#endif
void *xmon_stdin;
void *xmon_stdout;
void *xmon_stderr;
void
xmon_init(void)
{
}
int
xmon_putc(int c, void *f)
{
char ch = c;
if (c == '\n')
xmon_putc('\r', f);
return xmon_write(f, &ch, 1) == 1? c: -1;
}
int
xmon_putchar(int c)
{
return xmon_putc(c, xmon_stdout);
}
int
xmon_fputs(char *str, void *f)
{
int n = strlen(str);
return xmon_write(f, str, n) == n? 0: -1;
}
int
xmon_readchar(void)
{
char ch;
for (;;) {
switch (xmon_read(xmon_stdin, &ch, 1)) {
case 1:
return ch;
case -1:
xmon_printf("read(stdin) returned -1\r\n", 0, 0);
return -1;
}
}
}
static char line[256];
static char *lineptr;
static int lineleft;
#if 0
int xmon_expect(const char *str, unsigned int timeout)
{
int c;
unsigned int t0;
timeout *= TB_SPEED;
t0 = readtb();
do {
lineptr = line;
for (;;) {
c = xmon_read_poll();
if (c == -1) {
if (readtb() - t0 > timeout)
return 0;
continue;
}
if (c == '\n')
break;
if (c != '\r' && lineptr < &line[sizeof(line) - 1])
*lineptr++ = c;
}
*lineptr = 0;
} while (strstr(line, str) == NULL);
return 1;
}
#endif
int
xmon_getchar(void)
{
int c;
if (lineleft == 0) {
lineptr = line;
for (;;) {
c = xmon_readchar();
if (c == -1 || c == 4)
break;
if (c == '\r' || c == '\n') {
*lineptr++ = '\n';
xmon_putchar('\n');
break;
}
switch (c) {
case 0177:
case '\b':
if (lineptr > line) {
xmon_putchar('\b');
xmon_putchar(' ');
xmon_putchar('\b');
--lineptr;
}
break;
case 'U' & 0x1F:
while (lineptr > line) {
xmon_putchar('\b');
xmon_putchar(' ');
xmon_putchar('\b');
--lineptr;
}
break;
default:
if (lineptr >= &line[sizeof(line) - 1])
xmon_putchar('\a');
else {
xmon_putchar(c);
*lineptr++ = c;
}
}
}
lineleft = lineptr - line;
lineptr = line;
}
if (lineleft == 0)
return -1;
--lineleft;
return *lineptr++;
}
char *
xmon_fgets(char *str, int nb, void *f)
{
char *p;
int c;
for (p = str; p < str + nb - 1; ) {
c = xmon_getchar();
if (c == -1) {
if (p == str)
return 0;
break;
}
*p++ = c;
if (c == '\n')
break;
}
*p = 0;
return str;
}
void
prom_drawhex(uint val)
{
unsigned char buf[10];
int i;
for (i = 7; i >= 0; i--)
{
buf[i] = "0123456789abcdef"[val & 0x0f];
val >>= 4;
}
buf[8] = '\0';
xmon_fputs(buf, xmon_stdout);
}
void
prom_drawstring(const char *str)
{
xmon_fputs(str, xmon_stdout);
}

View File

@ -1,54 +0,0 @@
/*
* Written by Cort Dougan to replace the version originally used
* by Paul Mackerras, which came from NetBSD and thus had copyright
* conflicts with Linux.
*
* This file makes liberal use of the standard linux utility
* routines to reduce the size of the binary. We assume we can
* trust some parts of Linux inside the debugger.
* -- Cort (cort@cs.nmt.edu)
*
* Copyright (C) 1999 Cort Dougan.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/module.h>
#include <stdarg.h>
#include "nonstdio.h"
extern int xmon_write(void *, void *, int);
void xmon_vfprintf(void *f, const char *fmt, va_list ap)
{
static char xmon_buf[2048];
int n;
n = vsprintf(xmon_buf, fmt, ap);
xmon_write(f, xmon_buf, n);
}
void xmon_printf(const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
xmon_vfprintf(stdout, fmt, ap);
va_end(ap);
}
EXPORT_SYMBOL(xmon_printf);
void xmon_fprintf(void *f, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
xmon_vfprintf(f, fmt, ap);
va_end(ap);
}

View File

@ -1,7 +1,7 @@
/*
* Routines providing a simple monitor for use on the PowerMac.
*
* Copyright (C) 1996 Paul Mackerras.
* Copyright (C) 1996-2005 Paul Mackerras.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@ -18,6 +18,7 @@
#include <linux/kallsyms.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/sysrq.h>
#include <asm/ptrace.h>
#include <asm/string.h>
@ -144,15 +145,10 @@ static void xmon_print_symbol(unsigned long address, const char *mid,
static const char *getvecname(unsigned long vec);
extern int print_insn_powerpc(unsigned long, unsigned long, int);
extern void printf(const char *fmt, ...);
extern void xmon_vfprintf(void *f, const char *fmt, va_list ap);
extern int xmon_putc(int c, void *f);
extern int putchar(int ch);
extern void xmon_enter(void);
extern void xmon_leave(void);
extern int xmon_read_poll(void);
extern long setjmp(long *);
extern void longjmp(long *, long);
extern void xmon_save_regs(struct pt_regs *);
@ -748,7 +744,6 @@ cmds(struct pt_regs *excp)
printf("%x:", smp_processor_id());
#endif /* CONFIG_SMP */
printf("mon> ");
fflush(stdout);
flush_input();
termch = 0;
cmd = skipbl();
@ -1797,7 +1792,7 @@ memex(void)
for(;;){
if (!mnoread)
n = mread(adrs, val, size);
printf("%.16x%c", adrs, brev? 'r': ' ');
printf(REG"%c", adrs, brev? 'r': ' ');
if (!mnoread) {
if (brev)
byterev(val, size);
@ -1976,17 +1971,18 @@ prdump(unsigned long adrs, long ndump)
nr = mread(adrs, temp, r);
adrs += nr;
for (m = 0; m < r; ++m) {
if ((m & 7) == 0 && m > 0)
if ((m & (sizeof(long) - 1)) == 0 && m > 0)
putchar(' ');
if (m < nr)
printf("%.2x", temp[m]);
else
printf("%s", fault_chars[fault_type]);
}
if (m <= 8)
printf(" ");
for (; m < 16; ++m)
for (; m < 16; ++m) {
if ((m & (sizeof(long) - 1)) == 0)
putchar(' ');
printf(" ");
}
printf(" |");
for (m = 0; m < r; ++m) {
if (m < nr) {
@ -2151,7 +2147,6 @@ memzcan(void)
ok = mread(a, &v, 1);
if (ok && !ook) {
printf("%.8x .. ", a);
fflush(stdout);
} else if (!ok && ook)
printf("%.8x\n", a - mskip);
ook = ok;
@ -2372,7 +2367,7 @@ int
inchar(void)
{
if (lineptr == NULL || *lineptr == 0) {
if (fgets(line, sizeof(line), stdin) == NULL) {
if (xmon_gets(line, sizeof(line)) == NULL) {
lineptr = NULL;
return EOF;
}
@ -2526,4 +2521,29 @@ void xmon_init(int enable)
__debugger_dabr_match = NULL;
__debugger_fault_handler = NULL;
}
xmon_map_scc();
}
#ifdef CONFIG_MAGIC_SYSRQ
static void sysrq_handle_xmon(int key, struct pt_regs *pt_regs,
struct tty_struct *tty)
{
/* ensure xmon is enabled */
xmon_init(1);
debugger(pt_regs);
}
static struct sysrq_key_op sysrq_xmon_op =
{
.handler = sysrq_handle_xmon,
.help_msg = "Xmon",
.action_msg = "Entering xmon",
};
static int __init setup_xmon_sysrq(void)
{
register_sysrq_key('x', &sysrq_xmon_op);
return 0;
}
__initcall(setup_xmon_sysrq);
#endif /* CONFIG_MAGIC_SYSRQ */

View File

@ -19,6 +19,9 @@ extern prom_entry of_prom_entry;
/* function declarations */
int call_prom(const char *service, int nargs, int nret, ...);
int call_prom_ret(const char *service, int nargs, int nret,
unsigned int *rets, ...);
void * claim(unsigned int virt, unsigned int size, unsigned int align);
int map(unsigned int phys, unsigned int virt, unsigned int size);
void enter(void);

View File

@ -3,4 +3,4 @@
#
lib-y := claim.o enter.o exit.o finddevice.o getprop.o ofinit.o \
ofstdio.o read.o release.o write.o map.o
ofstdio.o read.o release.o write.o map.o call_prom.o

View File

@ -0,0 +1,74 @@
/*
* Copyright (C) 1996-2005 Paul Mackerras.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include "of1275.h"
#include <stdarg.h>
int call_prom(const char *service, int nargs, int nret, ...)
{
int i;
struct prom_args {
const char *service;
int nargs;
int nret;
unsigned int args[12];
} args;
va_list list;
args.service = service;
args.nargs = nargs;
args.nret = nret;
va_start(list, nret);
for (i = 0; i < nargs; i++)
args.args[i] = va_arg(list, unsigned int);
va_end(list);
for (i = 0; i < nret; i++)
args.args[nargs+i] = 0;
if (of_prom_entry(&args) < 0)
return -1;
return (nret > 0)? args.args[nargs]: 0;
}
int call_prom_ret(const char *service, int nargs, int nret,
unsigned int *rets, ...)
{
int i;
struct prom_args {
const char *service;
int nargs;
int nret;
unsigned int args[12];
} args;
va_list list;
args.service = service;
args.nargs = nargs;
args.nret = nret;
va_start(list, rets);
for (i = 0; i < nargs; i++)
args.args[i] = va_arg(list, unsigned int);
va_end(list);
for (i = 0; i < nret; i++)
args.args[nargs+i] = 0;
if (of_prom_entry(&args) < 0)
return -1;
if (rets != (void *) 0)
for (i = 1; i < nret; ++i)
rets[i-1] = args.args[nargs+i];
return (nret > 0)? args.args[nargs]: 0;
}
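A hedged usage sketch of the two helpers above, modelled on how claim() and check_of_version() later in this library call them; the handle and address values are made up for illustration:
/* Illustrative usage only. */
ihandle mem = (ihandle) call_prom("open", 1, 1, "/memory");
unsigned int result;
int ret = call_prom_ret("call-method", 5, 2, &result,
			"claim", mem, 0 /* align */, 0x1000 /* size */, 0x200000 /* virt */);
/* ret is the catch-all status (args[nargs]); result receives the
 * method's own return value (args[nargs+1]). */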

View File

@ -9,27 +9,84 @@
*/
#include "of1275.h"
#include "nonstdio.h"
void *
claim(unsigned int virt, unsigned int size, unsigned int align)
/*
* Older OF's require that when claiming a specific range of addresses,
* we claim the physical space in the /memory node and the virtual
* space in the chosen mmu node, and then do a map operation to
* map virtual to physical.
*/
static int need_map = -1;
static ihandle chosen_mmu;
static phandle memory;
/* returns true if s2 is a prefix of s1 */
static int string_match(const char *s1, const char *s2)
{
struct prom_args {
char *service;
int nargs;
int nret;
unsigned int virt;
unsigned int size;
unsigned int align;
void *ret;
} args;
args.service = "claim";
args.nargs = 3;
args.nret = 1;
args.virt = virt;
args.size = size;
args.align = align;
args.ret = (void *) 0;
(*of_prom_entry)(&args);
return args.ret;
for (; *s2; ++s2)
if (*s1++ != *s2)
return 0;
return 1;
}
static int check_of_version(void)
{
phandle oprom, chosen;
char version[64];
oprom = finddevice("/openprom");
if (oprom == OF_INVALID_HANDLE)
return 0;
if (getprop(oprom, "model", version, sizeof(version)) <= 0)
return 0;
version[sizeof(version)-1] = 0;
printf("OF version = '%s'\n", version);
if (!string_match(version, "Open Firmware, 1.")
&& !string_match(version, "FirmWorks,3."))
return 0;
chosen = finddevice("/chosen");
if (chosen == OF_INVALID_HANDLE) {
chosen = finddevice("/chosen@0");
if (chosen == OF_INVALID_HANDLE) {
printf("no chosen\n");
return 0;
}
}
if (getprop(chosen, "mmu", &chosen_mmu, sizeof(chosen_mmu)) <= 0) {
printf("no mmu\n");
return 0;
}
memory = (ihandle) call_prom("open", 1, 1, "/memory");
if (memory == OF_INVALID_HANDLE) {
memory = (ihandle) call_prom("open", 1, 1, "/memory@0");
if (memory == OF_INVALID_HANDLE) {
printf("no memory node\n");
return 0;
}
}
printf("old OF detected\n");
return 1;
}
void *claim(unsigned int virt, unsigned int size, unsigned int align)
{
int ret;
unsigned int result;
if (need_map < 0)
need_map = check_of_version();
if (align || !need_map)
return (void *) call_prom("claim", 3, 1, virt, size, align);
ret = call_prom_ret("call-method", 5, 2, &result, "claim", memory,
align, size, virt);
if (ret != 0 || result == -1)
return (void *) -1;
ret = call_prom_ret("call-method", 5, 2, &result, "claim", chosen_mmu,
align, size, virt);
/* 0x12 == coherent + read/write */
ret = call_prom("call-method", 6, 1, "map", chosen_mmu,
0x12, size, virt, virt);
return virt;
}

View File

@ -10,22 +10,7 @@
#include "of1275.h"
phandle
finddevice(const char *name)
phandle finddevice(const char *name)
{
struct prom_args {
char *service;
int nargs;
int nret;
const char *devspec;
phandle device;
} args;
args.service = "finddevice";
args.nargs = 1;
args.nret = 1;
args.devspec = name;
args.device = OF_INVALID_HANDLE;
(*of_prom_entry)(&args);
return args.device;
return (phandle) call_prom("finddevice", 1, 1, name);
}

View File

@ -80,8 +80,7 @@ $(obj)/note: $(utils)/mknote FORCE
$(call if_changed,mknote)
$(obj)/coffcrt0.o: EXTRA_AFLAGS := -traditional -DXCOFF
$(obj)/crt0.o: EXTRA_AFLAGS := -traditional
$(obj)/coffcrt0.o: EXTRA_AFLAGS := -DXCOFF
targets += coffcrt0.o crt0.o
$(obj)/coffcrt0.o $(obj)/crt0.o: $(common)/crt0.S FORCE
$(call if_changed_dep,as_o_S)

View File

@ -12,7 +12,7 @@ extra-$(CONFIG_6xx) += idle_6xx.o
extra-$(CONFIG_POWER4) += idle_power4.o
extra-y += vmlinux.lds
obj-y := entry.o traps.o irq.o idle.o time.o misc.o \
obj-y := entry.o traps.o idle.o time.o misc.o \
process.o align.o \
setup.o \
ppc_htab.o
@ -38,8 +38,7 @@ endif
# These are here while we do the architecture merge
else
obj-y := irq.o idle.o \
align.o
obj-y := idle.o align.o
obj-$(CONFIG_6xx) += l2cr.o cpu_setup_6xx.o
obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o
obj-$(CONFIG_MODULES) += module.o

View File

@ -358,6 +358,6 @@ label:
NORMAL_EXCEPTION_PROLOG; \
bne load_up_fpu; /* if from user, just load it up */ \
addi r3,r1,STACK_FRAME_OVERHEAD; \
EXC_XFER_EE_LITE(0x800, KernelFP)
EXC_XFER_EE_LITE(0x800, kernel_fp_unavailable_exception)
#endif /* __HEAD_BOOKE_H__ */

View File

@ -1,165 +0,0 @@
/*
* arch/ppc/kernel/irq.c
*
* Derived from arch/i386/kernel/irq.c
* Copyright (C) 1992 Linus Torvalds
* Adapted from arch/i386 by Gary Thomas
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
* Updated and modified by Cort Dougan <cort@fsmlabs.com>
* Copyright (C) 1996-2001 Cort Dougan
* Adapted for Power Macintosh by Paul Mackerras
* Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
* Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
*
* This file contains the code used by various IRQ handling routines:
* asking for different IRQ's should be done through these routines
* instead of just grabbing them. Thus setups with different IRQ numbers
* shouldn't result in any weird surprises, and installing new handlers
* should be easier.
*
* The MPC8xx has an interrupt mask in the SIU. If a bit is set, the
* interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit
* mask register (of which only 16 are defined), hence the weird shifting
* and complement of the cached_irq_mask. I want to be able to stuff
* this right into the SIU SMASK register.
* Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx

* to reduce code space and undefined function references.
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/config.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
extern atomic_t ipi_recv;
extern atomic_t ipi_sent;
#define MAXCOUNT 10000000
int ppc_spurious_interrupts = 0;
struct irqaction *ppc_irq_action[NR_IRQS];
unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
unsigned long ppc_lost_interrupts[NR_MASK_WORDS];
atomic_t ppc_n_lost_interrupts;
#ifdef CONFIG_TAU_INT
extern int tau_initialized;
extern int tau_interrupts(int);
#endif
int show_interrupts(struct seq_file *p, void *v)
{
int i = *(loff_t *) v, j;
struct irqaction * action;
unsigned long flags;
if (i == 0) {
seq_puts(p, " ");
for (j=0; j<NR_CPUS; j++)
if (cpu_online(j))
seq_printf(p, "CPU%d ", j);
seq_putc(p, '\n');
}
if (i < NR_IRQS) {
spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if ( !action || !action->handler )
goto skip;
seq_printf(p, "%3d: ", i);
#ifdef CONFIG_SMP
for (j = 0; j < NR_CPUS; j++)
if (cpu_online(j))
seq_printf(p, "%10u ",
kstat_cpu(j).irqs[i]);
#else
seq_printf(p, "%10u ", kstat_irqs(i));
#endif /* CONFIG_SMP */
if (irq_desc[i].handler)
seq_printf(p, " %s ", irq_desc[i].handler->typename);
else
seq_puts(p, " None ");
seq_printf(p, "%s", (irq_desc[i].status & IRQ_LEVEL) ? "Level " : "Edge ");
seq_printf(p, " %s", action->name);
for (action = action->next; action; action = action->next)
seq_printf(p, ", %s", action->name);
seq_putc(p, '\n');
skip:
spin_unlock_irqrestore(&irq_desc[i].lock, flags);
} else if (i == NR_IRQS) {
#ifdef CONFIG_TAU_INT
if (tau_initialized){
seq_puts(p, "TAU: ");
for (j = 0; j < NR_CPUS; j++)
if (cpu_online(j))
seq_printf(p, "%10u ", tau_interrupts(j));
seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n");
}
#endif
#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE)
/* should this be per processor send/receive? */
seq_printf(p, "IPI (recv/sent): %10u/%u\n",
atomic_read(&ipi_recv), atomic_read(&ipi_sent));
#endif
seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts);
}
return 0;
}
void do_IRQ(struct pt_regs *regs)
{
int irq, first = 1;
irq_enter();
/*
* Every platform is required to implement ppc_md.get_irq.
* This function will either return an irq number or -1 to
* indicate there are no more pending. But the first time
* through the loop this means there wasn't an IRQ pending.
* The value -2 is for buggy hardware and means that this IRQ
* has already been handled. -- Tom
*/
while ((irq = ppc_md.get_irq(regs)) >= 0) {
__do_IRQ(irq, regs);
first = 0;
}
if (irq != -2 && first)
/* That's not SMP safe ... but who cares ? */
ppc_spurious_interrupts++;
irq_exit();
}
void __init init_IRQ(void)
{
ppc_md.init_IRQ();
}

View File

@ -497,9 +497,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
* and invalidate the corresponding instruction cache blocks.
* This is a no-op on the 601.
*
* flush_icache_range(unsigned long start, unsigned long stop)
* __flush_icache_range(unsigned long start, unsigned long stop)
*/
_GLOBAL(flush_icache_range)
_GLOBAL(__flush_icache_range)
BEGIN_FTR_SECTION
blr /* for 601, do nothing */
END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)

View File

@ -46,6 +46,7 @@
#include <asm/btext.h>
#include <asm/div64.h>
#include <asm/xmon.h>
#include <asm/signal.h>
#ifdef CONFIG_8xx
#include <asm/commproc.h>
@ -57,7 +58,6 @@ extern void machine_check_exception(struct pt_regs *regs);
extern void alignment_exception(struct pt_regs *regs);
extern void program_check_exception(struct pt_regs *regs);
extern void single_step_exception(struct pt_regs *regs);
extern int do_signal(sigset_t *, struct pt_regs *);
extern int pmac_newworld;
extern int sys_sigreturn(struct pt_regs *regs);
@ -78,7 +78,6 @@ EXPORT_SYMBOL(program_check_exception);
EXPORT_SYMBOL(single_step_exception);
EXPORT_SYMBOL(sys_sigreturn);
EXPORT_SYMBOL(ppc_n_lost_interrupts);
EXPORT_SYMBOL(ppc_lost_interrupts);
EXPORT_SYMBOL(ISA_DMA_THRESHOLD);
EXPORT_SYMBOL(DMA_MODE_READ);
@ -176,6 +175,7 @@ EXPORT_SYMBOL(pci_bus_to_phys);
#endif /* CONFIG_PCI */
#ifdef CONFIG_NOT_COHERENT_CACHE
extern void flush_dcache_all(void);
EXPORT_SYMBOL(flush_dcache_all);
#endif
@ -217,9 +217,6 @@ EXPORT_SYMBOL(adb_try_handler_change);
EXPORT_SYMBOL(cuda_request);
EXPORT_SYMBOL(cuda_poll);
#endif /* CONFIG_ADB_CUDA */
#ifdef CONFIG_PPC_MULTIPLATFORM
EXPORT_SYMBOL(_machine);
#endif
#ifdef CONFIG_PPC_PMAC
EXPORT_SYMBOL(sys_ctrler);
EXPORT_SYMBOL(pmac_newworld);

View File

@ -76,6 +76,7 @@ unsigned int DMA_MODE_WRITE;
#ifdef CONFIG_PPC_MULTIPLATFORM
int _machine = 0;
EXPORT_SYMBOL(_machine);
extern void prep_init(unsigned long r3, unsigned long r4,
unsigned long r5, unsigned long r6, unsigned long r7);

View File

@ -75,6 +75,9 @@ static DEFINE_SPINLOCK(pmac_pic_lock);
#define GATWICK_IRQ_POOL_SIZE 10
static struct interrupt_info gatwick_int_pool[GATWICK_IRQ_POOL_SIZE];
#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
static unsigned long ppc_lost_interrupts[NR_MASK_WORDS];
/*
* Mark an irq as "lost". This is only used on the pmac
* since it can lose interrupts (see pmac_set_irq_mask).

View File

@ -61,6 +61,15 @@
#include <asm/pci-bridge.h>
#include <asm/todc.h>
/* prep registers for L2 */
#define CACHECRBA 0x80000823 /* Cache configuration register address */
#define L2CACHE_MASK 0x03 /* Mask for 2 L2 Cache bits */
#define L2CACHE_512KB 0x00 /* 512KB */
#define L2CACHE_256KB 0x01 /* 256KB */
#define L2CACHE_1MB 0x02 /* 1MB */
#define L2CACHE_NONE 0x03 /* NONE */
#define L2CACHE_PARITY 0x08 /* Mask for L2 Cache Parity Protected bit */
TODC_ALLOC();
unsigned char ucSystemType;

View File

@ -297,6 +297,10 @@ config HAVE_ARCH_EARLY_PFN_TO_NID
def_bool y
depends on NEED_MULTIPLE_NODES
config ARCH_MEMORY_PROBE
def_bool y
depends on MEMORY_HOTPLUG
# Some NUMA nodes have memory ranges that span
# other nodes. Even though a pfn is valid and
# between a node's start and end pfns, it may not

View File

@ -5,11 +5,59 @@
#include <sys/types.h>
#include <sys/stat.h>
#include <string.h>
#include <elf.h>
#define ElfHeaderSize (64 * 1024)
#define ElfPages (ElfHeaderSize / 4096)
#define KERNELBASE (0xc000000000000000)
#define _ALIGN_UP(addr,size) (((addr)+((size)-1))&(~((size)-1)))
struct addr_range {
unsigned long long addr;
unsigned long memsize;
unsigned long offset;
};
static int check_elf64(void *p, int size, struct addr_range *r)
{
Elf64_Ehdr *elf64 = p;
Elf64_Phdr *elf64ph;
if (elf64->e_ident[EI_MAG0] != ELFMAG0 ||
elf64->e_ident[EI_MAG1] != ELFMAG1 ||
elf64->e_ident[EI_MAG2] != ELFMAG2 ||
elf64->e_ident[EI_MAG3] != ELFMAG3 ||
elf64->e_ident[EI_CLASS] != ELFCLASS64 ||
elf64->e_ident[EI_DATA] != ELFDATA2MSB ||
elf64->e_type != ET_EXEC || elf64->e_machine != EM_PPC64)
return 0;
if ((elf64->e_phoff + sizeof(Elf64_Phdr)) > size)
return 0;
elf64ph = (Elf64_Phdr *) ((unsigned long)elf64 +
(unsigned long)elf64->e_phoff);
r->memsize = (unsigned long)elf64ph->p_memsz;
r->offset = (unsigned long)elf64ph->p_offset;
r->addr = (unsigned long long)elf64ph->p_vaddr;
#ifdef DEBUG
printf("PPC64 ELF file, ph:\n");
printf("p_type 0x%08x\n", elf64ph->p_type);
printf("p_flags 0x%08x\n", elf64ph->p_flags);
printf("p_offset 0x%016llx\n", elf64ph->p_offset);
printf("p_vaddr 0x%016llx\n", elf64ph->p_vaddr);
printf("p_paddr 0x%016llx\n", elf64ph->p_paddr);
printf("p_filesz 0x%016llx\n", elf64ph->p_filesz);
printf("p_memsz 0x%016llx\n", elf64ph->p_memsz);
printf("p_align 0x%016llx\n", elf64ph->p_align);
printf("... skipping 0x%08lx bytes of ELF header\n",
(unsigned long)elf64ph->p_offset);
#endif
return 64;
}
void get4k(FILE *file, char *buf )
{
unsigned j;
@ -34,97 +82,92 @@ void death(const char *msg, FILE *fdesc, const char *fname)
int main(int argc, char **argv)
{
char inbuf[4096];
FILE *ramDisk = NULL;
FILE *sysmap = NULL;
FILE *inputVmlinux = NULL;
FILE *outputVmlinux = NULL;
struct addr_range vmlinux;
FILE *ramDisk;
FILE *inputVmlinux;
FILE *outputVmlinux;
unsigned i = 0;
unsigned long ramFileLen = 0;
unsigned long ramLen = 0;
unsigned long roundR = 0;
char *rd_name, *lx_name, *out_name;
unsigned long sysmapFileLen = 0;
unsigned long sysmapLen = 0;
unsigned long sysmapPages = 0;
char* ptr_end = NULL;
unsigned long offset_end = 0;
size_t i;
unsigned long ramFileLen;
unsigned long ramLen;
unsigned long roundR;
unsigned long offset_end;
unsigned long kernelLen = 0;
unsigned long actualKernelLen = 0;
unsigned long round = 0;
unsigned long roundedKernelLen = 0;
unsigned long ramStartOffs = 0;
unsigned long ramPages = 0;
unsigned long roundedKernelPages = 0;
unsigned long hvReleaseData = 0;
unsigned long kernelLen;
unsigned long actualKernelLen;
unsigned long round;
unsigned long roundedKernelLen;
unsigned long ramStartOffs;
unsigned long ramPages;
unsigned long roundedKernelPages;
unsigned long hvReleaseData;
u_int32_t eyeCatcher = 0xc8a5d9c4;
unsigned long naca = 0;
unsigned long xRamDisk = 0;
unsigned long xRamDiskSize = 0;
long padPages = 0;
unsigned long naca;
unsigned long xRamDisk;
unsigned long xRamDiskSize;
long padPages;
if (argc < 2) {
fprintf(stderr, "Name of RAM disk file missing.\n");
exit(1);
}
rd_name = argv[1];
if (argc < 3) {
fprintf(stderr, "Name of System Map input file is missing.\n");
exit(1);
}
if (argc < 4) {
fprintf(stderr, "Name of vmlinux file missing.\n");
exit(1);
}
lx_name = argv[2];
if (argc < 5) {
if (argc < 4) {
fprintf(stderr, "Name of vmlinux output file missing.\n");
exit(1);
}
out_name = argv[3];
ramDisk = fopen(argv[1], "r");
ramDisk = fopen(rd_name, "r");
if ( ! ramDisk ) {
fprintf(stderr, "RAM disk file \"%s\" failed to open.\n", argv[1]);
fprintf(stderr, "RAM disk file \"%s\" failed to open.\n", rd_name);
exit(1);
}
sysmap = fopen(argv[2], "r");
if ( ! sysmap ) {
fprintf(stderr, "System Map file \"%s\" failed to open.\n", argv[2]);
exit(1);
}
inputVmlinux = fopen(argv[3], "r");
inputVmlinux = fopen(lx_name, "r");
if ( ! inputVmlinux ) {
fprintf(stderr, "vmlinux file \"%s\" failed to open.\n", argv[3]);
fprintf(stderr, "vmlinux file \"%s\" failed to open.\n", lx_name);
exit(1);
}
outputVmlinux = fopen(argv[4], "w+");
outputVmlinux = fopen(out_name, "w+");
if ( ! outputVmlinux ) {
fprintf(stderr, "output vmlinux file \"%s\" failed to open.\n", argv[4]);
fprintf(stderr, "output vmlinux file \"%s\" failed to open.\n", out_name);
exit(1);
}
i = fread(inbuf, 1, sizeof(inbuf), inputVmlinux);
if (i != sizeof(inbuf)) {
fprintf(stderr, "can not read vmlinux file %s: %u\n", lx_name, i);
exit(1);
}
i = check_elf64(inbuf, sizeof(inbuf), &vmlinux);
if (i == 0) {
fprintf(stderr, "You must have a linux kernel specified as argv[2]\n");
exit(1);
}
/* Input Vmlinux file */
fseek(inputVmlinux, 0, SEEK_END);
kernelLen = ftell(inputVmlinux);
fseek(inputVmlinux, 0, SEEK_SET);
printf("kernel file size = %d\n", kernelLen);
if ( kernelLen == 0 ) {
fprintf(stderr, "You must have a linux kernel specified as argv[3]\n");
exit(1);
}
printf("kernel file size = %lu\n", kernelLen);
actualKernelLen = kernelLen - ElfHeaderSize;
printf("actual kernel length (minus ELF header) = %d\n", actualKernelLen);
printf("actual kernel length (minus ELF header) = %lu\n", actualKernelLen);
round = actualKernelLen % 4096;
roundedKernelLen = actualKernelLen;
@ -134,39 +177,7 @@ int main(int argc, char **argv)
roundedKernelPages = roundedKernelLen / 4096;
printf("Vmlinux pages to copy = %ld/0x%lx \n", roundedKernelPages, roundedKernelPages);
/* Input System Map file */
/* (needs to be processed simply to determine if we need to add pad pages due to the static variables not being included in the vmlinux) */
fseek(sysmap, 0, SEEK_END);
sysmapFileLen = ftell(sysmap);
fseek(sysmap, 0, SEEK_SET);
printf("%s file size = %ld/0x%lx \n", argv[2], sysmapFileLen, sysmapFileLen);
sysmapLen = sysmapFileLen;
roundR = 4096 - (sysmapLen % 4096);
if (roundR) {
printf("Rounding System Map file up to a multiple of 4096, adding %ld/0x%lx \n", roundR, roundR);
sysmapLen += roundR;
}
printf("Rounded System Map size is %ld/0x%lx \n", sysmapLen, sysmapLen);
/* Process the Sysmap file to determine where _end is */
sysmapPages = sysmapLen / 4096;
/* read the whole file line by line, expect that it doesn't fail */
while ( fgets(inbuf, 4096, sysmap) ) ;
/* search for _end in the last page of the system map */
ptr_end = strstr(inbuf, " _end");
if (!ptr_end) {
fprintf(stderr, "Unable to find _end in the sysmap file \n");
fprintf(stderr, "inbuf: \n");
fprintf(stderr, "%s \n", inbuf);
exit(1);
}
printf("Found _end in the last page of the sysmap - backing up 10 characters it looks like %s", ptr_end-10);
/* convert address of _end in system map to hex offset. */
offset_end = (unsigned int)strtol(ptr_end-10, NULL, 16);
offset_end = _ALIGN_UP(vmlinux.memsize, 4096);
/* calc how many pages we need to insert between the vmlinux and the start of the ram disk */
padPages = offset_end/4096 - roundedKernelPages;
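A quick worked example of the padding arithmetic above, using the _ALIGN_UP macro defined earlier; the numbers are made up for illustration:
/* Illustrative values only:
 *   vmlinux.memsize    = 0x512345  ->  _ALIGN_UP(0x512345, 4096) = 0x513000
 *   offset_end / 4096  = 0x513     (pages the loaded image really spans)
 *   roundedKernelPages = 0x4f0     (pages present in the vmlinux file)
 *   padPages           = 0x513 - 0x4f0 = 0x23 zero pages inserted before the ram disk
 */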
@ -194,7 +205,7 @@ int main(int argc, char **argv)
fseek(ramDisk, 0, SEEK_END);
ramFileLen = ftell(ramDisk);
fseek(ramDisk, 0, SEEK_SET);
printf("%s file size = %ld/0x%lx \n", argv[1], ramFileLen, ramFileLen);
printf("%s file size = %ld/0x%lx \n", rd_name, ramFileLen, ramFileLen);
ramLen = ramFileLen;
@ -248,19 +259,19 @@ int main(int argc, char **argv)
/* fseek to the hvReleaseData pointer */
fseek(outputVmlinux, ElfHeaderSize + 0x24, SEEK_SET);
if (fread(&hvReleaseData, 4, 1, outputVmlinux) != 1) {
death("Could not read hvReleaseData pointer\n", outputVmlinux, argv[4]);
death("Could not read hvReleaseData pointer\n", outputVmlinux, out_name);
}
hvReleaseData = ntohl(hvReleaseData); /* Convert to native int */
printf("hvReleaseData is at %08x\n", hvReleaseData);
printf("hvReleaseData is at %08lx\n", hvReleaseData);
/* fseek to the hvReleaseData */
fseek(outputVmlinux, ElfHeaderSize + hvReleaseData, SEEK_SET);
if (fread(inbuf, 0x40, 1, outputVmlinux) != 1) {
death("Could not read hvReleaseData\n", outputVmlinux, argv[4]);
death("Could not read hvReleaseData\n", outputVmlinux, out_name);
}
/* Check hvReleaseData sanity */
if (memcmp(inbuf, &eyeCatcher, 4) != 0) {
death("hvReleaseData is invalid\n", outputVmlinux, argv[4]);
death("hvReleaseData is invalid\n", outputVmlinux, out_name);
}
/* Get the naca pointer */
naca = ntohl(*((u_int32_t*) &inbuf[0x0C])) - KERNELBASE;
@ -269,13 +280,13 @@ int main(int argc, char **argv)
/* fseek to the naca */
fseek(outputVmlinux, ElfHeaderSize + naca, SEEK_SET);
if (fread(inbuf, 0x18, 1, outputVmlinux) != 1) {
death("Could not read naca\n", outputVmlinux, argv[4]);
death("Could not read naca\n", outputVmlinux, out_name);
}
xRamDisk = ntohl(*((u_int32_t *) &inbuf[0x0c]));
xRamDiskSize = ntohl(*((u_int32_t *) &inbuf[0x14]));
/* Make sure a RAM disk isn't already present */
if ((xRamDisk != 0) || (xRamDiskSize != 0)) {
death("RAM disk is already attached to this kernel\n", outputVmlinux, argv[4]);
death("RAM disk is already attached to this kernel\n", outputVmlinux, out_name);
}
/* Fill in the values */
*((u_int32_t *) &inbuf[0x0c]) = htonl(ramStartOffs);
@ -285,15 +296,15 @@ int main(int argc, char **argv)
fflush(outputVmlinux);
fseek(outputVmlinux, ElfHeaderSize + naca, SEEK_SET);
if (fwrite(inbuf, 0x18, 1, outputVmlinux) != 1) {
death("Could not write naca\n", outputVmlinux, argv[4]);
death("Could not write naca\n", outputVmlinux, out_name);
}
printf("Ram Disk of 0x%lx pages is attached to the kernel at offset 0x%08x\n",
printf("Ram Disk of 0x%lx pages is attached to the kernel at offset 0x%08lx\n",
ramPages, ramStartOffs);
/* Done */
fclose(outputVmlinux);
/* Set permission to executable */
chmod(argv[4], S_IRUSR|S_IWUSR|S_IXUSR|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH);
chmod(out_name, S_IRUSR|S_IWUSR|S_IXUSR|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH);
return 0;
}

View File

@ -11,12 +11,11 @@ obj-y := misc.o prom.o
endif
obj-y += irq.o idle.o dma.o \
align.o pacaData.o \
udbg.o ioctl32.o \
obj-y += idle.o dma.o \
align.o \
udbg.o \
rtc.o \
cpu_setup_power4.o \
iommu.o sysfs.o vdso.o firmware.o
iommu.o vdso.o
obj-y += vdso32/ vdso64/
pci-obj-$(CONFIG_PPC_MULTIPLATFORM) += pci_dn.o pci_direct_iommu.o
@ -31,15 +30,10 @@ endif
obj-$(CONFIG_PPC_PSERIES) += udbg_16550.o
obj-$(CONFIG_KEXEC) += machine_kexec.o
obj-$(CONFIG_EEH) += eeh.o
obj-$(CONFIG_PROC_FS) += proc_ppc64.o
obj-$(CONFIG_MODULES) += module.o
ifneq ($(CONFIG_PPC_MERGE),y)
obj-$(CONFIG_MODULES) += ppc_ksyms.o
endif
obj-$(CONFIG_PPC_RTAS) += rtas_pci.o
obj-$(CONFIG_SCANLOG) += scanlog.o
obj-$(CONFIG_LPARCFG) += lparcfg.o
obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o
ifneq ($(CONFIG_PPC_MERGE),y)
obj-$(CONFIG_BOOTX_TEXT) += btext.o
@ -52,8 +46,6 @@ obj-$(CONFIG_PPC_MAPLE) += udbg_16550.o
obj-$(CONFIG_KPROBES) += kprobes.o
CFLAGS_ioctl32.o += -Ifs/
ifneq ($(CONFIG_PPC_MERGE),y)
ifeq ($(CONFIG_PPC_ISERIES),y)
arch/ppc64/kernel/head.o: arch/powerpc/kernel/lparmap.s

View File

@ -74,7 +74,6 @@ int main(void)
DEFINE(ICACHEL1LINESIZE, offsetof(struct ppc64_caches, iline_size));
DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_iline_size));
DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page));
DEFINE(PLATFORM, offsetof(struct systemcfg, platform));
DEFINE(PLATFORM_LPAR, PLATFORM_LPAR);
/* paca */

View File

@ -28,7 +28,6 @@
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/systemcfg.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/bug.h>
@ -1701,21 +1700,9 @@ _GLOBAL(__secondary_start)
HMT_MEDIUM /* Set thread priority to MEDIUM */
ld r2,PACATOC(r13)
li r6,0
stb r6,PACAPROCENABLED(r13)
#ifndef CONFIG_PPC_ISERIES
/* Initialize the page table pointer register. */
LOADADDR(r6,_SDR1)
ld r6,0(r6) /* get the value of _SDR1 */
mtspr SPRN_SDR1,r6 /* set the htab location */
#endif
/* Initialize the first segment table (or SLB) entry */
ld r3,PACASTABVIRT(r13) /* get addr of segment table */
BEGIN_FTR_SECTION
bl .stab_initialize
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
bl .slb_initialize
/* Do early setup for that CPU */
bl .early_setup_secondary
/* Initialize the kernel stack. Just a repeat for iSeries. */
LOADADDR(r3,current_set)
@ -1724,37 +1711,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
std r1,PACAKSAVE(r13)
ld r3,PACASTABREAL(r13) /* get raddr of segment table */
ori r4,r3,1 /* turn on valid bit */
#ifdef CONFIG_PPC_ISERIES
li r0,-1 /* hypervisor call */
li r3,1
sldi r3,r3,63 /* 0x8000000000000000 */
ori r3,r3,4 /* 0x8000000000000004 */
sc /* HvCall_setASR */
#else
/* set the ASR */
ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */
ld r3,0(r3)
lwz r3,PLATFORM(r3) /* r3 = platform flags */
andi. r3,r3,PLATFORM_LPAR /* Test if bit 0 is set (LPAR bit) */
beq 98f /* branch if result is 0 */
mfspr r3,SPRN_PVR
srwi r3,r3,16
cmpwi r3,0x37 /* SStar */
beq 97f
cmpwi r3,0x36 /* IStar */
beq 97f
cmpwi r3,0x34 /* Pulsar */
bne 98f
97: li r3,H_SET_ASR /* hcall = H_SET_ASR */
HVSC /* Invoking hcall */
b 99f
98: /* !(rpa hypervisor) || !(star) */
mtasr r4 /* set the stab location */
99:
#endif
li r7,0
mtlr r7
@ -1896,40 +1852,6 @@ _STATIC(start_here_multiplatform)
mr r3,r31
bl .early_setup
/* set the ASR */
ld r3,PACASTABREAL(r13)
ori r4,r3,1 /* turn on valid bit */
ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */
ld r3,0(r3)
lwz r3,PLATFORM(r3) /* r3 = platform flags */
andi. r3,r3,PLATFORM_LPAR /* Test if bit 0 is set (LPAR bit) */
beq 98f /* branch if result is 0 */
mfspr r3,SPRN_PVR
srwi r3,r3,16
cmpwi r3,0x37 /* SStar */
beq 97f
cmpwi r3,0x36 /* IStar */
beq 97f
cmpwi r3,0x34 /* Pulsar */
bne 98f
97: li r3,H_SET_ASR /* hcall = H_SET_ASR */
HVSC /* Invoking hcall */
b 99f
98: /* !(rpa hypervisor) || !(star) */
mtasr r4 /* set the stab location */
99:
/* Set SDR1 (hash table pointer) */
ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */
ld r3,0(r3)
lwz r3,PLATFORM(r3) /* r3 = platform flags */
/* Test if bit 0 is set (LPAR bit) */
andi. r3,r3,PLATFORM_LPAR
bne 98f /* branch if result is !0 */
LOADADDR(r6,_SDR1) /* Only if NOT LPAR */
sub r6,r6,r26
ld r6,0(r6) /* get the value of _SDR1 */
mtspr SPRN_SDR1,r6 /* set the htab location */
98:
LOADADDR(r3,.start_here_common)
SET_REG_TO_CONST(r4, MSR_KERNEL)
mtspr SPRN_SRR0,r3

View File

@ -26,7 +26,6 @@
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/time.h>
#include <asm/systemcfg.h>
#include <asm/machdep.h>
#include <asm/smp.h>

View File

@ -78,12 +78,12 @@ _GLOBAL(call_do_softirq)
mtlr r0
blr
_GLOBAL(call_handle_IRQ_event)
_GLOBAL(call___do_IRQ)
mflr r0
std r0,16(r1)
stdu r1,THREAD_SIZE-112(r6)
mr r1,r6
bl .handle_IRQ_event
stdu r1,THREAD_SIZE-112(r5)
mr r1,r5
bl .__do_IRQ
ld r1,0(r1)
ld r0,16(r1)
mtlr r0

View File

@ -31,7 +31,6 @@
#include <asm/rtas.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/systemcfg.h>
#undef DEBUG_NVRAM
@ -167,7 +166,7 @@ static int dev_nvram_ioctl(struct inode *inode, struct file *file,
case IOC_NVRAM_GET_OFFSET: {
int part, offset;
if (systemcfg->platform != PLATFORM_POWERMAC)
if (_machine != PLATFORM_POWERMAC)
return -EINVAL;
if (copy_from_user(&part, (void __user*)arg, sizeof(part)) != 0)
return -EFAULT;
@ -450,7 +449,7 @@ static int nvram_setup_partition(void)
* in our nvram, as Apple defined partitions use pretty much
* all of the space
*/
if (systemcfg->platform == PLATFORM_POWERMAC)
if (_machine == PLATFORM_POWERMAC)
return -ENOSPC;
/* see if we have an OS partition that meets our needs.

View File

@ -548,6 +548,11 @@ static int __init pcibios_init(void)
if (ppc64_isabridge_dev != NULL)
printk("ISA bridge at %s\n", pci_name(ppc64_isabridge_dev));
#ifdef CONFIG_PPC_MULTIPLATFORM
/* map in PCI I/O space */
phbs_remap_io();
#endif
printk("PCI: Probing PCI hardware done\n");
return 0;
@ -1277,12 +1282,9 @@ long sys_pciconfig_iobase(long which, unsigned long in_bus,
* G5 machines... So when something asks for bus 0 io base
* (bus 0 is HT root), we return the AGP one instead.
*/
#ifdef CONFIG_PPC_PMAC
if (systemcfg->platform == PLATFORM_POWERMAC &&
machine_is_compatible("MacRISC4"))
if (machine_is_compatible("MacRISC4"))
if (in_bus == 0)
in_bus = 0xf0;
#endif /* CONFIG_PPC_PMAC */
/* That syscall isn't quite compatible with PCI domains, but it's
* used on pre-domains setup. We return the first match

View File

@ -43,7 +43,7 @@ static void * __devinit update_dn_pci_info(struct device_node *dn, void *data)
u32 *regs;
struct pci_dn *pdn;
if (phb->is_dynamic)
if (mem_init_done)
pdn = kmalloc(sizeof(*pdn), GFP_KERNEL);
else
pdn = alloc_bootmem(sizeof(*pdn));
@ -120,6 +120,14 @@ void *traverse_pci_devices(struct device_node *start, traverse_func pre,
return NULL;
}
/**
* pci_devs_phb_init_dynamic - setup pci devices under this PHB
* phb: pci-to-host bridge (top-level bridge connecting to cpu)
*
* This routine is called both during boot (before the memory
* subsystem is set up, before kmalloc is valid) and during the
* dynamic lpar operation of adding a PHB to a running system.
*/
void __devinit pci_devs_phb_init_dynamic(struct pci_controller *phb)
{
struct device_node * dn = (struct device_node *) phb->arch_data;
@ -201,9 +209,14 @@ static struct notifier_block pci_dn_reconfig_nb = {
.notifier_call = pci_dn_reconfig_notifier,
};
/*
* Actually initialize the phbs.
* The buswalk on this phb has not happened yet.
/**
* pci_devs_phb_init - Initialize phbs and pci devs under them.
*
* This routine walks over all phb's (pci-host bridges) on the
* system, and sets up assorted pci-related structures
* (including pci info in the device node structs) for each
* pci device found underneath. This routine runs once,
* early in the boot sequence.
*/
void __init pci_devs_phb_init(void)
{

View File

@ -318,7 +318,7 @@ static int __devinit finish_node_interrupts(struct device_node *np,
}
/* We offset irq numbers for the u3 MPIC by 128 in PowerMac */
if (systemcfg->platform == PLATFORM_POWERMAC && ic && ic->parent) {
if (_machine == PLATFORM_POWERMAC && ic && ic->parent) {
char *name = get_property(ic->parent, "name", NULL);
if (name && !strcmp(name, "u3"))
np->intrs[intrcount].line += 128;
@ -1065,7 +1065,7 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
prop = (u32 *)of_get_flat_dt_prop(node, "linux,platform", NULL);
if (prop == NULL)
return 0;
systemcfg->platform = *prop;
_machine = *prop;
/* check if iommu is forced on or off */
if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
@ -1230,11 +1230,8 @@ void __init early_init_devtree(void *params)
of_scan_flat_dt(early_init_dt_scan_memory, NULL);
lmb_enforce_memory_limit(memory_limit);
lmb_analyze();
systemcfg->physicalMemorySize = lmb_phys_mem_size();
lmb_reserve(0, __pa(klimit));
DBG("Phys. mem: %lx\n", systemcfg->physicalMemorySize);
/* Reserve LMB regions used by kernel, initrd, dt, etc... */
early_reserve_mem();
@ -1753,7 +1750,7 @@ static int of_finish_dynamic_node(struct device_node *node,
/* We don't support that function on PowerMac, at least
* not yet
*/
if (systemcfg->platform == PLATFORM_POWERMAC)
if (_machine == PLATFORM_POWERMAC)
return -ENODEV;
/* fix up new node's linux_phandle field */

View File

@ -1934,7 +1934,8 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, unsigned long
/*
* On pSeries, inform the firmware about our capabilities
*/
if (RELOC(of_platform) & PLATFORM_PSERIES)
if (RELOC(of_platform) == PLATFORM_PSERIES ||
RELOC(of_platform) == PLATFORM_PSERIES_LPAR)
prom_send_capabilities();
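A short sketch of why the bitwise platform test was replaced by explicit comparisons; the numeric platform codes below are assumptions for illustration only, not taken from this patch:
/* If the platform codes are enumerated values rather than flag bits,
 * e.g. (hypothetically) PLATFORM_PSERIES = 0x0100,
 * PLATFORM_PSERIES_LPAR = 0x0101, PLATFORM_MAPLE = 0x0500, then
 *     of_platform & PLATFORM_PSERIES
 * is also non-zero for 0x0500, whereas the equality checks match only
 * the two genuine pSeries variants. */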
/*

View File

@ -34,6 +34,7 @@
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/systemcfg.h>
#include <asm/vdso.h>
#undef DEBUG
@ -179,7 +180,7 @@ static struct page * vdso_vma_nopage(struct vm_area_struct * vma,
* Last page is systemcfg.
*/
if ((vma->vm_end - address) <= PAGE_SIZE)
pg = virt_to_page(systemcfg);
pg = virt_to_page(_systemcfg);
else
pg = virt_to_page(vbase + offset);
@ -604,7 +605,7 @@ void __init vdso_init(void)
get_page(pg);
}
get_page(virt_to_page(systemcfg));
get_page(virt_to_page(_systemcfg));
}
int in_gate_area_no_task(unsigned long addr)

View File

@ -37,6 +37,7 @@
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include <asm/pgtable.h>

View File

@ -34,6 +34,7 @@
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <asm/immap_cpm2.h>
#include <asm/mpc8260.h>

View File

@ -34,6 +34,7 @@
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

View File

@ -34,6 +34,7 @@
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

View File

@ -306,7 +306,7 @@ static int dlpar_add_phb(char *drc_name, struct device_node *dn)
{
struct pci_controller *phb;
if (PCI_DN(dn)->phb) {
if (PCI_DN(dn) && PCI_DN(dn)->phb) {
/* PHB already exists */
return -EINVAL;
}

Some files were not shown because too many files have changed in this diff.