Merge git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc-merge
* git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc-merge:
  powerpc: Use correct sequence for putting CPU into nap mode
  [PATCH] spufs: fix context-switch decrementer code
  [PATCH] powerpc32: Set cpu explicitly in kernel compiles
  [PATCH] powerpc/pseries: bugfix: balance calls to pci_device_put
  [PATCH] powerpc: Fix machine detection in prom_init.c
  [PATCH] ppc32: Fix string comparing in platform_notify_map
  [PATCH] powerpc: Avoid __initcall warnings
  [PATCH] powerpc: Ensure runlatch is off in the idle loop
  powerpc: Fix CHRP booting - needs a define_machine call
  powerpc: iSeries has only 256 IRQs
commit 6fbe85f914
@@ -366,6 +366,7 @@ config PPC_PMAC64
select U3_DART
select MPIC_BROKEN_U3
select GENERIC_TBSYNC
select PPC_970_NAP
default y

config PPC_PREP
@@ -383,6 +384,7 @@ config PPC_MAPLE
select MPIC_BROKEN_U3
select GENERIC_TBSYNC
select PPC_UDBG_16550
select PPC_970_NAP
default n
help
This option enables support for the Maple 970FX Evaluation Board.
@@ -457,6 +459,10 @@ config PPC_MPC106
bool
default n

config PPC_970_NAP
bool
default n

source "drivers/cpufreq/Kconfig"

config CPU_FREQ_PMAC
@@ -104,6 +104,10 @@ ifndef CONFIG_FSL_BOOKE
CFLAGS += -mstring
endif

ifeq ($(CONFIG_6xx),y)
CFLAGS += -mcpu=powerpc
endif

cpu-as-$(CONFIG_PPC64BRIDGE) += -Wa,-mppc64bridge
cpu-as-$(CONFIG_4xx) += -Wa,-m405
cpu-as-$(CONFIG_6xx) += -Wa,-maltivec
@@ -20,7 +20,7 @@ obj-$(CONFIG_PPC64) += setup_64.o binfmt_elf32.o sys_ppc32.o \
firmware.o sysfs.o
obj-$(CONFIG_PPC64) += vdso64/
obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o
obj-$(CONFIG_POWER4) += idle_power4.o
obj-$(CONFIG_PPC_970_NAP) += idle_power4.o
obj-$(CONFIG_PPC_OF) += of_device.o prom_parse.o
procfs-$(CONFIG_PPC64) := proc_ppc64.o
obj-$(CONFIG_PROC_FS) += $(procfs-y)
@@ -91,6 +91,7 @@ int main(void)
#endif /* CONFIG_PPC64 */

DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
DEFINE(TI_TASK, offsetof(struct thread_info, task));
#ifdef CONFIG_PPC32
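The new TI_LOCAL_FLAGS entry is what lets the assembly hunks later in this merge (transfer_to_handler, FINISH_NAP) address thread_info->local_flags as a plain constant. The asm-offsets mechanism works by compiling this file to assembly and scraping marker lines out of it; below is a minimal, self-contained sketch of the idea, not the kernel's exact macro text.

    #include <stddef.h>

    struct thread_info { unsigned long flags; unsigned long local_flags; };

    /* Emit a "->NAME value" marker into the generated assembly; the build
     * scripts turn each marker into "#define NAME value" in asm-offsets.h. */
    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " #val : : "i" (val))

    int main(void)
    {
            DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
            return 0;
    }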
@@ -128,37 +128,36 @@ transfer_to_handler:
stw r12,4(r11)
#endif
b 3f

2: /* if from kernel, check interrupted DOZE/NAP mode and
* check for stack overflow
*/
lwz r9,THREAD_INFO-THREAD(r12)
cmplw r1,r9 /* if r1 <= current->thread_info */
ble- stack_ovf /* then the kernel stack overflowed */
5:
#ifdef CONFIG_6xx
mfspr r11,SPRN_HID0
mtcr r11
BEGIN_FTR_SECTION
bt- 8,4f /* Check DOZE */
END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
BEGIN_FTR_SECTION
bt- 9,4f /* Check NAP */
END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
tophys(r9,r9) /* check local flags */
lwz r12,TI_LOCAL_FLAGS(r9)
mtcrf 0x01,r12
bt- 31-TLF_NAPPING,4f
#endif /* CONFIG_6xx */
.globl transfer_to_handler_cont
transfer_to_handler_cont:
lwz r11,THREAD_INFO-THREAD(r12)
cmplw r1,r11 /* if r1 <= current->thread_info */
ble- stack_ovf /* then the kernel stack overflowed */
3:
mflr r9
lwz r11,0(r9) /* virtual address of handler */
lwz r9,4(r9) /* where to go when done */
FIX_SRR1(r10,r12)
mtspr SPRN_SRR0,r11
mtspr SPRN_SRR1,r10
mtlr r9
SYNC
RFI /* jump to handler, enable MMU */

#ifdef CONFIG_6xx
4: b power_save_6xx_restore
#ifdef CONFIG_6xx
4: rlwinm r12,r12,0,~_TLF_NAPPING
stw r12,TI_LOCAL_FLAGS(r9)
b power_save_6xx_restore
#endif

/*
@@ -167,10 +166,10 @@ transfer_to_handler_cont:
*/
stack_ovf:
/* sometimes we use a statically-allocated stack, which is OK. */
lis r11,_end@h
ori r11,r11,_end@l
cmplw r1,r11
ble 3b /* r1 <= &_end is OK */
lis r12,_end@h
ori r12,r12,_end@l
cmplw r1,r12
ble 5b /* r1 <= &_end is OK */
SAVE_NVGPRS(r11)
addi r3,r1,STACK_FRAME_OVERHEAD
lis r1,init_thread_union@ha
@@ -376,17 +376,53 @@ label##_common: \
bl hdlr; \
b .ret_from_except

/*
* Like STD_EXCEPTION_COMMON, but for exceptions that can occur
* in the idle task and therefore need the special idle handling.
*/
#define STD_EXCEPTION_COMMON_IDLE(trap, label, hdlr) \
.align 7; \
.globl label##_common; \
label##_common: \
EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
FINISH_NAP; \
DISABLE_INTS; \
bl .save_nvgprs; \
addi r3,r1,STACK_FRAME_OVERHEAD; \
bl hdlr; \
b .ret_from_except

#define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr) \
.align 7; \
.globl label##_common; \
label##_common: \
EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
FINISH_NAP; \
DISABLE_INTS; \
bl .ppc64_runlatch_on; \
addi r3,r1,STACK_FRAME_OVERHEAD; \
bl hdlr; \
b .ret_from_except_lite

/*
* When the idle code in power4_idle puts the CPU into NAP mode,
* it has to do so in a loop, and relies on the external interrupt
* and decrementer interrupt entry code to get it out of the loop.
* It sets the _TLF_NAPPING bit in current_thread_info()->local_flags
* to signal that it is in the loop and needs help to get out.
*/
#ifdef CONFIG_PPC_970_NAP
#define FINISH_NAP \
BEGIN_FTR_SECTION \
clrrdi r11,r1,THREAD_SHIFT; \
ld r9,TI_LOCAL_FLAGS(r11); \
andi. r10,r9,_TLF_NAPPING; \
bnel power4_fixup_nap; \
END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
#else
#define FINISH_NAP
#endif

/*
* Start of pSeries system interrupt routines
*/
@@ -772,6 +808,7 @@ hardware_interrupt_iSeries_masked:
.globl machine_check_common
machine_check_common:
EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
FINISH_NAP
DISABLE_INTS
bl .save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD
@@ -783,7 +820,7 @@ machine_check_common:
STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
STD_EXCEPTION_COMMON(0xf00, performance_monitor, .performance_monitor_exception)
STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception)
STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
#ifdef CONFIG_ALTIVEC
STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
@@ -1034,6 +1071,7 @@ unrecov_slb:
.globl hardware_interrupt_entry
hardware_interrupt_common:
EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
FINISH_NAP
hardware_interrupt_entry:
DISABLE_INTS
bl .ppc64_runlatch_on
@@ -1041,6 +1079,15 @@ hardware_interrupt_entry:
bl .do_IRQ
b .ret_from_except_lite

#ifdef CONFIG_PPC_970_NAP
power4_fixup_nap:
andc r9,r9,r10
std r9,TI_LOCAL_FLAGS(r11)
ld r10,_LINK(r1) /* make idle task do the */
std r10,_NIP(r1) /* equivalent of a blr */
blr
#endif

.align 7
.globl alignment_common
alignment_common:
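The comment added with FINISH_NAP above describes a small handshake between the idle loop and the interrupt entry code. Below is a rough C model of that handshake; wait_for_interrupt() and the control flow are illustrative stand-ins only, the real implementation is the power4/6xx idle assembly further down in this merge together with FINISH_NAP and power4_fixup_nap.

    #define TLF_NAPPING   0
    #define _TLF_NAPPING  (1UL << TLF_NAPPING)

    struct thread_info_model { unsigned long local_flags; };

    static void wait_for_interrupt(void) { /* stand-in for sync/mtmsrd(POW|EE)/isync */ }

    /* Idle side: advertise that we are napping, then sleep in a loop so a
     * stray wakeup simply naps again. */
    static void nap_loop(struct thread_info_model *ti)
    {
            ti->local_flags |= _TLF_NAPPING;
            while (ti->local_flags & _TLF_NAPPING)
                    wait_for_interrupt();
    }

    /* Interrupt side, i.e. what FINISH_NAP/power4_fixup_nap arrange: clear the
     * flag and (in the real code) rewrite the saved NIP with the saved LR so
     * the interrupt returns to nap_loop's caller rather than back into it. */
    static void finish_nap(struct thread_info_model *ti)
    {
            ti->local_flags &= ~_TLF_NAPPING;
    }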
@@ -50,9 +50,9 @@ void cpu_idle(void)

set_thread_flag(TIF_POLLING_NRFLAG);
while (1) {
ppc64_runlatch_off();

while (!need_resched() && !cpu_should_die()) {
ppc64_runlatch_off();

if (ppc_md.power_save) {
clear_thread_flag(TIF_POLLING_NRFLAG);
/*
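The hunk above moves ppc64_runlatch_off() from just inside the outer loop to inside the inner idle loop. Any interrupt taken while idling turns the runlatch back on, so it has to be cleared again on every pass. A paraphrased fragment of the resulting loop shape, with SMP, polling-flag and preemption details omitted:

    while (1) {
            while (!need_resched() && !cpu_should_die()) {
                    ppc64_runlatch_off();   /* re-clear each pass: an interrupt
                                               taken while idle re-enables it */
                    if (ppc_md.power_save)
                            ppc_md.power_save();
            }

            ppc64_runlatch_on();            /* about to do real work again */
            /* cpu_die()/schedule() handling elided */
    }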
@@ -22,8 +22,6 @@
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>

#undef DEBUG

.text

/*
@@ -109,12 +107,6 @@ BEGIN_FTR_SECTION
dcbf 0,r4
dcbf 0,r4
END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
#ifdef DEBUG
lis r6,nap_enter_count@ha
lwz r4,nap_enter_count@l(r6)
addi r4,r4,1
stw r4,nap_enter_count@l(r6)
#endif
2:
BEGIN_FTR_SECTION
/* Go to low speed mode on some 750FX */
@@ -144,48 +136,42 @@ BEGIN_FTR_SECTION
DSSALL
sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
rlwinm r9,r1,0,0,31-THREAD_SHIFT /* current thread_info */
lwz r8,TI_LOCAL_FLAGS(r9) /* set napping bit */
ori r8,r8,_TLF_NAPPING /* so when we take an exception */
stw r8,TI_LOCAL_FLAGS(r9) /* it will return to our caller */
mfmsr r7
ori r7,r7,MSR_EE
oris r7,r7,MSR_POW@h
sync
isync
1: sync
mtmsr r7
isync
sync
blr

b 1b

/*
* Return from NAP/DOZE mode, restore some CPU specific registers,
* we are called with DR/IR still off and r2 containing physical
* address of current.
* address of current. R11 points to the exception frame (physical
* address). We have to preserve r10.
*/
_GLOBAL(power_save_6xx_restore)
mfspr r11,SPRN_HID0
rlwinm. r11,r11,0,10,8 /* Clear NAP & copy NAP bit !state to cr1 EQ */
cror 4*cr1+eq,4*cr0+eq,4*cr0+eq
BEGIN_FTR_SECTION
rlwinm r11,r11,0,9,7 /* Clear DOZE */
END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
mtspr SPRN_HID0, r11
lwz r9,_LINK(r11) /* interrupted in ppc6xx_idle: */
stw r9,_NIP(r11) /* make it do a blr */

#ifdef DEBUG
beq cr1,1f
lis r11,(nap_return_count-KERNELBASE)@ha
lwz r9,nap_return_count@l(r11)
addi r9,r9,1
stw r9,nap_return_count@l(r11)
1:
#endif

rlwinm r9,r1,0,0,18
tophys(r9,r9)
lwz r11,TI_CPU(r9)
#ifdef CONFIG_SMP
mfspr r12,SPRN_SPRG3
lwz r11,TI_CPU(r12) /* get cpu number * 4 */
slwi r11,r11,2
#else
li r11,0
#endif
/* Todo make sure all these are in the same page
* and load r22 (@ha part + CPU offset) only once
* and load r11 (@ha part + CPU offset) only once
*/
BEGIN_FTR_SECTION
beq cr1,1f
mfspr r9,SPRN_HID0
andis. r9,r9,HID0_NAP@h
beq 1f
addis r9,r11,(nap_save_msscr0-KERNELBASE)@ha
lwz r9,nap_save_msscr0@l(r9)
mtspr SPRN_MSSCR0, r9
@@ -210,10 +196,3 @@ _GLOBAL(nap_save_hid1)

_GLOBAL(powersave_lowspeed)
.long 0

#ifdef DEBUG
_GLOBAL(nap_enter_count)
.space 4
_GLOBAL(nap_return_count)
.space 4
#endif
@@ -35,12 +35,16 @@ BEGIN_FTR_SECTION
DSSALL
sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
clrrdi r9,r1,THREAD_SHIFT /* current thread_info */
ld r8,TI_LOCAL_FLAGS(r9) /* set napping bit */
ori r8,r8,_TLF_NAPPING /* so when we take an exception */
std r8,TI_LOCAL_FLAGS(r9) /* it will return to our caller */
mfmsr r7
ori r7,r7,MSR_EE
oris r7,r7,MSR_POW@h
sync
1: sync
isync
mtmsrd r7
isync
sync
blr
b 1b

@@ -272,18 +272,26 @@ unsigned int virt_irq_to_real_map[NR_IRQS];
* Don't use virtual irqs 0, 1, 2 for devices.
* The pcnet32 driver considers interrupt numbers < 2 to be invalid,
* and 2 is the XICS IPI interrupt.
* We limit virtual irqs to 17 less than NR_IRQS so that when we
* offset them by 16 (to reserve the first 16 for ISA interrupts)
* we don't end up with an interrupt number >= NR_IRQS.
* We limit virtual irqs to __irq_offet_value less than virt_irq_max so
* that when we offset them we don't end up with an interrupt
* number >= virt_irq_max.
*/
#define MIN_VIRT_IRQ 3
#define MAX_VIRT_IRQ (NR_IRQS - NUM_ISA_INTERRUPTS - 1)
#define NR_VIRT_IRQS (MAX_VIRT_IRQ - MIN_VIRT_IRQ + 1)

unsigned int virt_irq_max;
static unsigned int max_virt_irq;
static unsigned int nr_virt_irqs;

void
virt_irq_init(void)
{
int i;

if ((virt_irq_max == 0) || (virt_irq_max > (NR_IRQS - 1)))
virt_irq_max = NR_IRQS - 1;
max_virt_irq = virt_irq_max - __irq_offset_value;
nr_virt_irqs = max_virt_irq - MIN_VIRT_IRQ + 1;

for (i = 0; i < NR_IRQS; i++)
virt_irq_to_real_map[i] = UNDEFINED_IRQ;
}
@@ -308,17 +316,17 @@ int virt_irq_create_mapping(unsigned int real_irq)
return real_irq;
}

/* map to a number between MIN_VIRT_IRQ and MAX_VIRT_IRQ */
/* map to a number between MIN_VIRT_IRQ and max_virt_irq */
virq = real_irq;
if (virq > MAX_VIRT_IRQ)
virq = (virq % NR_VIRT_IRQS) + MIN_VIRT_IRQ;
if (virq > max_virt_irq)
virq = (virq % nr_virt_irqs) + MIN_VIRT_IRQ;

/* search for this number or a free slot */
first_virq = virq;
while (virt_irq_to_real_map[virq] != UNDEFINED_IRQ) {
if (virt_irq_to_real_map[virq] == real_irq)
return virq;
if (++virq > MAX_VIRT_IRQ)
if (++virq > max_virt_irq)
virq = MIN_VIRT_IRQ;
if (virq == first_virq)
goto nospace; /* oops, no free slots */
@@ -330,8 +338,8 @@ int virt_irq_create_mapping(unsigned int real_irq)
nospace:
if (!warned) {
printk(KERN_CRIT "Interrupt table is full\n");
printk(KERN_CRIT "Increase NR_IRQS (currently %d) "
"in your kernel sources and rebuild.\n", NR_IRQS);
printk(KERN_CRIT "Increase virt_irq_max (currently %d) "
"in your kernel sources and rebuild.\n", virt_irq_max);
warned = 1;
}
return NO_IRQ;
@@ -349,8 +357,8 @@ unsigned int real_irq_to_virt_slowpath(unsigned int real_irq)

virq = real_irq;

if (virq > MAX_VIRT_IRQ)
virq = (virq % NR_VIRT_IRQS) + MIN_VIRT_IRQ;
if (virq > max_virt_irq)
virq = (virq % nr_virt_irqs) + MIN_VIRT_IRQ;

first_virq = virq;

@@ -360,7 +368,7 @@ unsigned int real_irq_to_virt_slowpath(unsigned int real_irq)

virq++;

if (virq >= MAX_VIRT_IRQ)
if (virq >= max_virt_irq)
virq = 0;

} while (first_virq != virq);
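The fixed NR_IRQS/NUM_ISA_INTERRUPTS-based cap is replaced by a runtime limit, virt_irq_max, which a platform may lower before virt_irq_init() runs; the iSeries hunk later in this merge sets it to 255 because the hypervisor passes interrupt sources in a u8. Below is a self-contained model of the remapping arithmetic, with illustrative values rather than the kernel's:

    #include <stdio.h>

    #define NR_IRQS       512
    #define MIN_VIRT_IRQ  3

    static unsigned int virt_irq_max = 255;  /* e.g. set by the iSeries probe code */
    static unsigned int irq_offset = 16;     /* stands in for __irq_offset_value */
    static unsigned int max_virt_irq;
    static unsigned int nr_virt_irqs;

    static unsigned int pick_virq(unsigned int real_irq)
    {
            unsigned int virq = real_irq;

            if (virq > max_virt_irq)
                    virq = (virq % nr_virt_irqs) + MIN_VIRT_IRQ;
            return virq;
    }

    int main(void)
    {
            if (virt_irq_max == 0 || virt_irq_max > NR_IRQS - 1)
                    virt_irq_max = NR_IRQS - 1;
            max_virt_irq = virt_irq_max - irq_offset;
            nr_virt_irqs = max_virt_irq - MIN_VIRT_IRQ + 1;

            /* A large hardware interrupt number folds back into [MIN_VIRT_IRQ, max_virt_irq]. */
            printf("real 1000 -> virq %u (cap %u)\n", pick_virq(1000), max_virt_irq);
            return 0;
    }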
@@ -1528,12 +1528,11 @@ static int __init prom_find_machine_type(void)
* non-IBM designs !
* - it has /rtas
*/
len = prom_getprop(_prom->root, "model",
len = prom_getprop(_prom->root, "device_type",
compat, sizeof(compat)-1);
if (len <= 0)
return PLATFORM_GENERIC;
compat[len] = 0;
if (strcmp(compat, "chrp"))
if (strncmp(compat, RELOC("chrp"), 4))
return PLATFORM_GENERIC;

/* Default to pSeries. We need to know if we are running LPAR */
@@ -258,11 +258,11 @@ static int __init proc_rtas_init(void)
struct proc_dir_entry *entry;

if (!machine_is(pseries))
return 1;
return -ENODEV;

rtas_node = of_find_node_by_name(NULL, "rtas");
if (rtas_node == NULL)
return 1;
return -ENODEV;

entry = create_proc_entry("ppc64/rtas/progress", S_IRUGO|S_IWUSR, NULL);
if (entry)
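Both this hunk and the rtas_init one further down replace "return 1" with "return -ENODEV". The initcall convention is 0 for success or a negative errno when the facility is simply absent on this machine; returning a bare 1 is neither, which is what the "Avoid __initcall warnings" patch in this merge cleans up. A hedged sketch of the pattern, with hypothetical helper names:

    static int __init example_init(void)
    {
            if (!platform_has_rtas())       /* hypothetical check */
                    return -ENODEV;         /* not an error: just not this machine */

            return register_rtas_proc_entries();    /* hypothetical, 0 on success */
    }
    __initcall(example_init);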
@@ -1297,7 +1297,7 @@ static inline void setup_decr(struct spu_state *csa, struct spu *spu)
cycles_t resume_time = get_cycles();
cycles_t delta_time = resume_time - csa->suspend_time;

csa->lscsa->decr.slot[0] = delta_time;
csa->lscsa->decr.slot[0] -= delta_time;
}
}

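The spufs fix changes the restored SPU decrementer from being overwritten with the time spent switched out to being decremented by it, so the counter behaves as if it had kept running across the context switch. A simplified, self-contained model (field names and the surrounding "decrementer running" check are simplified away):

    #include <stdio.h>

    typedef unsigned long long cycles_t;

    struct saved_state {
            cycles_t suspend_time;
            unsigned int decr;              /* stands in for lscsa->decr.slot[0] */
    };

    static void restore_decr(struct saved_state *csa, cycles_t now)
    {
            cycles_t delta = now - csa->suspend_time;

            csa->decr -= (unsigned int)delta;       /* was: csa->decr = delta; */
    }

    int main(void)
    {
            struct saved_state s = { .suspend_time = 1000, .decr = 500000 };

            restore_decr(&s, 1300);                 /* 300 cycles spent switched out */
            printf("decr now %u\n", s.decr);        /* prints 499700 */
            return 0;
    }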
@@ -9,3 +9,4 @@ extern long chrp_time_init(void);

extern void chrp_find_bridges(void);
extern void chrp_event_scan(unsigned long);
extern void chrp_pcibios_fixup(void);
@@ -23,6 +23,8 @@
#include <asm/grackle.h>
#include <asm/rtas.h>

#include "chrp.h"

/* LongTrail */
void __iomem *gg2_pci_config_base;

@@ -314,6 +316,6 @@ chrp_find_bridges(void)
}

/* Do not fixup interrupts from OF tree on pegasos */
if (is_pegasos == 0)
ppc_md.pcibios_fixup = chrp_pcibios_fixup;
if (is_pegasos)
ppc_md.pcibios_fixup = NULL;
}
@@ -440,8 +440,6 @@ void __init chrp_init_IRQ(void)

if (_chrp_type == _CHRP_Pegasos)
ppc_md.get_irq = i8259_irq;
else
ppc_md.get_irq = mpic_get_irq;

#if defined(CONFIG_VT) && defined(CONFIG_INPUT_ADBHID) && defined(XMON)
/* see if there is a keyboard in the device tree
@@ -528,26 +526,24 @@ static int __init chrp_probe(void)
/* Assume we have an 8259... */
__irq_offset_value = NUM_ISA_INTERRUPTS;

ppc_md.setup_arch = chrp_setup_arch;
ppc_md.show_cpuinfo = chrp_show_cpuinfo;

ppc_md.init_IRQ = chrp_init_IRQ;
ppc_md.init = chrp_init2;

ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot;

ppc_md.restart = rtas_restart;
ppc_md.power_off = rtas_power_off;
ppc_md.halt = rtas_halt;

ppc_md.time_init = chrp_time_init;
ppc_md.calibrate_decr = generic_calibrate_decr;

/* this may get overridden with rtas routines later... */
ppc_md.set_rtc_time = chrp_set_rtc_time;
ppc_md.get_rtc_time = chrp_get_rtc_time;

#ifdef CONFIG_SMP
smp_ops = &chrp_smp_ops;
#endif /* CONFIG_SMP */
return 1;
}

define_machine(chrp) {
.name = "CHRP",
.probe = chrp_probe,
.setup_arch = chrp_setup_arch,
.init = chrp_init2,
.show_cpuinfo = chrp_show_cpuinfo,
.init_IRQ = chrp_init_IRQ,
.get_irq = mpic_get_irq,
.pcibios_fixup = chrp_pcibios_fixup,
.restart = rtas_restart,
.power_off = rtas_power_off,
.halt = rtas_halt,
.time_init = chrp_time_init,
.set_rtc_time = chrp_set_rtc_time,
.get_rtc_time = chrp_get_rtc_time,
.calibrate_decr = generic_calibrate_decr,
.phys_mem_access_prot = pci_phys_mem_access_prot,
};
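chrp_probe() now keeps only the __irq_offset_value setup; the ppc_md hooks move into the define_machine(chrp) block, which is what the "needs a define_machine call" fix in the merge summary refers to. A toy model of the mechanism is sketched below; the real macro places each descriptor in a dedicated linker section that the early setup code walks, so this table is only an illustration.

    struct machdep_calls_model {
            const char      *name;
            int             (*probe)(void);
            void            (*setup_arch)(void);
    };

    static int chrp_probe_model(void) { return 1; }         /* placeholder probe */
    static void chrp_setup_arch_model(void) { }

    static struct machdep_calls_model mach_chrp_model = {
            .name           = "CHRP",
            .probe          = chrp_probe_model,
            .setup_arch     = chrp_setup_arch_model,
    };

    /* Stand-in for the table the kernel assembles from define_machine() uses. */
    static struct machdep_calls_model *machines[] = { &mach_chrp_model };

    static struct machdep_calls_model *probe_machine_model(void)
    {
            for (unsigned int i = 0; i < sizeof(machines) / sizeof(machines[0]); i++)
                    if (machines[i]->probe())
                            return machines[i];
            return 0;       /* nothing registered: the machine cannot be set up */
    }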
@@ -54,6 +54,7 @@
#include <asm/iseries/hv_lp_event.h>
#include <asm/iseries/lpar_map.h>
#include <asm/udbg.h>
#include <asm/irq.h>

#include "naca.h"
#include "setup.h"
@@ -684,6 +685,12 @@ static int __init iseries_probe(void)
powerpc_firmware_features |= FW_FEATURE_ISERIES;
powerpc_firmware_features |= FW_FEATURE_LPAR;

/*
* The Hypervisor only allows us up to 256 interrupt
* sources (the irq number is passed in a u8).
*/
virt_irq_max = 255;

return 1;
}

@@ -957,8 +957,10 @@ static void eeh_remove_device(struct pci_dev *dev)
pci_addr_cache_remove_device(dev);

dn = pci_device_to_OF_node(dev);
PCI_DN(dn)->pcidev = NULL;
pci_dev_put (dev);
if (PCI_DN(dn)->pcidev) {
PCI_DN(dn)->pcidev = NULL;
pci_dev_put (dev);
}
}

void eeh_remove_bus_device(struct pci_dev *dev)
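The guard keeps pci_dev_put() balanced against the reference the EEH code takes when it caches the pcidev pointer (which is what the "balance calls to pci_device_put" patch title describes), so removing a device that was never cached no longer drops a reference it does not own. A minimal sketch of the balanced pattern, with placeholder structure and helper names rather than the real EEH code:

    #include <linux/pci.h>

    struct eeh_node_model { struct pci_dev *pcidev; };

    static void cache_device(struct eeh_node_model *dn, struct pci_dev *dev)
    {
            dn->pcidev = pci_dev_get(dev);          /* take a reference when caching */
    }

    static void uncache_device(struct eeh_node_model *dn, struct pci_dev *dev)
    {
            if (dn->pcidev) {                       /* only drop what was taken */
                    dn->pcidev = NULL;
                    pci_dev_put(dev);
            }
    }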
@@ -488,7 +488,7 @@ static int __init rtas_init(void)
/* No RTAS */
if (rtas_token("event-scan") == RTAS_UNKNOWN_SERVICE) {
printk(KERN_INFO "rtasd: no event-scan on system\n");
return 1;
return -ENODEV;
}

entry = create_proc_entry("ppc64/rtas/error_log", S_IRUSR, NULL);
@@ -156,12 +156,13 @@ void platform_notify_map(const struct platform_notify_dev_map *map,
while (map->bus_id != NULL) {
idx = -1;
s = strrchr(dev->bus_id, '.');
if (s != NULL)
if (s != NULL) {
idx = (int)simple_strtol(s + 1, NULL, 10);
else
len = s - dev->bus_id;
} else {
s = dev->bus_id;

len = s - dev->bus_id;
len = strlen(dev->bus_id);
}

if (!strncmp(dev->bus_id, map->bus_id, len)) {
pdev = container_of(dev, struct platform_device, dev);
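In the old code, when bus_id had no ".N" instance suffix, s was reset to dev->bus_id and len ended up 0, so the strncmp() matched every map entry. The fix computes len in both branches: the prefix before the dot when there is a suffix, the whole string otherwise. A self-contained userspace model of the corrected comparison (the device names are made up for illustration):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int matches(const char *bus_id, const char *map_id)
    {
            const char *s = strrchr(bus_id, '.');
            size_t len;
            int idx = -1;

            if (s != NULL) {
                    idx = (int)strtol(s + 1, NULL, 10);
                    len = (size_t)(s - bus_id);     /* compare only the name part */
            } else {
                    len = strlen(bus_id);           /* no ".N" suffix: compare it all */
            }
            (void)idx;
            return strncmp(bus_id, map_id, len) == 0;
    }

    int main(void)
    {
            printf("%d %d\n", matches("fsl-i2c.0", "fsl-i2c"),   /* 1 */
                              matches("fsl-i2c.0", "fsl-ocp"));  /* 0 */
            return 0;
    }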
@@ -54,6 +54,13 @@
*/
extern unsigned int virt_irq_to_real_map[NR_IRQS];

/* The maximum virtual IRQ number that we support. This
* can be set by the platform and will be reduced by the
* value of __irq_offset_value. It defaults to and is
* capped by (NR_IRQS - 1).
*/
extern unsigned int virt_irq_max;

/* Create a mapping for a real_irq if it doesn't already exist.
* Return the virtual irq as a convenience.
*/
@@ -37,6 +37,8 @@ struct thread_info {
int preempt_count; /* 0 => preemptable,
<0 => BUG */
struct restart_block restart_block;
unsigned long local_flags; /* private flags for thread */

/* low level flags - has atomic operations done on it */
unsigned long flags ____cacheline_aligned_in_smp;
};
@@ -143,6 +145,12 @@ static inline struct thread_info *current_thread_info(void)
_TIF_NEED_RESCHED | _TIF_RESTORE_SIGMASK)
#define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR)

/* Bits in local_flags */
/* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
#define TLF_NAPPING 0 /* idle thread enabled NAP mode */

#define _TLF_NAPPING (1 << TLF_NAPPING)

#endif /* __KERNEL__ */

#endif /* _ASM_POWERPC_THREAD_INFO_H */
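local_flags is deliberately separate from flags: it is only ever read and written by its owning thread, so the assembly hunks above can update it with plain loads and stores instead of atomic sequences, and the comment pins TLF_NAPPING near bit 0 because entry_32.S copies only the low nibble of local_flags into the condition register (mtcrf 0x01) before testing it. A rough picture of the split, not the real header:

    struct thread_info_model {
            unsigned long flags;            /* shared: may be set from other CPUs,
                                               needs atomic/locked updates */
            unsigned long local_flags;      /* private: only the owning thread */
    };

    #define TLF_NAPPING     0
    #define _TLF_NAPPING    (1UL << TLF_NAPPING)

    static inline void set_napping(struct thread_info_model *ti)
    {
            ti->local_flags |= _TLF_NAPPING;        /* plain read-modify-write is enough */
    }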