[MIPS] SMP: Call platform methods via ops structure.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
This commit is contained in:
Ralf Baechle 2007-11-19 12:23:51 +00:00
parent 19388fb092
commit 87353d8ac3
27 changed files with 714 additions and 518 deletions

View File

@ -1441,6 +1441,7 @@ config MIPS_MT_SMP
select SMP select SMP
select SYS_SUPPORTS_SCHED_SMT if SMP select SYS_SUPPORTS_SCHED_SMT if SMP
select SYS_SUPPORTS_SMP select SYS_SUPPORTS_SMP
select SMP_UP
help help
This is a kernel model which is also known a VSMP or lately This is a kernel model which is also known a VSMP or lately
has been marketesed into SMVP. has been marketesed into SMVP.
@ -1457,6 +1458,7 @@ config MIPS_MT_SMTC
select NR_CPUS_DEFAULT_8 select NR_CPUS_DEFAULT_8
select SMP select SMP
select SYS_SUPPORTS_SMP select SYS_SUPPORTS_SMP
select SMP_UP
help help
This is a kernel model which is known a SMTC or lately has been This is a kernel model which is known a SMTC or lately has been
marketesed into SMVP. marketesed into SMVP.
@ -1735,6 +1737,9 @@ config SMP
If you don't know what to do here, say N. If you don't know what to do here, say N.
config SMP_UP
bool
config SYS_SUPPORTS_SMP config SYS_SUPPORTS_SMP
bool bool

View File

@ -12,6 +12,7 @@
#include <asm/bootinfo.h> #include <asm/bootinfo.h>
#include <asm/sgialib.h> #include <asm/sgialib.h>
#include <asm/smp-ops.h>
#undef DEBUG_PROM_INIT #undef DEBUG_PROM_INIT
@ -48,4 +49,11 @@ void __init prom_init(void)
ArcRead(0, &c, 1, &cnt); ArcRead(0, &c, 1, &cnt);
ArcEnterInteractiveMode(); ArcEnterInteractiveMode();
#endif #endif
#ifdef CONFIG_SGI_IP27
{
extern struct plat_smp_ops ip27_smp_ops;
register_smp_ops(&ip27_smp_ops);
}
#endif
} }

View File

@ -17,7 +17,6 @@
#include <asm/system.h> #include <asm/system.h>
#include <asm/hardirq.h> #include <asm/hardirq.h>
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/mipsmtregs.h> #include <asm/mipsmtregs.h>
#include <asm/r4kcache.h> #include <asm/r4kcache.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>

View File

@ -29,6 +29,7 @@
#include <asm/cpu.h> #include <asm/cpu.h>
#include <asm/sections.h> #include <asm/sections.h>
#include <asm/setup.h> #include <asm/setup.h>
#include <asm/smp-ops.h>
#include <asm/system.h> #include <asm/system.h>
struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly; struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;
@ -575,9 +576,7 @@ void __init setup_arch(char **cmdline_p)
arch_mem_init(cmdline_p); arch_mem_init(cmdline_p);
resource_init(); resource_init();
#ifdef CONFIG_SMP
plat_smp_setup(); plat_smp_setup();
#endif
} }
static int __init fpu_disable(char *s) static int __init fpu_disable(char *s)

View File

@ -215,12 +215,117 @@ static void __init smp_tc_init(unsigned int tc, unsigned int mvpconf0)
write_tc_c0_tchalt(TCHALT_H); write_tc_c0_tchalt(TCHALT_H);
} }
/*
 * Kick a single remote VPE by setting a software-interrupt bit in its
 * Cause register: SW1 requests a cross-CPU function call, SW0 (the
 * default) a reschedule.
 */
static void vsmp_send_ipi_single(int cpu, unsigned int action)
{
	unsigned long flags;
	unsigned int cause_bit;
	int vpflags;

	local_irq_save(flags);

	/* can't access the other CPU's registers whilst MVPE enabled */
	vpflags = dvpe();

	cause_bit = (action == SMP_CALL_FUNCTION) ? C_SW1 : C_SW0;

	/* 1:1 mapping of vpe and tc... */
	settc(cpu);
	write_vpe_c0_cause(read_vpe_c0_cause() | cause_bit);
	evpe(vpflags);

	local_irq_restore(flags);
}
/* Deliver @action to every CPU present in @mask, one IPI at a time. */
static void vsmp_send_ipi_mask(cpumask_t mask, unsigned int action)
{
	unsigned int cpu;

	for_each_cpu_mask(cpu, mask)
		vsmp_send_ipi_single(cpu, action);
}
/*
 * Early per-CPU initialisation, run on the secondary itself from
 * start_secondary().
 */
static void __cpuinit vsmp_init_secondary(void)
{
	/* Enable per-cpu interrupts */

	/* This is Malta specific: IPI, performance and timer interrupts */
	write_c0_status((read_c0_status() & ~ST0_IM ) |
	                (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP6 | STATUSF_IP7));
}
/*
 * Final per-CPU bring-up step, run on the secondary just before it is
 * considered fully up.
 */
static void __cpuinit vsmp_smp_finish(void)
{
	/* arm the first count/compare (timer) interrupt */
	write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ));

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpu_set(smp_processor_id(), mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */

	local_irq_enable();
}
/* Called once all CPUs are online; VSMP needs no further work here. */
static void vsmp_cpus_done(void)
{
}
/*
 * Setup the PC, SP, and GP of a secondary processor and start it
 * running!
 * smp_bootstrap is the place to resume from
 * __KSTK_TOS(idle) is apparently the stack pointer
 * (unsigned long)idle->thread_info the gp
 * assumes a 1:1 mapping of TC => VPE
 */
static void __cpuinit vsmp_boot_secondary(int cpu, struct task_struct *idle)
{
	struct thread_info *gp = task_thread_info(idle);
	dvpe();
	set_c0_mvpcontrol(MVPCONTROL_VPC);

	settc(cpu);

	/* restart */
	write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);

	/* enable the tc this vpe/cpu will be running */
	write_tc_c0_tcstatus((read_tc_c0_tcstatus() & ~TCSTATUS_IXMT) | TCSTATUS_A);

	write_tc_c0_tchalt(0);

	/* enable the VPE */
	write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);

	/* stack pointer */
	write_tc_gpr_sp(__KSTK_TOS(idle));

	/* global pointer */
	write_tc_gpr_gp((unsigned long)gp);

	/*
	 * Flush exactly the new thread_info.  The previous end address,
	 * (unsigned long)(gp + sizeof(struct thread_info)), performed
	 * pointer arithmetic on a thread_info *, scaling the offset by
	 * sizeof(struct thread_info) a second time and flushing a far
	 * larger range than intended.
	 */
	flush_icache_range((unsigned long)gp,
			   (unsigned long)gp + sizeof(struct thread_info));

	/* finally out of configuration and into chaos */
	clear_c0_mvpcontrol(MVPCONTROL_VPC);

	evpe(EVPE_ENABLE);
}
/* /*
* Common setup before any secondaries are started * Common setup before any secondaries are started
* Make sure all CPU's are in a sensible state before we boot any of the * Make sure all CPU's are in a sensible state before we boot any of the
* secondarys * secondarys
*/ */
void __init plat_smp_setup(void) static void __init vsmp_smp_setup(void)
{ {
unsigned int mvpconf0, ntc, tc, ncpu = 0; unsigned int mvpconf0, ntc, tc, ncpu = 0;
unsigned int nvpe; unsigned int nvpe;
@ -263,7 +368,7 @@ void __init plat_smp_setup(void)
printk(KERN_INFO "Detected %i available secondary CPU(s)\n", ncpu); printk(KERN_INFO "Detected %i available secondary CPU(s)\n", ncpu);
} }
void __init plat_prepare_cpus(unsigned int max_cpus) static void __init vsmp_prepare_cpus(unsigned int max_cpus)
{ {
mips_mt_set_cpuoptions(); mips_mt_set_cpuoptions();
@ -283,99 +388,13 @@ void __init plat_prepare_cpus(unsigned int max_cpus)
set_irq_handler(cpu_ipi_call_irq, handle_percpu_irq); set_irq_handler(cpu_ipi_call_irq, handle_percpu_irq);
} }
/* struct plat_smp_ops vsmp_smp_ops = {
* Setup the PC, SP, and GP of a secondary processor and start it .send_ipi_single = vsmp_send_ipi_single,
* running! .send_ipi_mask = vsmp_send_ipi_mask,
* smp_bootstrap is the place to resume from .init_secondary = vsmp_init_secondary,
* __KSTK_TOS(idle) is apparently the stack pointer .smp_finish = vsmp_smp_finish,
* (unsigned long)idle->thread_info the gp .cpus_done = vsmp_cpus_done,
* assumes a 1:1 mapping of TC => VPE .boot_secondary = vsmp_boot_secondary,
*/ .smp_setup = vsmp_smp_setup,
void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle) .prepare_cpus = vsmp_prepare_cpus,
{ };
struct thread_info *gp = task_thread_info(idle);
dvpe();
set_c0_mvpcontrol(MVPCONTROL_VPC);
settc(cpu);
/* restart */
write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);
/* enable the tc this vpe/cpu will be running */
write_tc_c0_tcstatus((read_tc_c0_tcstatus() & ~TCSTATUS_IXMT) | TCSTATUS_A);
write_tc_c0_tchalt(0);
/* enable the VPE */
write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
/* stack pointer */
write_tc_gpr_sp( __KSTK_TOS(idle));
/* global pointer */
write_tc_gpr_gp((unsigned long)gp);
flush_icache_range((unsigned long)gp,
(unsigned long)(gp + sizeof(struct thread_info)));
/* finally out of configuration and into chaos */
clear_c0_mvpcontrol(MVPCONTROL_VPC);
evpe(EVPE_ENABLE);
}
void __cpuinit prom_init_secondary(void)
{
/* Enable per-cpu interrupts */
/* This is Malta specific: IPI,performance and timer inetrrupts */
write_c0_status((read_c0_status() & ~ST0_IM ) |
(STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP6 | STATUSF_IP7));
}
void __cpuinit prom_smp_finish(void)
{
write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ));
#ifdef CONFIG_MIPS_MT_FPAFF
/* If we have an FPU, enroll ourselves in the FPU-full mask */
if (cpu_has_fpu)
cpu_set(smp_processor_id(), mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
local_irq_enable();
}
void prom_cpus_done(void)
{
}
void core_send_ipi(int cpu, unsigned int action)
{
int i;
unsigned long flags;
int vpflags;
local_irq_save(flags);
vpflags = dvpe(); /* cant access the other CPU's registers whilst MVPE enabled */
switch (action) {
case SMP_CALL_FUNCTION:
i = C_SW1;
break;
case SMP_RESCHEDULE_YOURSELF:
default:
i = C_SW0;
break;
}
/* 1:1 mapping of vpe and tc... */
settc(cpu);
write_vpe_c0_cause(read_vpe_c0_cause() | i);
evpe(vpflags);
local_irq_restore(flags);
}

View File

@ -37,7 +37,6 @@
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/system.h> #include <asm/system.h>
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/time.h> #include <asm/time.h>
#ifdef CONFIG_MIPS_MT_SMTC #ifdef CONFIG_MIPS_MT_SMTC
@ -84,6 +83,16 @@ static inline void set_cpu_sibling_map(int cpu)
cpu_set(cpu, cpu_sibling_map[cpu]); cpu_set(cpu, cpu_sibling_map[cpu]);
} }
/* Platform SMP method table; registered once during early boot. */
struct plat_smp_ops *mp_ops;

/*
 * Install the platform's SMP method table.  Generic SMP code dispatches
 * through mp_ops, so this must run before smp_prepare_cpus().
 */
__cpuinit void register_smp_ops(struct plat_smp_ops *ops)
{
	/*
	 * Warn only when an already-registered table is being replaced;
	 * the previous check tested 'ops' and therefore warned on every
	 * valid registration.
	 */
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}
/* /*
* First C code run on the secondary CPUs after being started up by * First C code run on the secondary CPUs after being started up by
* the master. * the master.
@ -100,7 +109,7 @@ asmlinkage __cpuinit void start_secondary(void)
cpu_report(); cpu_report();
per_cpu_trap_init(); per_cpu_trap_init();
mips_clockevent_init(); mips_clockevent_init();
prom_init_secondary(); mp_ops->init_secondary();
/* /*
* XXX parity protection should be folded in here when it's converted * XXX parity protection should be folded in here when it's converted
@ -112,7 +121,7 @@ asmlinkage __cpuinit void start_secondary(void)
cpu = smp_processor_id(); cpu = smp_processor_id();
cpu_data[cpu].udelay_val = loops_per_jiffy; cpu_data[cpu].udelay_val = loops_per_jiffy;
prom_smp_finish(); mp_ops->smp_finish();
set_cpu_sibling_map(cpu); set_cpu_sibling_map(cpu);
cpu_set(cpu, cpu_callin_map); cpu_set(cpu, cpu_callin_map);
@ -184,7 +193,7 @@ int smp_call_function_mask(cpumask_t mask, void (*func) (void *info),
smp_mb(); smp_mb();
/* Send a message to all other CPUs and wait for them to respond */ /* Send a message to all other CPUs and wait for them to respond */
core_send_ipi_mask(mask, SMP_CALL_FUNCTION); mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
/* Wait for response */ /* Wait for response */
/* FIXME: lock-up detection, backtrace on lock-up */ /* FIXME: lock-up detection, backtrace on lock-up */
@ -278,7 +287,7 @@ void smp_send_stop(void)
void __init smp_cpus_done(unsigned int max_cpus) void __init smp_cpus_done(unsigned int max_cpus)
{ {
prom_cpus_done(); mp_ops->cpus_done();
} }
/* called from main before smp_init() */ /* called from main before smp_init() */
@ -286,7 +295,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
{ {
init_new_context(current, &init_mm); init_new_context(current, &init_mm);
current_thread_info()->cpu = 0; current_thread_info()->cpu = 0;
plat_prepare_cpus(max_cpus); mp_ops->prepare_cpus(max_cpus);
set_cpu_sibling_map(0); set_cpu_sibling_map(0);
#ifndef CONFIG_HOTPLUG_CPU #ifndef CONFIG_HOTPLUG_CPU
cpu_present_map = cpu_possible_map; cpu_present_map = cpu_possible_map;
@ -325,7 +334,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
if (IS_ERR(idle)) if (IS_ERR(idle))
panic(KERN_ERR "Fork failed for CPU %d", cpu); panic(KERN_ERR "Fork failed for CPU %d", cpu);
prom_boot_secondary(cpu, idle); mp_ops->boot_secondary(cpu, idle);
/* /*
* Trust is futile. We should really have timeouts ... * Trust is futile. We should really have timeouts ...

View File

@ -14,7 +14,6 @@
#include <asm/system.h> #include <asm/system.h>
#include <asm/hardirq.h> #include <asm/hardirq.h>
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/mipsregs.h> #include <asm/mipsregs.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <linux/proc_fs.h> #include <linux/proc_fs.h>

View File

@ -16,7 +16,6 @@
#include <asm/hazards.h> #include <asm/hazards.h>
#include <asm/irq.h> #include <asm/irq.h>
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/mipsregs.h> #include <asm/mipsregs.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <asm/time.h> #include <asm/time.h>

View File

@ -250,6 +250,8 @@ void __init mips_ejtag_setup(void)
flush_icache_range((unsigned long)base, (unsigned long)base + 0x80); flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
} }
extern struct plat_smp_ops msmtc_smp_ops;
void __init prom_init(void) void __init prom_init(void)
{ {
prom_argc = fw_arg0; prom_argc = fw_arg0;
@ -416,4 +418,10 @@ void __init prom_init(void)
#ifdef CONFIG_SERIAL_8250_CONSOLE #ifdef CONFIG_SERIAL_8250_CONSOLE
console_config(); console_config();
#endif #endif
#ifdef CONFIG_MIPS_MT_SMP
register_smp_ops(&vsmp_smp_ops);
#endif
#ifdef CONFIG_MIPS_MT_SMTC
register_smp_ops(&msmtc_smp_ops);
#endif
} }

View File

@ -15,26 +15,24 @@
* Cause the specified action to be performed on a targeted "CPU" * Cause the specified action to be performed on a targeted "CPU"
*/ */
void core_send_ipi(int cpu, unsigned int action) static void msmtc_send_ipi_single(int cpu, unsigned int action)
{ {
/* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */ /* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */
smtc_send_ipi(cpu, LINUX_SMP_IPI, action); smtc_send_ipi(cpu, LINUX_SMP_IPI, action);
} }
/* static void msmtc_send_ipi_mask(cpumask_t mask, unsigned int action)
* Platform "CPU" startup hook
*/
void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle)
{ {
smtc_boot_secondary(cpu, idle); unsigned int i;
for_each_cpu_mask(i, mask)
msmtc_send_ipi_single(i, action);
} }
/* /*
* Post-config but pre-boot cleanup entry point * Post-config but pre-boot cleanup entry point
*/ */
static void __cpuinit msmtc_init_secondary(void)
void __cpuinit prom_init_secondary(void)
{ {
void smtc_init_secondary(void); void smtc_init_secondary(void);
int myvpe; int myvpe;
@ -54,29 +52,17 @@ void __cpuinit prom_init_secondary(void)
} }
/* /*
* Platform SMP pre-initialization * Platform "CPU" startup hook
*
* As noted above, we can assume a single CPU for now
* but it may be multithreaded.
*/ */
static void __cpuinit msmtc_boot_secondary(int cpu, struct task_struct *idle)
void __cpuinit plat_smp_setup(void)
{ {
if (read_c0_config3() & (1<<2)) smtc_boot_secondary(cpu, idle);
mipsmt_build_cpu_map(0);
}
void __init plat_prepare_cpus(unsigned int max_cpus)
{
if (read_c0_config3() & (1<<2))
mipsmt_prepare_cpus();
} }
/* /*
* SMP initialization finalization entry point * SMP initialization finalization entry point
*/ */
static void __cpuinit msmtc_smp_finish(void)
void __cpuinit prom_smp_finish(void)
{ {
smtc_smp_finish(); smtc_smp_finish();
} }
@ -85,10 +71,38 @@ void __cpuinit prom_smp_finish(void)
* Hook for after all CPUs are online * Hook for after all CPUs are online
*/ */
void prom_cpus_done(void) static void msmtc_cpus_done(void)
{ {
} }
/*
 * Platform SMP pre-initialization
 *
 * As noted above, we can assume a single CPU for now
 * but it may be multithreaded.
 */
static void __init msmtc_smp_setup(void)
{
	/* build the logical CPU/TC map; presumably 0 = first CPU number
	   to assign — TODO confirm against mipsmt_build_cpu_map() */
	mipsmt_build_cpu_map(0);
}
/*
 * Pre-boot SMP preparation: hand off to the common MT/SMTC setup.
 * max_cpus is unused here; mipsmt_prepare_cpus() takes no arguments.
 */
static void __init msmtc_prepare_cpus(unsigned int max_cpus)
{
	mipsmt_prepare_cpus();
}
/* SMTC method table, registered from the board's prom_init(). */
struct plat_smp_ops msmtc_smp_ops = {
	.send_ipi_single	= msmtc_send_ipi_single,
	.send_ipi_mask		= msmtc_send_ipi_mask,
	.init_secondary		= msmtc_init_secondary,
	.smp_finish		= msmtc_smp_finish,
	.cpus_done		= msmtc_cpus_done,
	.boot_secondary		= msmtc_boot_secondary,
	.smp_setup		= msmtc_smp_setup,
	.prepare_cpus		= msmtc_prepare_cpus,
};
#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
/* /*
* IRQ affinity hook * IRQ affinity hook

View File

@ -21,6 +21,6 @@ obj-y := sim_platform.o sim_setup.o sim_mem.o sim_time.o sim_int.o \
sim_cmdline.o sim_cmdline.o
obj-$(CONFIG_EARLY_PRINTK) += sim_console.o obj-$(CONFIG_EARLY_PRINTK) += sim_console.o
obj-$(CONFIG_SMP) += sim_smp.o obj-$(CONFIG_MIPS_MT_SMTC) += sim_smtc.o
EXTRA_CFLAGS += -Werror EXTRA_CFLAGS += -Werror

View File

@ -60,6 +60,8 @@ void __init plat_mem_setup(void)
#endif #endif
} }
extern struct plat_smp_ops ssmtc_smp_ops;
void __init prom_init(void) void __init prom_init(void)
{ {
set_io_port_base(0xbfd00000); set_io_port_base(0xbfd00000);
@ -67,8 +69,20 @@ void __init prom_init(void)
pr_info("\nLINUX started...\n"); pr_info("\nLINUX started...\n");
prom_init_cmdline(); prom_init_cmdline();
prom_meminit(); prom_meminit();
}
#ifdef CONFIG_MIPS_MT_SMP
if (cpu_has_mipsmt)
register_smp_ops(&vsmp_smp_ops);
else
register_smp_ops(&up_smp_ops);
#endif
#ifdef CONFIG_MIPS_MT_SMTC
if (cpu_has_mipsmt)
register_smp_ops(&ssmtc_smp_ops);
else
register_smp_ops(&up_smp_ops);
#endif
}
static void __init serial_init(void) static void __init serial_init(void)
{ {

View File

@ -16,7 +16,7 @@
* *
*/ */
/* /*
* Simulator Platform-specific hooks for SMP operation * Simulator Platform-specific hooks for SMTC operation
*/ */
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/sched.h> #include <linux/sched.h>
@ -29,65 +29,72 @@
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/system.h> #include <asm/system.h>
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/smtc_ipi.h> #include <asm/smtc_ipi.h>
#endif /* CONFIG_MIPS_MT_SMTC */
/* VPE/SMP Prototype implements platform interfaces directly */ /* VPE/SMP Prototype implements platform interfaces directly */
#if !defined(CONFIG_MIPS_MT_SMP)
/* /*
* Cause the specified action to be performed on a targeted "CPU" * Cause the specified action to be performed on a targeted "CPU"
*/ */
void core_send_ipi(int cpu, unsigned int action) static void ssmtc_send_ipi_single(int cpu, unsigned int action)
{ {
#ifdef CONFIG_MIPS_MT_SMTC
smtc_send_ipi(cpu, LINUX_SMP_IPI, action); smtc_send_ipi(cpu, LINUX_SMP_IPI, action);
#endif /* CONFIG_MIPS_MT_SMTC */ /* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */
/* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */
} }
/* static inline void ssmtc_send_ipi_mask(cpumask_t mask, unsigned int action)
* Platform "CPU" startup hook
*/
void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle)
{ {
#ifdef CONFIG_MIPS_MT_SMTC unsigned int i;
smtc_boot_secondary(cpu, idle);
#endif /* CONFIG_MIPS_MT_SMTC */ for_each_cpu_mask(i, mask)
ssmtc_send_ipi_single(i, action);
} }
/* /*
* Post-config but pre-boot cleanup entry point * Post-config but pre-boot cleanup entry point
*/ */
static void __cpuinit ssmtc_init_secondary(void)
void __cpuinit prom_init_secondary(void)
{ {
#ifdef CONFIG_MIPS_MT_SMTC
void smtc_init_secondary(void); void smtc_init_secondary(void);
smtc_init_secondary(); smtc_init_secondary();
#endif /* CONFIG_MIPS_MT_SMTC */
} }
void plat_smp_setup(void) /*
* SMP initialization finalization entry point
*/
/* SMP initialization finalization entry point. */
static void __cpuinit ssmtc_smp_finish(void)
{
	smtc_smp_finish();
}
/*
 * Hook for after all CPUs are online; nothing to do for the simulator.
 */
static void ssmtc_cpus_done(void)
{
}
/*
 * Platform "CPU" startup hook: delegate to the common SMTC secondary
 * boot path.
 */
static void __cpuinit ssmtc_boot_secondary(int cpu, struct task_struct *idle)
{
	smtc_boot_secondary(cpu, idle);
}
static void __init ssmtc_smp_setup(void)
{ {
#ifdef CONFIG_MIPS_MT_SMTC
if (read_c0_config3() & (1 << 2)) if (read_c0_config3() & (1 << 2))
mipsmt_build_cpu_map(0); mipsmt_build_cpu_map(0);
#endif /* CONFIG_MIPS_MT_SMTC */
} }
/* /*
* Platform SMP pre-initialization * Platform SMP pre-initialization
*/ */
static void ssmtc_prepare_cpus(unsigned int max_cpus)
void plat_prepare_cpus(unsigned int max_cpus)
{ {
#ifdef CONFIG_MIPS_MT_SMTC
/* /*
* As noted above, we can assume a single CPU for now * As noted above, we can assume a single CPU for now
* but it may be multithreaded. * but it may be multithreaded.
@ -96,28 +103,15 @@ void plat_prepare_cpus(unsigned int max_cpus)
if (read_c0_config3() & (1 << 2)) { if (read_c0_config3() & (1 << 2)) {
mipsmt_prepare_cpus(); mipsmt_prepare_cpus();
} }
#endif /* CONFIG_MIPS_MT_SMTC */
} }
/* struct plat_smp_ops ssmtc_smp_ops = {
* SMP initialization finalization entry point .send_ipi_single = ssmtc_send_ipi_single,
*/ .send_ipi_mask = ssmtc_send_ipi_mask,
.init_secondary = ssmtc_init_secondary,
void __cpuinit prom_smp_finish(void) .smp_finish = ssmtc_smp_finish,
{ .cpus_done = ssmtc_cpus_done,
#ifdef CONFIG_MIPS_MT_SMTC .boot_secondary = ssmtc_boot_secondary,
smtc_smp_finish(); .smp_setup = ssmtc_smp_setup,
#endif /* CONFIG_MIPS_MT_SMTC */ .prepare_cpus = ssmtc_prepare_cpus,
} };
/*
* Hook for after all CPUs are online
*/
void prom_cpus_done(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
#endif /* CONFIG_MIPS_MT_SMTC */
}
#endif /* CONFIG_MIPS32R2_MT_SMP */

View File

@ -19,6 +19,7 @@
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/reboot.h> #include <asm/reboot.h>
#include <asm/smp-ops.h>
#include <asm/system.h> #include <asm/system.h>
#include <asm/bootinfo.h> #include <asm/bootinfo.h>
#include <asm/pmon.h> #include <asm/pmon.h>
@ -78,6 +79,8 @@ static void prom_halt(void)
__asm__(".set\tmips3\n\t" "wait\n\t" ".set\tmips0"); __asm__(".set\tmips3\n\t" "wait\n\t" ".set\tmips0");
} }
extern struct plat_smp_ops yos_smp_ops;
/* /*
* Init routine which accepts the variables from PMON * Init routine which accepts the variables from PMON
*/ */
@ -127,6 +130,8 @@ void __init prom_init(void)
} }
prom_grab_secondary(); prom_grab_secondary();
register_smp_ops(&yos_smp_ops);
} }
void __init prom_free_prom_memory(void) void __init prom_free_prom_memory(void)

View File

@ -42,70 +42,6 @@ void __init prom_grab_secondary(void)
launchstack + LAUNCHSTACK_SIZE, 0); launchstack + LAUNCHSTACK_SIZE, 0);
} }
/*
* Detect available CPUs, populate phys_cpu_present_map before smp_init
*
* We don't want to start the secondary CPU yet nor do we have a nice probing
* feature in PMON so we just assume presence of the secondary core.
*/
void __init plat_smp_setup(void)
{
int i;
cpus_clear(phys_cpu_present_map);
for (i = 0; i < 2; i++) {
cpu_set(i, phys_cpu_present_map);
__cpu_number_map[i] = i;
__cpu_logical_map[i] = i;
}
}
void __init plat_prepare_cpus(unsigned int max_cpus)
{
/*
* Be paranoid. Enable the IPI only if we're really about to go SMP.
*/
if (cpus_weight(cpu_possible_map))
set_c0_status(STATUSF_IP5);
}
/*
* Firmware CPU startup hook
* Complicated by PMON's weird interface which tries to minimic the UNIX fork.
* It launches the next * available CPU and copies some information on the
* stack so the first thing we do is throw away that stuff and load useful
* values into the registers ...
*/
void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle)
{
unsigned long gp = (unsigned long) task_thread_info(idle);
unsigned long sp = __KSTK_TOS(idle);
secondary_sp = sp;
secondary_gp = gp;
spin_unlock(&launch_lock);
}
/* Hook for after all CPUs are online */
void prom_cpus_done(void)
{
}
/*
* After we've done initial boot, this function is called to allow the
* board code to clean up state, if needed
*/
void __cpuinit prom_init_secondary(void)
{
set_c0_status(ST0_CO | ST0_IE | ST0_IM);
}
void __cpuinit prom_smp_finish(void)
{
}
void titan_mailbox_irq(void) void titan_mailbox_irq(void)
{ {
int cpu = smp_processor_id(); int cpu = smp_processor_id();
@ -133,7 +69,7 @@ void titan_mailbox_irq(void)
/* /*
* Send inter-processor interrupt * Send inter-processor interrupt
*/ */
void core_send_ipi(int cpu, unsigned int action) static void yos_send_ipi_single(int cpu, unsigned int action)
{ {
/* /*
* Generate an INTMSG so that it can be sent over to the * Generate an INTMSG so that it can be sent over to the
@ -159,3 +95,86 @@ void core_send_ipi(int cpu, unsigned int action)
break; break;
} }
} }
/* Fan an IPI out to each CPU that is set in @mask. */
static void yos_send_ipi_mask(cpumask_t mask, unsigned int action)
{
	unsigned int cpu;

	for_each_cpu_mask(cpu, mask)
		yos_send_ipi_single(cpu, action);
}
/*
 * After we've done initial boot, this function is called to allow the
 * board code to clean up state, if needed
 */
static void __cpuinit yos_init_secondary(void)
{
	/* enable interrupts (ST0_IE), unmask all IM lines (ST0_IM);
	 * NOTE(review): ST0_CO semantics are CPU specific — confirm */
	set_c0_status(ST0_CO | ST0_IE | ST0_IM);
}
/* No final per-CPU bring-up work is needed on this platform. */
static void __cpuinit yos_smp_finish(void)
{
}
/* Hook for after all CPUs are online; nothing to do here. */
static void yos_cpus_done(void)
{
}
/*
 * Firmware CPU startup hook
 * Complicated by PMON's weird interface which tries to mimic the UNIX fork.
 * It launches the next available CPU and copies some information on the
 * stack so the first thing we do is throw away that stuff and load useful
 * values into the registers ...
 */
static void __cpuinit yos_boot_secondary(int cpu, struct task_struct *idle)
{
	/* gp points at the idle task's thread_info, sp at its kernel stack top */
	unsigned long gp = (unsigned long) task_thread_info(idle);
	unsigned long sp = __KSTK_TOS(idle);

	secondary_sp = sp;
	secondary_gp = gp;

	/* NOTE(review): presumably the secondary spins on launch_lock set
	 * up in prom_grab_secondary() and this releases it — confirm */
	spin_unlock(&launch_lock);
}
/*
 * Detect available CPUs, populate phys_cpu_present_map before smp_init
 *
 * We don't want to start the secondary CPU yet nor do we have a nice probing
 * feature in PMON so we just assume presence of the secondary core.
 */
static void __init yos_smp_setup(void)
{
	int i;

	cpus_clear(phys_cpu_present_map);

	/* assume exactly two cores with an identity logical<->physical map */
	for (i = 0; i < 2; i++) {
		cpu_set(i, phys_cpu_present_map);
		__cpu_number_map[i] = i;
		__cpu_logical_map[i] = i;
	}
}
/* Unmask the IP5 inter-processor interrupt ahead of secondary boot. */
static void __init yos_prepare_cpus(unsigned int max_cpus)
{
	/*
	 * Be paranoid.  Enable the IPI only if we're really about to go SMP.
	 * NOTE(review): cpus_weight() counts the boot CPU too, so this
	 * condition appears to be always true — confirm intent.
	 */
	if (cpus_weight(cpu_possible_map))
		set_c0_status(STATUSF_IP5);
}
/* PMON-based SMP method table, registered from the board's prom_init(). */
struct plat_smp_ops yos_smp_ops = {
	.send_ipi_single	= yos_send_ipi_single,
	.send_ipi_mask		= yos_send_ipi_mask,
	.init_secondary		= yos_init_secondary,
	.smp_finish		= yos_smp_finish,
	.cpus_done		= yos_cpus_done,
	.boot_secondary		= yos_boot_secondary,
	.smp_setup		= yos_smp_setup,
	.prepare_cpus		= yos_prepare_cpus,
};

View File

@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive * License. See the file "COPYING" in the main directory of this archive
* for more details. * for more details.
* *
* Copyright (C) 2006 by Ralf Baechle (ralf@linux-mips.org) * Copyright (C) 2006, 07 by Ralf Baechle (ralf@linux-mips.org)
* *
* Symmetric Uniprocessor (TM) Support * Symmetric Uniprocessor (TM) Support
*/ */
@ -13,43 +13,55 @@
/* /*
* Send inter-processor interrupt * Send inter-processor interrupt
*/ */
void core_send_ipi(int cpu, unsigned int action) void up_send_ipi_single(int cpu, unsigned int action)
{ {
panic(KERN_ERR "%s called", __FUNCTION__); panic(KERN_ERR "%s called", __func__);
}
/* UP stub: sending an IPI on a uniprocessor kernel is a fatal bug. */
static inline void up_send_ipi_mask(cpumask_t mask, unsigned int action)
{
	panic(KERN_ERR "%s called", __func__);
}
/* /*
* After we've done initial boot, this function is called to allow the * After we've done initial boot, this function is called to allow the
* board code to clean up state, if needed * board code to clean up state, if needed
*/ */
void __cpuinit prom_init_secondary(void) void __cpuinit up_init_secondary(void)
{ {
} }
void __cpuinit prom_smp_finish(void) void __cpuinit up_smp_finish(void)
{ {
} }
/* Hook for after all CPUs are online */ /* Hook for after all CPUs are online */
void prom_cpus_done(void) void up_cpus_done(void)
{ {
} }
void __init prom_prepare_cpus(unsigned int max_cpus)
{
cpus_clear(phys_cpu_present_map);
}
/* /*
* Firmware CPU startup hook * Firmware CPU startup hook
*/ */
void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle) void __cpuinit up_boot_secondary(int cpu, struct task_struct *idle)
{ {
} }
void __init plat_smp_setup(void) void __init up_smp_setup(void)
{ {
} }
void __init plat_prepare_cpus(unsigned int max_cpus)
void __init up_prepare_cpus(unsigned int max_cpus)
{ {
} }
/* Symmetric Uniprocessor fallback method table (no-op/panic stubs). */
struct plat_smp_ops up_smp_ops = {
	.send_ipi_single	= up_send_ipi_single,
	.send_ipi_mask		= up_send_ipi_mask,
	.init_secondary		= up_init_secondary,
	.smp_finish		= up_smp_finish,
	.cpus_done		= up_cpus_done,
	.boot_secondary		= up_boot_secondary,
	.smp_setup		= up_smp_setup,
	.prepare_cpus		= up_prepare_cpus,
};

View File

@ -27,7 +27,6 @@
#include <asm/sn/hub.h> #include <asm/sn/hub.h>
#include <asm/sn/intr.h> #include <asm/sn/intr.h>
#include <asm/current.h> #include <asm/current.h>
#include <asm/smp.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include <asm/thread_info.h> #include <asm/thread_info.h>

View File

@ -11,7 +11,6 @@
#include <asm/page.h> #include <asm/page.h>
#include <asm/sections.h> #include <asm/sections.h>
#include <asm/smp.h>
#include <asm/sn/types.h> #include <asm/sn/types.h>
#include <asm/sn/arch.h> #include <asm/sn/arch.h>
#include <asm/sn/gda.h> #include <asm/sn/gda.h>

View File

@ -140,62 +140,7 @@ static __init void intr_clear_all(nasid_t nasid)
REMOTE_HUB_CLR_INTR(nasid, i); REMOTE_HUB_CLR_INTR(nasid, i);
} }
void __init plat_smp_setup(void) static void ip27_send_ipi_single(int destid, unsigned int action)
{
cnodeid_t cnode;
for_each_online_node(cnode) {
if (cnode == 0)
continue;
intr_clear_all(COMPACT_TO_NASID_NODEID(cnode));
}
replicate_kernel_text();
/*
* Assumption to be fixed: we're always booted on logical / physical
* processor 0. While we're always running on logical processor 0
* this still means this is physical processor zero; it might for
* example be disabled in the firwware.
*/
alloc_cpupda(0, 0);
}
void __init plat_prepare_cpus(unsigned int max_cpus)
{
/* We already did everything necessary earlier */
}
/*
* Launch a slave into smp_bootstrap(). It doesn't take an argument, and we
* set sp to the kernel stack of the newly created idle process, gp to the proc
* struct so that current_thread_info() will work.
*/
void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle)
{
unsigned long gp = (unsigned long)task_thread_info(idle);
unsigned long sp = __KSTK_TOS(idle);
LAUNCH_SLAVE(cputonasid(cpu), cputoslice(cpu),
(launch_proc_t)MAPPED_KERN_RW_TO_K0(smp_bootstrap),
0, (void *) sp, (void *) gp);
}
void __cpuinit prom_init_secondary(void)
{
per_cpu_init();
local_irq_enable();
}
void __init prom_cpus_done(void)
{
}
void __cpuinit prom_smp_finish(void)
{
}
void core_send_ipi(int destid, unsigned int action)
{ {
int irq; int irq;
@ -219,3 +164,77 @@ void core_send_ipi(int destid, unsigned int action)
*/ */
REMOTE_HUB_SEND_INTR(COMPACT_TO_NASID_NODEID(cpu_to_node(destid)), irq); REMOTE_HUB_SEND_INTR(COMPACT_TO_NASID_NODEID(cpu_to_node(destid)), irq);
} }
/* Deliver @action to every CPU flagged in @mask. */
static void ip27_send_ipi_mask(cpumask_t mask, unsigned int action)
{
	unsigned int cpu;

	for_each_cpu_mask(cpu, mask)
		ip27_send_ipi_single(cpu, action);
}
/* Secondary-CPU init: set up per-CPU data, then allow interrupts. */
static void __cpuinit ip27_init_secondary(void)
{
	per_cpu_init();
	local_irq_enable();
}
/* Nothing further to do once an IP27 secondary has come up. */
static void __cpuinit ip27_smp_finish(void)
{
}
/* Hook for after all CPUs are online; unused on IP27. */
static void __init ip27_cpus_done(void)
{
}
/*
 * Launch a slave into smp_bootstrap().  It doesn't take an argument, and we
 * set sp to the kernel stack of the newly created idle process, gp to the proc
 * struct so that current_thread_info() will work.
 */
static void __cpuinit ip27_boot_secondary(int cpu, struct task_struct *idle)
{
	unsigned long gp = (unsigned long)task_thread_info(idle);
	unsigned long sp = __KSTK_TOS(idle);

	/* ask the hub to start the target nasid/slice at smp_bootstrap */
	LAUNCH_SLAVE(cputonasid(cpu), cputoslice(cpu),
		     (launch_proc_t)MAPPED_KERN_RW_TO_K0(smp_bootstrap),
		     0, (void *) sp, (void *) gp);
}
/* Pre-SMP setup: quiesce remote node interrupts, replicate text, set
 * up the boot CPU's per-CPU data area. */
static void __init ip27_smp_setup(void)
{
	cnodeid_t	cnode;

	/* clear pending interrupts on every node except the boot node */
	for_each_online_node(cnode) {
		if (cnode == 0)
			continue;
		intr_clear_all(COMPACT_TO_NASID_NODEID(cnode));
	}

	replicate_kernel_text();

	/*
	 * Assumption to be fixed: we're always booted on logical / physical
	 * processor 0.  While we're always running on logical processor 0
	 * this still means this is physical processor zero; it might for
	 * example be disabled in the firmware.
	 */
	alloc_cpupda(0, 0);
}
/* Pre-boot preparation hook. */
static void __init ip27_prepare_cpus(unsigned int max_cpus)
{
	/* We already did everything necessary earlier */
}
struct plat_smp_ops ip27_smp_ops = {
.send_ipi_single = ip27_send_ipi_single,
.send_ipi_mask = ip27_send_ipi_mask,
.init_secondary = ip27_init_secondary,
.smp_finish = ip27_smp_finish,
.cpus_done = ip27_cpus_done,
.boot_secondary = ip27_boot_secondary,
.smp_setup = ip27_smp_setup,
.prepare_cpus = ip27_prepare_cpus,
};

View File

@ -23,6 +23,7 @@
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include <asm/io.h> #include <asm/io.h>
#include <asm/fw/cfe/cfe_api.h>
#include <asm/sibyte/sb1250.h> #include <asm/sibyte/sb1250.h>
#include <asm/sibyte/bcm1480_regs.h> #include <asm/sibyte/bcm1480_regs.h>
#include <asm/sibyte/bcm1480_int.h> #include <asm/sibyte/bcm1480_int.h>
@ -67,14 +68,6 @@ void __cpuinit bcm1480_smp_init(void)
change_c0_status(ST0_IM, imask); change_c0_status(ST0_IM, imask);
} }
void __cpuinit bcm1480_smp_finish(void)
{
extern void sb1480_clockevent_init(void);
sb1480_clockevent_init();
local_irq_enable();
}
/* /*
* These are routines for dealing with the sb1250 smp capabilities * These are routines for dealing with the sb1250 smp capabilities
* independent of board/firmware * independent of board/firmware
@ -84,11 +77,105 @@ void __cpuinit bcm1480_smp_finish(void)
* Simple enough; everything is set up, so just poke the appropriate mailbox * Simple enough; everything is set up, so just poke the appropriate mailbox
* register, and we should be set * register, and we should be set
*/ */
void core_send_ipi(int cpu, unsigned int action) static void bcm1480_send_ipi_single(int cpu, unsigned int action)
{ {
__raw_writeq((((u64)action)<< 48), mailbox_0_set_regs[cpu]); __raw_writeq((((u64)action)<< 48), mailbox_0_set_regs[cpu]);
} }
/*
 * Send @action to each CPU in @mask — one mailbox write per target,
 * since there is no broadcast IPI facility.
 */
static void bcm1480_send_ipi_mask(cpumask_t mask, unsigned int action)
{
	unsigned int cpu;

	for_each_cpu_mask(cpu, mask)
		bcm1480_send_ipi_single(cpu, action);
}
/*
 * Code to run on secondary just after probing the CPU
 */
static void __cpuinit bcm1480_init_secondary(void)
{
	extern void bcm1480_smp_init(void);

	/* Sets up this CPU's CP0 interrupt mask (mailbox IPIs etc.). */
	bcm1480_smp_init();
}
/*
 * Do any tidying up before marking online and running the idle
 * loop: bring up this CPU's clock event device and enable local
 * interrupts.
 */
static void __cpuinit bcm1480_smp_finish(void)
{
	extern void sb1480_clockevent_init(void);

	sb1480_clockevent_init();
	local_irq_enable();
	/*
	 * The original tail call to bcm1480_smp_finish() here recursed
	 * into this function unconditionally (stack overflow on every
	 * secondary boot); removed.
	 */
}
/*
 * Final cleanup after all secondaries booted; nothing needed here.
 */
static void bcm1480_cpus_done(void)
{
}
/*
 * Setup the PC, SP, and GP of a secondary processor and start it
 * running!  CFE starts the CPU in smp_bootstrap() with sp/gp taken
 * from the idle task.
 */
static void __cpuinit bcm1480_boot_secondary(int cpu, struct task_struct *idle)
{
	int retval;

	retval = cfe_cpu_start(cpu_logical_map(cpu), &smp_bootstrap,
			       __KSTK_TOS(idle),
			       (unsigned long)task_thread_info(idle), 0);
	if (retval != 0)
		/*
		 * Fixed: message used to name the nonexistent
		 * "cfe_start_cpu" and carried no log level.
		 */
		printk(KERN_ERR "cfe_cpu_start(%i) returned %i\n", cpu, retval);
}
/*
 * Use CFE to find out how many CPUs are available, setting up
 * phys_cpu_present_map and the logical/physical mappings.
 * XXXKW will the boot CPU ever not be physical 0?
 *
 * Common setup before any secondaries are started
 */
static void __init bcm1480_smp_setup(void)
{
	int i, num;

	/* The boot CPU is always logical and physical 0. */
	cpus_clear(phys_cpu_present_map);
	cpu_set(0, phys_cpu_present_map);
	__cpu_number_map[0] = 0;
	__cpu_logical_map[0] = 0;

	/*
	 * cfe_cpu_stop(i) succeeding is taken to mean physical CPU i
	 * exists; record it in the logical<->physical maps.
	 */
	for (i = 1, num = 0; i < NR_CPUS; i++) {
		if (cfe_cpu_stop(i) == 0) {
			cpu_set(i, phys_cpu_present_map);
			__cpu_number_map[i] = ++num;
			__cpu_logical_map[num] = i;
		}
	}
	printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num);
}

/* smp_prepare_cpus() hook; CPU discovery already done in smp_setup. */
static void __init bcm1480_prepare_cpus(unsigned int max_cpus)
{
}
/* BCM1480 (BCM1x55/BCM1x80) platform SMP ops, registered in prom_init(). */
struct plat_smp_ops bcm1480_smp_ops = {
	.send_ipi_single	= bcm1480_send_ipi_single,
	.send_ipi_mask		= bcm1480_send_ipi_mask,
	.init_secondary		= bcm1480_init_secondary,
	.smp_finish		= bcm1480_smp_finish,
	.cpus_done		= bcm1480_cpus_done,
	.boot_secondary		= bcm1480_boot_secondary,
	.smp_setup		= bcm1480_smp_setup,
	.prepare_cpus		= bcm1480_prepare_cpus,
};
void bcm1480_mailbox_interrupt(void) void bcm1480_mailbox_interrupt(void)
{ {
int cpu = smp_processor_id(); int cpu = smp_processor_id();

View File

@ -1,3 +1,2 @@
lib-y = setup.o lib-y = setup.o
lib-$(CONFIG_SMP) += smp.o
lib-$(CONFIG_SIBYTE_CFE_CONSOLE) += console.o lib-$(CONFIG_SIBYTE_CFE_CONSOLE) += console.o

View File

@ -28,6 +28,7 @@
#include <asm/bootinfo.h> #include <asm/bootinfo.h>
#include <asm/reboot.h> #include <asm/reboot.h>
#include <asm/sibyte/board.h> #include <asm/sibyte/board.h>
#include <asm/smp-ops.h>
#include <asm/fw/cfe/cfe_api.h> #include <asm/fw/cfe/cfe_api.h>
#include <asm/fw/cfe/cfe_error.h> #include <asm/fw/cfe/cfe_error.h>
@ -232,6 +233,9 @@ static int __init initrd_setup(char *str)
#endif #endif
extern struct plat_smp_ops sb_smp_ops;
extern struct plat_smp_ops bcm1480_smp_ops;
/* /*
* prom_init is called just after the cpu type is determined, from setup_arch() * prom_init is called just after the cpu type is determined, from setup_arch()
*/ */
@ -340,6 +344,13 @@ void __init prom_init(void)
arcs_cmdline[CL_SIZE-1] = 0; arcs_cmdline[CL_SIZE-1] = 0;
prom_meminit(); prom_meminit();
#if defined(CONFIG_SIBYTE_BCM112X) || defined(CONFIG_SIBYTE_SB1250)
register_smp_ops(&sb_smp_ops);
#endif
#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
register_smp_ops(&bcm1480_smp_ops);
#endif
} }
void __init prom_free_prom_memory(void) void __init prom_free_prom_memory(void)

View File

@ -1,110 +0,0 @@
/*
* Copyright (C) 2000, 2001, 2002, 2003 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/processor.h>
#include <asm/fw/cfe/cfe_api.h>
#include <asm/fw/cfe/cfe_error.h>
/*
* Use CFE to find out how many CPUs are available, setting up
* phys_cpu_present_map and the logical/physical mappings.
* XXXKW will the boot CPU ever not be physical 0?
*
* Common setup before any secondaries are started
*/
void __init plat_smp_setup(void)
{
	int i, num;

	/* Boot CPU is logical and physical 0. */
	cpus_clear(phys_cpu_present_map);
	cpu_set(0, phys_cpu_present_map);
	__cpu_number_map[0] = 0;
	__cpu_logical_map[0] = 0;

	/* cfe_cpu_stop(i) == 0 => physical CPU i exists; map it. */
	for (i = 1, num = 0; i < NR_CPUS; i++) {
		if (cfe_cpu_stop(i) == 0) {
			cpu_set(i, phys_cpu_present_map);
			__cpu_number_map[i] = ++num;
			__cpu_logical_map[num] = i;
		}
	}
	printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num);
}

/* Nothing further to prepare; discovery already done in plat_smp_setup(). */
void __init plat_prepare_cpus(unsigned int max_cpus)
{
}
/*
 * Setup the PC, SP, and GP of a secondary processor and start it
 * running!
 */
void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle)
{
	int retval;

	/* CFE starts the CPU in smp_bootstrap with sp/gp from the idle task. */
	retval = cfe_cpu_start(cpu_logical_map(cpu), &smp_bootstrap,
			       __KSTK_TOS(idle),
			       (unsigned long)task_thread_info(idle), 0);
	if (retval != 0)
		printk("cfe_start_cpu(%i) returned %i\n" , cpu, retval);
}
/*
 * Code to run on secondary just after probing the CPU
 */
void __cpuinit prom_init_secondary(void)
{
	/* Dispatch to the chip-specific init chosen at compile time. */
#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
	extern void bcm1480_smp_init(void);
	bcm1480_smp_init();
#elif defined(CONFIG_SIBYTE_SB1250)
	extern void sb1250_smp_init(void);
	sb1250_smp_init();
#else
#error invalid SMP configuration
#endif
}
/*
 * Do any tidying up before marking online and running the idle
 * loop
 */
void __cpuinit prom_smp_finish(void)
{
	/* Dispatch to the chip-specific finish chosen at compile time. */
#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
	extern void bcm1480_smp_finish(void);
	bcm1480_smp_finish();
#elif defined(CONFIG_SIBYTE_SB1250)
	extern void sb1250_smp_finish(void);
	sb1250_smp_finish();
#else
#error invalid SMP configuration
#endif
}

/*
 * Final cleanup after all secondaries booted; nothing to do.
 */
void prom_cpus_done(void)
{
}

View File

@ -24,6 +24,7 @@
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include <asm/io.h> #include <asm/io.h>
#include <asm/fw/cfe/cfe_api.h>
#include <asm/sibyte/sb1250.h> #include <asm/sibyte/sb1250.h>
#include <asm/sibyte/sb1250_regs.h> #include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/sb1250_int.h> #include <asm/sibyte/sb1250_int.h>
@ -55,14 +56,6 @@ void __cpuinit sb1250_smp_init(void)
change_c0_status(ST0_IM, imask); change_c0_status(ST0_IM, imask);
} }
void __cpuinit sb1250_smp_finish(void)
{
extern void sb1250_clockevent_init(void);
sb1250_clockevent_init();
local_irq_enable();
}
/* /*
* These are routines for dealing with the sb1250 smp capabilities * These are routines for dealing with the sb1250 smp capabilities
* independent of board/firmware * independent of board/firmware
@ -72,11 +65,104 @@ void __cpuinit sb1250_smp_finish(void)
* Simple enough; everything is set up, so just poke the appropriate mailbox * Simple enough; everything is set up, so just poke the appropriate mailbox
* register, and we should be set * register, and we should be set
*/ */
void core_send_ipi(int cpu, unsigned int action) static void sb1250_send_ipi_single(int cpu, unsigned int action)
{ {
__raw_writeq((((u64)action) << 48), mailbox_set_regs[cpu]); __raw_writeq((((u64)action) << 48), mailbox_set_regs[cpu]);
} }
/*
 * Send @action to every CPU in @mask, one mailbox write per target.
 *
 * Not "inline": this function's address is stored in
 * sb_smp_ops.send_ipi_mask, so an out-of-line copy is required anyway;
 * the qualifier was ineffective and inconsistent with the ip27/bcm1480
 * counterparts.
 */
static void sb1250_send_ipi_mask(cpumask_t mask, unsigned int action)
{
	unsigned int i;

	for_each_cpu_mask(i, mask)
		sb1250_send_ipi_single(i, action);
}
/*
 * Code to run on secondary just after probing the CPU
 */
static void __cpuinit sb1250_init_secondary(void)
{
	extern void sb1250_smp_init(void);

	/* Sets up this CPU's CP0 interrupt mask (mailbox IPIs etc.). */
	sb1250_smp_init();
}

/*
 * Do any tidying up before marking online and running the idle
 * loop: start the per-CPU clock event device, then enable interrupts.
 */
static void __cpuinit sb1250_smp_finish(void)
{
	extern void sb1250_clockevent_init(void);

	sb1250_clockevent_init();
	local_irq_enable();
}

/*
 * Final cleanup after all secondaries booted; nothing needed here.
 */
static void sb1250_cpus_done(void)
{
}
/*
 * Setup the PC, SP, and GP of a secondary processor and start it
 * running!  CFE starts the CPU in smp_bootstrap() with sp/gp taken
 * from the idle task.
 */
static void __cpuinit sb1250_boot_secondary(int cpu, struct task_struct *idle)
{
	int retval;

	retval = cfe_cpu_start(cpu_logical_map(cpu), &smp_bootstrap,
			       __KSTK_TOS(idle),
			       (unsigned long)task_thread_info(idle), 0);
	if (retval != 0)
		/*
		 * Fixed: message used to name the nonexistent
		 * "cfe_start_cpu" and carried no log level.
		 */
		printk(KERN_ERR "cfe_cpu_start(%i) returned %i\n", cpu, retval);
}
/*
 * Use CFE to find out how many CPUs are available, setting up
 * phys_cpu_present_map and the logical/physical mappings.
 * XXXKW will the boot CPU ever not be physical 0?
 *
 * Common setup before any secondaries are started
 */
static void __init sb1250_smp_setup(void)
{
	int i, num;

	/* The boot CPU is always logical and physical 0. */
	cpus_clear(phys_cpu_present_map);
	cpu_set(0, phys_cpu_present_map);
	__cpu_number_map[0] = 0;
	__cpu_logical_map[0] = 0;

	/*
	 * cfe_cpu_stop(i) succeeding is taken to mean physical CPU i
	 * exists; record it in the logical<->physical maps.
	 */
	for (i = 1, num = 0; i < NR_CPUS; i++) {
		if (cfe_cpu_stop(i) == 0) {
			cpu_set(i, phys_cpu_present_map);
			__cpu_number_map[i] = ++num;
			__cpu_logical_map[num] = i;
		}
	}
	printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num);
}

/* smp_prepare_cpus() hook; CPU discovery already done in smp_setup. */
static void __init sb1250_prepare_cpus(unsigned int max_cpus)
{
}
/* SB1250/BCM112x platform SMP ops, registered in prom_init(). */
struct plat_smp_ops sb_smp_ops = {
	.send_ipi_single	= sb1250_send_ipi_single,
	.send_ipi_mask		= sb1250_send_ipi_mask,
	.init_secondary		= sb1250_init_secondary,
	.smp_finish		= sb1250_smp_finish,
	.cpus_done		= sb1250_cpus_done,
	.boot_secondary		= sb1250_boot_secondary,
	.smp_setup		= sb1250_smp_setup,
	.prepare_cpus		= sb1250_prepare_cpus,
};
void sb1250_mailbox_interrupt(void) void sb1250_mailbox_interrupt(void)
{ {
int cpu = smp_processor_id(); int cpu = smp_processor_id();

View File

@ -48,12 +48,10 @@ extern unsigned int zbbus_mhz;
extern void sb1250_time_init(void); extern void sb1250_time_init(void);
extern void sb1250_mask_irq(int cpu, int irq); extern void sb1250_mask_irq(int cpu, int irq);
extern void sb1250_unmask_irq(int cpu, int irq); extern void sb1250_unmask_irq(int cpu, int irq);
extern void sb1250_smp_finish(void);
extern void bcm1480_time_init(void); extern void bcm1480_time_init(void);
extern void bcm1480_mask_irq(int cpu, int irq); extern void bcm1480_mask_irq(int cpu, int irq);
extern void bcm1480_unmask_irq(int cpu, int irq); extern void bcm1480_unmask_irq(int cpu, int irq);
extern void bcm1480_smp_finish(void);
#define AT_spin \ #define AT_spin \
__asm__ __volatile__ ( \ __asm__ __volatile__ ( \

View File

@ -0,0 +1,56 @@
/*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of this
* archive for more details.
*
* Copyright (C) 2000 - 2001 by Kanoj Sarcar (kanoj@sgi.com)
* Copyright (C) 2000 - 2001 by Silicon Graphics, Inc.
* Copyright (C) 2000, 2001, 2002 Ralf Baechle
* Copyright (C) 2000, 2001 Broadcom Corporation
*/
#ifndef __ASM_SMP_OPS_H
#define __ASM_SMP_OPS_H
#ifdef CONFIG_SMP
#include <linux/cpumask.h>
struct plat_smp_ops {
void (*send_ipi_single)(int cpu, unsigned int action);
void (*send_ipi_mask)(cpumask_t mask, unsigned int action);
void (*init_secondary)(void);
void (*smp_finish)(void);
void (*cpus_done)(void);
void (*boot_secondary)(int cpu, struct task_struct *idle);
void (*smp_setup)(void);
void (*prepare_cpus)(unsigned int max_cpus);
};
extern void register_smp_ops(struct plat_smp_ops *ops);
static inline void plat_smp_setup(void)
{
extern struct plat_smp_ops *mp_ops; /* private */
mp_ops->smp_setup();
}
#else /* !CONFIG_SMP */
struct plat_smp_ops;
static inline void plat_smp_setup(void)
{
/* UP, nothing to do ... */
}
static inline void register_smp_ops(struct plat_smp_ops *ops)
{
}
#endif /* !CONFIG_SMP */
extern struct plat_smp_ops up_smp_ops;
extern struct plat_smp_ops vsmp_smp_ops;
#endif /* __ASM_SMP_OPS_H */

View File

@ -11,14 +11,13 @@
#ifndef __ASM_SMP_H #ifndef __ASM_SMP_H
#define __ASM_SMP_H #define __ASM_SMP_H
#ifdef CONFIG_SMP
#include <linux/bitops.h> #include <linux/bitops.h>
#include <linux/linkage.h> #include <linux/linkage.h>
#include <linux/threads.h> #include <linux/threads.h>
#include <linux/cpumask.h> #include <linux/cpumask.h>
#include <asm/atomic.h> #include <asm/atomic.h>
#include <asm/smp-ops.h>
extern int smp_num_siblings; extern int smp_num_siblings;
extern cpumask_t cpu_sibling_map[]; extern cpumask_t cpu_sibling_map[];
@ -52,56 +51,6 @@ extern struct call_data_struct *call_data;
extern cpumask_t phys_cpu_present_map; extern cpumask_t phys_cpu_present_map;
#define cpu_possible_map phys_cpu_present_map #define cpu_possible_map phys_cpu_present_map
/*
* These are defined by the board-specific code.
*/
/*
* Cause the function described by call_data to be executed on the passed
* cpu. When the function has finished, increment the finished field of
* call_data.
*/
extern void core_send_ipi(int cpu, unsigned int action);
static inline void core_send_ipi_mask(cpumask_t mask, unsigned int action)
{
unsigned int i;
for_each_cpu_mask(i, mask)
core_send_ipi(i, action);
}
/*
* Firmware CPU startup hook
*/
extern void prom_boot_secondary(int cpu, struct task_struct *idle);
/*
* After we've done initial boot, this function is called to allow the
* board code to clean up state, if needed
*/
extern void prom_init_secondary(void);
/*
* Populate cpu_possible_map before smp_init, called from setup_arch.
*/
extern void plat_smp_setup(void);
/*
* Called in smp_prepare_cpus.
*/
extern void plat_prepare_cpus(unsigned int max_cpus);
/*
* Last chance for the board code to finish SMP initialization before
* the CPU is "online".
*/
extern void prom_smp_finish(void);
/* Hook for after all CPUs are online */
extern void prom_cpus_done(void);
extern void asmlinkage smp_bootstrap(void); extern void asmlinkage smp_bootstrap(void);
/* /*
@ -111,11 +60,11 @@ extern void asmlinkage smp_bootstrap(void);
*/ */
static inline void smp_send_reschedule(int cpu) static inline void smp_send_reschedule(int cpu)
{ {
core_send_ipi(cpu, SMP_RESCHEDULE_YOURSELF); extern struct plat_smp_ops *mp_ops; /* private */
mp_ops->send_ipi_single(cpu, SMP_RESCHEDULE_YOURSELF);
} }
extern asmlinkage void smp_call_function_interrupt(void); extern asmlinkage void smp_call_function_interrupt(void);
#endif /* CONFIG_SMP */
#endif /* __ASM_SMP_H */ #endif /* __ASM_SMP_H */