/*
 * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
 * reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the NetLogic
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/irq.h>

#include <asm/mmu_context.h>

#include <asm/netlogic/interrupt.h>
#include <asm/netlogic/mips-extns.h>
#include <asm/netlogic/haldefs.h>
#include <asm/netlogic/common.h>

#if defined(CONFIG_CPU_XLP)
#include <asm/netlogic/xlp-hal/iomap.h>
#include <asm/netlogic/xlp-hal/xlp.h>
#include <asm/netlogic/xlp-hal/pic.h>
#elif defined(CONFIG_CPU_XLR)
#include <asm/netlogic/xlr/iomap.h>
#include <asm/netlogic/xlr/pic.h>
#include <asm/netlogic/xlr/xlr.h>
#else
#error "Unknown CPU"
#endif

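/*
 * Send an IPI to one CPU: translate the logical CPU to its hardware
 * thread id, look up the PIC of the node that owns it, and raise the
 * IPI IRQ(s) matching the requested action bits.
 */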
void nlm_send_ipi_single(int logical_cpu, unsigned int action)
{
	int cpu, node;
	uint64_t picbase;

	cpu = cpu_logical_map(logical_cpu);
	node = cpu / NLM_CPUS_PER_NODE;
	picbase = nlm_get_node(node)->picbase;

	if (action & SMP_CALL_FUNCTION)
		nlm_pic_send_ipi(picbase, cpu, IRQ_IPI_SMP_FUNCTION, 0);
	if (action & SMP_RESCHEDULE_YOURSELF)
		nlm_pic_send_ipi(picbase, cpu, IRQ_IPI_SMP_RESCHEDULE, 0);
}

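/* Send the IPI to every CPU in the mask, one CPU at a time. */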
void nlm_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
	int cpu;

	for_each_cpu(cpu, mask) {
		nlm_send_ipi_single(cpu, action);
	}
}

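/*
 * Both IPI handlers follow the same pattern: mask the source in the
 * EIMR, acknowledge it in the EIRR, run the generic SMP handler, then
 * unmask the source again.
 */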
/* IRQ_IPI_SMP_FUNCTION Handler */
void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc)
{
	clear_c0_eimr(irq);
	ack_c0_eirr(irq);
	smp_call_function_interrupt();
	set_c0_eimr(irq);
}

/* IRQ_IPI_SMP_RESCHEDULE handler */
void nlm_smp_resched_ipi_handler(unsigned int irq, struct irq_desc *desc)
{
	clear_c0_eimr(irq);
	ack_c0_eirr(irq);
	scheduler_ipi();
	set_c0_eimr(irq);
}

/*
 * Called before going into mips code, early cpu init
 */
void nlm_early_init_secondary(int cpu)
{
	change_c0_config(CONF_CM_CMASK, 0x3);
#ifdef CONFIG_CPU_XLP
	/* mmu init, once per core */
	if (cpu % NLM_THREADS_PER_CORE == 0)
		xlp_mmu_init();
#endif
	write_c0_ebase(nlm_current_node()->ebase);
}

/*
 * Code to run on secondary just after probing the CPU
 */
static void nlm_init_secondary(void)
{
	int hwtid;

	hwtid = hard_smp_processor_id();
	current_cpu_data.core = hwtid / NLM_THREADS_PER_CORE;
	nlm_percpu_init(hwtid);
	nlm_smp_irq_init(hwtid);
}

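/* Record how many SMT siblings each core has for the generic SMP code. */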
void nlm_prepare_cpus(unsigned int max_cpus)
{
	/* declare we are SMT capable */
	smp_num_siblings = nlm_threads_per_core;
}

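/* Final bringup step on a CPU: enable local interrupts. */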
void nlm_smp_finish(void)
{
	local_irq_enable();
}

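/* Nothing to do once all CPUs are up; kept to satisfy plat_smp_ops. */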
void nlm_cpus_done(void)
{
}

/*
 * Boot all other cpus in the system, initialize them, and bring them into
 * the boot function
 */
unsigned long nlm_next_gp;
unsigned long nlm_next_sp;
static cpumask_t phys_cpu_present_mask;

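/*
 * Start one secondary CPU: publish the idle thread's stack and gp in
 * nlm_next_sp/nlm_next_gp, then send an NMI through the owning node's
 * PIC to kick the CPU into its boot path.
 */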
void nlm_boot_secondary(int logical_cpu, struct task_struct *idle)
{
	int cpu, node;

	cpu = cpu_logical_map(logical_cpu);
	node = cpu / NLM_CPUS_PER_NODE;
	nlm_next_sp = (unsigned long)__KSTK_TOS(idle);
	nlm_next_gp = (unsigned long)task_thread_info(idle);

	/* barrier for sp/gp store above */
	__sync();
	nlm_pic_send_ipi(nlm_get_node(node)->picbase, cpu, 1, 1);	/* NMI */
}

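/*
 * Build the logical/physical CPU maps. The boot CPU is logical CPU 0;
 * every other hardware thread that has flagged itself in the cpu_ready
 * boot data gets the next logical id.
 */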
void __init nlm_smp_setup(void)
{
	unsigned int boot_cpu;
	int num_cpus, i, ncore;
	volatile u32 *cpu_ready = nlm_get_boot_data(BOOT_CPU_READY);
	char buf[64];

	boot_cpu = hard_smp_processor_id();
	cpumask_clear(&phys_cpu_present_mask);

	cpumask_set_cpu(boot_cpu, &phys_cpu_present_mask);
	__cpu_number_map[boot_cpu] = 0;
	__cpu_logical_map[0] = boot_cpu;
	set_cpu_possible(0, true);

	num_cpus = 1;
	for (i = 0; i < NR_CPUS; i++) {
		/*
		 * cpu_ready array is not set for the boot_cpu,
		 * it is only set for ASPs (see smpboot.S)
		 */
		if (cpu_ready[i]) {
			cpumask_set_cpu(i, &phys_cpu_present_mask);
			__cpu_number_map[i] = num_cpus;
			__cpu_logical_map[num_cpus] = i;
			set_cpu_possible(num_cpus, true);
			++num_cpus;
		}
	}

	cpumask_scnprintf(buf, ARRAY_SIZE(buf), &phys_cpu_present_mask);
	pr_info("Physical CPU mask: %s\n", buf);
	cpumask_scnprintf(buf, ARRAY_SIZE(buf), cpu_possible_mask);
	pr_info("Possible CPU mask: %s\n", buf);

	/* check against the cores we have woken up */
	for (ncore = 0, i = 0; i < NLM_NR_NODES; i++)
		ncore += hweight32(nlm_get_node(i)->coremask);

	pr_info("Detected (%dc%dt) %d Slave CPU(s)\n", ncore,
		nlm_threads_per_core, num_cpus);

	/* switch the NMI handler to the one that boots secondary CPUs */
	nlm_set_nmi_handler(nlm_boot_secondary_cpus);
}

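/*
 * Derive the thread mode from the wakeup cpumask: the threads enabled on
 * core 0 select 1, 2 or 4 threads per core, and every other core must
 * use the same per-core mask or be left out entirely.
 */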
static int nlm_parse_cpumask(cpumask_t *wakeup_mask)
{
	uint32_t core0_thr_mask, core_thr_mask;
	int threadmode, i, j;

	core0_thr_mask = 0;
	for (i = 0; i < NLM_THREADS_PER_CORE; i++)
		if (cpumask_test_cpu(i, wakeup_mask))
			core0_thr_mask |= (1 << i);
	switch (core0_thr_mask) {
	case 1:
		nlm_threads_per_core = 1;
		threadmode = 0;
		break;
	case 3:
		nlm_threads_per_core = 2;
		threadmode = 2;
		break;
	case 0xf:
		nlm_threads_per_core = 4;
		threadmode = 3;
		break;
	default:
		goto unsupp;
	}

	/* Verify other cores CPU masks */
	for (i = 0; i < NR_CPUS; i += NLM_THREADS_PER_CORE) {
		core_thr_mask = 0;
		for (j = 0; j < NLM_THREADS_PER_CORE; j++)
			if (cpumask_test_cpu(i + j, wakeup_mask))
				core_thr_mask |= (1 << j);
		if (core_thr_mask != 0 && core_thr_mask != core0_thr_mask)
			goto unsupp;
	}
	return threadmode;

unsupp:
	panic("Unsupported CPU mask %lx\n",
		(unsigned long)cpumask_bits(wakeup_mask)[0]);
	return 0;
}

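/*
 * Wake up the secondary CPUs: validate nlm_cpumask, pass the resulting
 * thread mode to the low-level boot code via BOOT_THREAD_MODE, and hand
 * off to the XLP or XLR specific wakeup routine.
 */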
int nlm_wakeup_secondary_cpus(void)
{
	u32 *reset_data;
	int threadmode;

	/* verify the mask and setup core config variables */
	threadmode = nlm_parse_cpumask(&nlm_cpumask);

	/* Setup CPU init parameters */
	reset_data = nlm_get_boot_data(BOOT_THREAD_MODE);
	*reset_data = threadmode;

#ifdef CONFIG_CPU_XLP
	xlp_wakeup_secondary_cpus();
#else
	xlr_wakeup_secondary_cpus();
#endif
	return 0;
}

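/* SMP operations handed to the generic MIPS SMP code. */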
struct plat_smp_ops nlm_smp_ops = {
	.send_ipi_single	= nlm_send_ipi_single,
	.send_ipi_mask		= nlm_send_ipi_mask,
	.init_secondary		= nlm_init_secondary,
	.smp_finish		= nlm_smp_finish,
	.cpus_done		= nlm_cpus_done,
	.boot_secondary		= nlm_boot_secondary,
	.smp_setup		= nlm_smp_setup,
	.prepare_cpus		= nlm_prepare_cpus,
};