/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2008, 2009, 2010 Cavium Networks
 */
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/sched.h>
#include <linux/module.h>

#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/setup.h>

#include <asm/octeon/octeon.h>

#include "octeon_boot.h"

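/*
 * Boot handshake: octeon_boot_secondary() publishes the new CPU's stack
 * pointer, global pointer and target core id here, and the secondary core
 * clears octeon_processor_sp once it has picked them up.
 */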
volatile unsigned long octeon_processor_boot = 0xff;
volatile unsigned long octeon_processor_sp;
volatile unsigned long octeon_processor_gp;

#ifdef CONFIG_HOTPLUG_CPU
uint64_t octeon_bootloader_entry_addr;
EXPORT_SYMBOL(octeon_bootloader_entry_addr);
#endif

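/* Flush this core's instruction cache using the synci instruction. */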
static void octeon_icache_flush(void)
{
	asm volatile ("synci 0($0)\n");
}

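/*
 * Per-bit handlers for mailbox IPIs; entry N is invoked when mailbox bit
 * (1 << N) is set (see the BUILD_BUG_ON checks in mailbox_interrupt()).
 */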
static void (*octeon_message_functions[8])(void) = {
	scheduler_ipi,
	generic_smp_call_function_interrupt,
	octeon_icache_flush,
};

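/*
 * Mailbox interrupt handler: read and clear this core's pending mailbox
 * bits, then call the matching octeon_message_functions[] entry for each
 * bit that was set.
 */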
static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
{
	u64 mbox_clrx = CVMX_CIU_MBOX_CLRX(cvmx_get_core_num());
	u64 action;
	int i;

	/*
	 * Make sure the function array initialization remains
	 * correct.
	 */
	BUILD_BUG_ON(SMP_RESCHEDULE_YOURSELF != (1 << 0));
	BUILD_BUG_ON(SMP_CALL_FUNCTION != (1 << 1));
	BUILD_BUG_ON(SMP_ICACHE_FLUSH != (1 << 2));

	/*
	 * Load the mailbox register to figure out what we're supposed
	 * to do.
	 */
	action = cvmx_read_csr(mbox_clrx);

	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		action &= 0xff;
	else
		action &= 0xffff;

	/* Clear the mailbox to clear the interrupt */
	cvmx_write_csr(mbox_clrx, action);

	for (i = 0; i < ARRAY_SIZE(octeon_message_functions) && action;) {
		if (action & 1) {
			void (*fn)(void) = octeon_message_functions[i];

			if (fn)
				fn();
		}
		action >>= 1;
		i++;
	}
	return IRQ_HANDLED;
}

/**
 * Set the requested action bits in the target CPU's mailbox, raising the
 * mailbox interrupt on that core.
 */
void octeon_send_ipi_single(int cpu, unsigned int action)
{
	int coreid = cpu_logical_map(cpu);
	/*
	pr_info("SMP: Mailbox send cpu=%d, coreid=%d, action=%u\n", cpu,
	       coreid, action);
	*/
	cvmx_write_csr(CVMX_CIU_MBOX_SETX(coreid), action);
}

static inline void octeon_send_ipi_mask(const struct cpumask *mask,
					unsigned int action)
{
	unsigned int i;

	for_each_cpu(i, mask)
		octeon_send_ipi_single(i, action);
}

/**
 * Validate the bootloader's linux_app_boot_info (LABI) block and record
 * its entry address, which CPU hotplug needs to re-start offlined cores.
 */
static void octeon_smp_hotplug_setup(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	struct linux_app_boot_info *labi;

	if (!setup_max_cpus)
		return;

	labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);
	if (labi->labi_signature != LABI_SIGNATURE) {
		pr_info("The bootloader on this board does not support HOTPLUG_CPU.");
		return;
	}

	octeon_bootloader_entry_addr = labi->InitTLBStart_addr;
#endif
}

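/*
 * Detect the cores present in the boot coremask, assign them CPU numbers
 * and populate the possible/present maps; under HOTPLUG_CPU the remaining
 * cores are also marked possible so they can be brought up later.
 */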
static void __init octeon_smp_setup(void)
{
	const int coreid = cvmx_get_core_num();
	int cpus;
	int id;
	struct cvmx_sysinfo *sysinfo = cvmx_sysinfo_get();

#ifdef CONFIG_HOTPLUG_CPU
	int core_mask = octeon_get_boot_coremask();
	unsigned int num_cores = cvmx_octeon_num_cores();
#endif

	/* The present CPUs are initially just the boot cpu (CPU 0). */
	for (id = 0; id < NR_CPUS; id++) {
		set_cpu_possible(id, id == 0);
		set_cpu_present(id, id == 0);
	}

	__cpu_number_map[coreid] = 0;
	__cpu_logical_map[0] = coreid;

	/* The present CPUs get the lowest CPU numbers. */
	cpus = 1;
	for (id = 0; id < NR_CPUS; id++) {
		if ((id != coreid) && cvmx_coremask_is_core_set(&sysinfo->core_mask, id)) {
			set_cpu_possible(cpus, true);
			set_cpu_present(cpus, true);
			__cpu_number_map[id] = cpus;
			__cpu_logical_map[cpus] = id;
			cpus++;
		}
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * The possible CPUs are all those present on the chip. We
	 * will assign CPU numbers for possible cores as well. Cores
	 * are always consecutively numbered from 0.
	 */
	for (id = 0; setup_max_cpus && octeon_bootloader_entry_addr &&
		     id < num_cores && id < NR_CPUS; id++) {
		if (!(core_mask & (1 << id))) {
			set_cpu_possible(cpus, true);
			__cpu_number_map[id] = cpus;
			__cpu_logical_map[cpus] = id;
			cpus++;
		}
	}
#endif

	octeon_smp_hotplug_setup();
}

/**
 * Firmware CPU startup hook: hand the idle thread's stack and global
 * pointers to the target core and wait for it to pick them up.
 */
static void octeon_boot_secondary(int cpu, struct task_struct *idle)
{
	int count;

	pr_info("SMP: Booting CPU%02d (CoreId %2d)...\n", cpu,
		cpu_logical_map(cpu));

	octeon_processor_sp = __KSTK_TOS(idle);
	octeon_processor_gp = (unsigned long)(task_thread_info(idle));
	octeon_processor_boot = cpu_logical_map(cpu);
	mb();

	count = 10000;
	while (octeon_processor_sp && count) {
		/* Waiting for processor to get the SP and GP */
		udelay(1);
		count--;
	}
	if (count == 0)
		pr_err("Secondary boot timeout\n");
}

/**
 * After we've done initial boot, this function is called to allow the
 * board code to clean up state, if needed
 */
static void octeon_init_secondary(void)
{
	unsigned int sr;

	sr = set_c0_status(ST0_BEV);
	write_c0_ebase((u32)ebase);
	write_c0_status(sr);

	octeon_check_cpu_bist();
	octeon_init_cvmcount();

	octeon_irq_setup_secondary();
}

/**
 * Callout to firmware before smp_init: clear this core's mailbox bits and
 * install the mailbox IPI handler.
 */
static void __init octeon_prepare_cpus(unsigned int max_cpus)
{
	/*
	 * Only the low order mailbox bits are used for IPIs, leave
	 * the other bits alone.
	 */
	cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffff);
	if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt,
			IRQF_PERCPU | IRQF_NO_THREAD, "SMP-IPI",
			mailbox_interrupt)) {
		panic("Cannot request_irq(OCTEON_IRQ_MBOX0)");
	}
}

/**
 * Last chance for the board code to finish SMP initialization before
 * the CPU is "online".
 */
static void octeon_smp_finish(void)
{
	octeon_user_io_init();

	/* to generate the first CPU timer interrupt */
	write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
	local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU

/* State of each CPU. */
DEFINE_PER_CPU(int, cpu_state);

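/*
 * Prepare this CPU for removal: mark it offline, drop it from the call-in
 * map, redirect its interrupts and flush caches and TLB.
 */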
static int octeon_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == 0)
		return -EBUSY;

	if (!octeon_bootloader_entry_addr)
		return -ENOTSUPP;

	set_cpu_online(cpu, false);
	cpumask_clear_cpu(cpu, &cpu_callin_map);
	octeon_fixup_irqs();

	__flush_cache_all();
	local_flush_tlb_all();

	return 0;
}

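/*
 * Wait for the dying CPU to reach CPU_DEAD, return its core to the
 * bootloader's available coremask, then pulse the core's reset line.
 */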
static void octeon_cpu_die(unsigned int cpu)
{
	int coreid = cpu_logical_map(cpu);
	uint32_t mask, new_mask;
	const struct cvmx_bootmem_named_block_desc *block_desc;

	while (per_cpu(cpu_state, cpu) != CPU_DEAD)
		cpu_relax();

	/*
	 * This is a somewhat involved strategy for getting/setting the
	 * available cores mask, copied from the bootloader.
	 */

	mask = 1 << coreid;
	/* LINUX_APP_BOOT_BLOCK is initialized in bootoct binary */
	block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);

	if (!block_desc) {
		struct linux_app_boot_info *labi;

		labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);

		labi->avail_coremask |= mask;
		new_mask = labi->avail_coremask;
	} else {		       /* alternative, already initialized */
		uint32_t *p = (uint32_t *)PHYS_TO_XKSEG_CACHED(block_desc->base_addr +
			AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK);
		*p |= mask;
		new_mask = *p;
	}

	pr_info("Reset core %d. Available Coremask = 0x%x\n", coreid, new_mask);
	mb();
	cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
	cvmx_write_csr(CVMX_CIU_PP_RST, 0);
}

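/*
 * Idle loop for an offlined CPU: mark this CPU as CPU_DEAD and spin until
 * octeon_cpu_die() resets the core.
 */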
void play_dead(void)
{
	int cpu = cpu_number_map(cvmx_get_core_num());

	idle_task_exit();
	octeon_processor_boot = 0xff;
	per_cpu(cpu_state, cpu) = CPU_DEAD;

	mb();

	while (1)	/* core will be reset here */
		;
}

extern void kernel_entry(unsigned long arg1, ...);

static void start_after_reset(void)
{
	kernel_entry(0, 0, 0);	/* set a2 = 0 for secondary core */
}

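/*
 * Point the bootloader's boot vector for this core at start_after_reset().
 * If the core is not in the available coremask it is reset first; otherwise
 * an NMI is sent so the core leaves the bootloader and re-enters the kernel.
 */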
static int octeon_update_boot_vector(unsigned int cpu)
{
	int coreid = cpu_logical_map(cpu);
	uint32_t avail_coremask;
	const struct cvmx_bootmem_named_block_desc *block_desc;
	struct boot_init_vector *boot_vect =
		(struct boot_init_vector *)PHYS_TO_XKSEG_CACHED(BOOTLOADER_BOOT_VECTOR);

	block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);

	if (!block_desc) {
		struct linux_app_boot_info *labi;

		labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);

		avail_coremask = labi->avail_coremask;
		labi->avail_coremask &= ~(1 << coreid);
	} else {		       /* alternative, already initialized */
		avail_coremask = *(uint32_t *)PHYS_TO_XKSEG_CACHED(
			block_desc->base_addr + AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK);
	}

	if (!(avail_coremask & (1 << coreid))) {
		/* core not available; assume it was caught by the simple executive */
		cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
		cvmx_write_csr(CVMX_CIU_PP_RST, 0);
	}

	boot_vect[coreid].app_start_func_addr =
		(uint32_t) (unsigned long) start_after_reset;
	boot_vect[coreid].code_addr = octeon_bootloader_entry_addr;

	mb();

	cvmx_write_csr(CVMX_CIU_NMI, (1 << coreid) & avail_coremask);

	return 0;
}

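/* CPU hotplug notifier: arm the boot vector before a secondary CPU is brought up. */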
static int octeon_cpu_callback(struct notifier_block *nfb,
	unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		octeon_update_boot_vector(cpu);
		break;
	case CPU_ONLINE:
		pr_info("Cpu %d online\n", cpu);
		break;
	case CPU_DEAD:
		break;
	}

	return NOTIFY_OK;
}

static int register_cavium_notifier(void)
{
	hotcpu_notifier(octeon_cpu_callback, 0);
	return 0;
}
late_initcall(register_cavium_notifier);

#endif	/* CONFIG_HOTPLUG_CPU */

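/* SMP operations for Octeon models without the CIU3 interrupt controller. */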
struct plat_smp_ops octeon_smp_ops = {
	.send_ipi_single	= octeon_send_ipi_single,
	.send_ipi_mask		= octeon_send_ipi_mask,
	.init_secondary		= octeon_init_secondary,
	.smp_finish		= octeon_smp_finish,
	.boot_secondary		= octeon_boot_secondary,
	.smp_setup		= octeon_smp_setup,
	.prepare_cpus		= octeon_prepare_cpus,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable		= octeon_cpu_disable,
	.cpu_die		= octeon_cpu_die,
#endif
};

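/*
 * On CIU3-based chips each IPI type has its own mailbox interrupt, so the
 * handlers below are simple wrappers rather than a bit dispatcher.
 */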
static irqreturn_t octeon_78xx_reched_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}

static irqreturn_t octeon_78xx_call_function_interrupt(int irq, void *dev_id)
{
	generic_smp_call_function_interrupt();
	return IRQ_HANDLED;
}

static irqreturn_t octeon_78xx_icache_flush_interrupt(int irq, void *dev_id)
{
	octeon_icache_flush();
	return IRQ_HANDLED;
}

/*
 * Callout to firmware before smp_init.
 */
static void octeon_78xx_prepare_cpus(unsigned int max_cpus)
{
	if (request_irq(OCTEON_IRQ_MBOX0 + 0,
			octeon_78xx_reched_interrupt,
			IRQF_PERCPU | IRQF_NO_THREAD, "Scheduler",
			octeon_78xx_reched_interrupt)) {
		panic("Cannot request_irq for SchedulerIPI");
	}
	if (request_irq(OCTEON_IRQ_MBOX0 + 1,
			octeon_78xx_call_function_interrupt,
			IRQF_PERCPU | IRQF_NO_THREAD, "SMP-Call",
			octeon_78xx_call_function_interrupt)) {
		panic("Cannot request_irq for SMP-Call");
	}
	if (request_irq(OCTEON_IRQ_MBOX0 + 2,
			octeon_78xx_icache_flush_interrupt,
			IRQF_PERCPU | IRQF_NO_THREAD, "ICache-Flush",
			octeon_78xx_icache_flush_interrupt)) {
		panic("Cannot request_irq for ICache-Flush");
	}
}

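/* Translate the legacy action bit mask into per-bit CIU3 mailbox messages. */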
static void octeon_78xx_send_ipi_single(int cpu, unsigned int action)
{
	int i;

	for (i = 0; i < 8; i++) {
		if (action & 1)
			octeon_ciu3_mbox_send(cpu, i);
		action >>= 1;
	}
}

static void octeon_78xx_send_ipi_mask(const struct cpumask *mask,
				      unsigned int action)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		octeon_78xx_send_ipi_single(cpu, action);
}

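/* SMP operations for CIU3-based Octeon models (e.g. cn78xx). */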
static struct plat_smp_ops octeon_78xx_smp_ops = {
	.send_ipi_single	= octeon_78xx_send_ipi_single,
	.send_ipi_mask		= octeon_78xx_send_ipi_mask,
	.init_secondary		= octeon_init_secondary,
	.smp_finish		= octeon_smp_finish,
	.boot_secondary		= octeon_boot_secondary,
	.smp_setup		= octeon_smp_setup,
	.prepare_cpus		= octeon_78xx_prepare_cpus,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable		= octeon_cpu_disable,
	.cpu_die		= octeon_cpu_die,
#endif
};

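/* Select the SMP ops variant for this chip and register it with the MIPS core. */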
void __init octeon_setup_smp(void)
{
	struct plat_smp_ops *ops;

	if (octeon_has_feature(OCTEON_FEATURE_CIU3))
		ops = &octeon_78xx_smp_ops;
	else
		ops = &octeon_smp_ops;

	register_smp_ops(ops);
}