2019-06-04 16:11:33 +08:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0-only */
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
2008-08-02 17:55:55 +08:00
|
|
|
* arch/arm/include/asm/smp.h
|
2005-04-17 06:20:36 +08:00
|
|
|
*
|
|
|
|
* Copyright (C) 2004-2005 ARM Ltd.
|
|
|
|
*/
|
|
|
|
#ifndef __ASM_ARM_SMP_H
|
|
|
|
#define __ASM_ARM_SMP_H
|
|
|
|
|
|
|
|
#include <linux/threads.h>
|
|
|
|
#include <linux/cpumask.h>
|
|
|
|
#include <linux/thread_info.h>
|
|
|
|
|
|
|
|
/*
 * This header is only meaningful in SMP kernels: fail the build loudly
 * if it is pulled into a non-SMP (UP) configuration.
 */
#ifndef CONFIG_SMP
# error "<asm/smp.h> included in non-SMP build"
#endif
|
|
|
|
|
2005-06-22 08:14:34 +08:00
|
|
|
#define raw_smp_processor_id() (current_thread_info()->cpu)
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* Forward declaration: only a pointer to seq_file is needed here. */
struct seq_file;

/*
 * Generate IPI list text (seq_file based output, e.g. interrupt
 * statistics).  NOTE(review): the int argument's meaning is not visible
 * here — presumably a formatting/precision value; confirm at the caller.
 */
extern void show_ipi_list(struct seq_file *, int);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2005-11-08 23:35:23 +08:00
|
|
|
/*
 * Called from assembly code, this handles an IPI.
 * asmlinkage: arguments arrive per the asm calling convention.
 */
asmlinkage void do_IPI(int ipinr, struct pt_regs *regs);
|
2005-11-08 23:35:23 +08:00
|
|
|
|
2011-10-06 22:18:14 +08:00
|
|
|
/*
 * Called from C code, this handles an IPI (C-callable counterpart of
 * do_IPI above).
 */
void handle_IPI(int ipinr, struct pt_regs *regs);
|
|
|
|
|
2006-02-16 19:08:09 +08:00
|
|
|
/*
 * Setup the set of possible CPUs (via set_cpu_possible).
 */
extern void smp_init_cpus(void);
|
|
|
|
|
/*
 * IPIs can be handled as normal interrupts: set_smp_ipi_range() takes a
 * range of interrupts and allows the arch code to request them as if
 * they were normal interrupts.  A standard handler is then called by
 * the core IRQ code to deal with the IPI, so irq_enter/irq_exit and
 * set_irq_regs are not needed; handle_IPI() remains as a compatibility
 * function.  On the sending side, ipi_send_mask is used.
 */
2020-06-24 03:38:41 +08:00
|
|
|
/*
 * Register a range of IPI interrupts [ipi_base, ipi_base + nr_ipi) with
 * the arch SMP code.
 */
extern void set_smp_ipi_range(int ipi_base, int nr_ipi);
|
|
|
|
|
2005-11-08 23:35:23 +08:00
|
|
|
/*
 * Called from platform specific assembly code, this is the
 * secondary CPU entry point.
 */
asmlinkage void secondary_start_kernel(void);
|
|
|
|
|
2010-12-03 19:09:48 +08:00
|
|
|
|
2005-06-18 16:33:31 +08:00
|
|
|
/*
 * Initial data for bringing up a secondary CPU.
 */
struct secondary_data {
	union {
		/* NOTE(review): which member is used presumably depends on
		 * MMU vs MPU (nommu) configuration — confirm at the users. */
		struct mpu_rgn_info *mpu_rgn_info;
		u64 pgdir;	/* presumably phys addr of the page directory — confirm */
	};
	unsigned long swapper_pg_dir;
	void *stack;	/* initial stack for the incoming CPU */
};
/* Single shared instance, defined elsewhere, filled in before boot. */
extern struct secondary_data secondary_data;
|
2014-03-17 01:04:54 +08:00
|
|
|
/* Low-level secondary CPU startup entry points (defined out of line,
 * presumably in assembly — confirm). The _arm variant's distinction
 * (e.g. ARM vs Thumb entry) is not visible from this header. */
extern void secondary_startup(void);
extern void secondary_startup_arm(void);
|
2005-06-18 16:33:31 +08:00
|
|
|
|
2005-11-03 06:24:33 +08:00
|
|
|
/* CPU hotplug hooks: take the current CPU down (returns 0 on success —
 * TODO confirm convention), and reap a dead CPU from a survivor. */
extern int __cpu_disable(void);

extern void __cpu_die(unsigned int cpu);
|
|
|
|
|
2008-06-11 02:48:30 +08:00
|
|
|
/* Cross-call senders used by the generic smp_call_function machinery. */
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
/* Wake the CPUs in @mask via IPI. */
extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask);

/* Associate @completion with @cpu; semantics of the return value are
 * not visible here — confirm at the definition. */
extern int register_ipi_completion(struct completion *completion, int cpu);
|
|
|
|
|
2011-09-08 16:06:10 +08:00
|
|
|
/*
 * Platform-specific SMP operations, installed via smp_set_ops().
 */
struct smp_operations {
#ifdef CONFIG_SMP
	/*
	 * Setup the set of possible CPUs (via set_cpu_possible)
	 */
	void (*smp_init_cpus)(void);
	/*
	 * Initialize cpu_possible map, and enable coherency
	 */
	void (*smp_prepare_cpus)(unsigned int max_cpus);

	/*
	 * Perform platform specific initialisation of the specified CPU.
	 */
	void (*smp_secondary_init)(unsigned int cpu);
	/*
	 * Boot a secondary CPU, and assign it the specified idle task.
	 * This also gives us the initial stack to use for this CPU.
	 */
	int (*smp_boot_secondary)(unsigned int cpu, struct task_struct *idle);
#ifdef CONFIG_HOTPLUG_CPU
	/* Hotplug hooks; exact calling context (which CPU runs each one)
	 * is not visible here — confirm against the hotplug core. */
	int (*cpu_kill)(unsigned int cpu);
	void (*cpu_die)(unsigned int cpu);
	bool (*cpu_can_disable)(unsigned int cpu);
	int (*cpu_disable)(unsigned int cpu);
#endif
#endif
};
|
|
|
|
|
2013-10-31 09:21:09 +08:00
|
|
|
/* Pairs a method name string with its smp_operations implementation
 * (the "of_" prefix suggests device-tree enable-method matching —
 * confirm at the lookup site). */
struct of_cpu_method {
	const char *method;
	const struct smp_operations *ops;
};
|
|
|
|
|
|
|
|
/*
 * Emit a named, uniquely-suffixed of_cpu_method entry into the
 * "__cpu_method_of_table" linker section (presumably walked at boot to
 * match a method string — confirm).  __used keeps the otherwise
 * unreferenced static object from being discarded.
 */
#define CPU_METHOD_OF_DECLARE(name, _method, _ops)			\
	static const struct of_cpu_method __cpu_method_of_table_##name	\
		__used __section("__cpu_method_of_table")		\
		= { .method = _method, .ops = _ops }
|
2011-09-08 16:06:10 +08:00
|
|
|
/*
 * set platform specific SMP operations
 */
extern void smp_set_ops(const struct smp_operations *);
|
2011-09-08 16:06:10 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
#endif /* ifndef __ASM_ARM_SMP_H */
|