/*
 * Uniprocessor-only support functions.  The counterpart to kernel/smp.c
 */
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/smp.h>

/*
 * On a UP kernel the only possible target is CPU 0, so a "cross" call
 * degenerates into a direct invocation of @func with interrupts
 * disabled.  @wait is irrelevant here: the call is always synchronous.
 * Returns 0 unconditionally.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int wait)
{
	unsigned long irqflags;

	/* No CPU other than 0 can exist on UP; flag confused callers. */
	WARN_ON(cpu != 0);

	local_irq_save(irqflags);
	func(info);
	local_irq_restore(irqflags);

	return 0;
}
EXPORT_SYMBOL(smp_call_function_single);
void __smp_call_function_single(int cpu, struct call_single_data *csd,
|
|
|
|
int wait)
|
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
local_irq_save(flags);
|
|
|
|
csd->func(csd->info);
|
|
|
|
local_irq_restore(flags);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(__smp_call_function_single);
|
|
|
|
|
2013-09-12 05:23:26 +08:00
|
|
|
/*
 * Run @func on every online CPU — which, on UP, means exactly one
 * invocation on CPU 0 with interrupts disabled.  @wait is moot since
 * the call completes before we return.  Always returns 0.
 */
int on_each_cpu(smp_call_func_t func, void *info, int wait)
{
	unsigned long irqflags;

	local_irq_save(irqflags);
	func(info);
	local_irq_restore(irqflags);

	return 0;
}
EXPORT_SYMBOL(on_each_cpu);
/*
|
|
|
|
* Note we still need to test the mask even for UP
|
|
|
|
* because we actually can get an empty mask from
|
|
|
|
* code that on SMP might call us without the local
|
|
|
|
* CPU in the mask.
|
|
|
|
*/
|
|
|
|
void on_each_cpu_mask(const struct cpumask *mask,
|
|
|
|
smp_call_func_t func, void *info, bool wait)
|
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
if (cpumask_test_cpu(0, mask)) {
|
|
|
|
local_irq_save(flags);
|
|
|
|
func(info);
|
|
|
|
local_irq_restore(flags);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(on_each_cpu_mask);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Preemption is disabled here to make sure the cond_func is called under the
|
|
|
|
* same condtions in UP and SMP.
|
|
|
|
*/
|
|
|
|
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
|
|
|
|
smp_call_func_t func, void *info, bool wait,
|
|
|
|
gfp_t gfp_flags)
|
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
preempt_disable();
|
|
|
|
if (cond_func(0, info)) {
|
|
|
|
local_irq_save(flags);
|
|
|
|
func(info);
|
|
|
|
local_irq_restore(flags);
|
|
|
|
}
|
|
|
|
preempt_enable();
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(on_each_cpu_cond);
|