2005-04-17 06:20:36 +08:00
|
|
|
/*
 * arch/sh/kernel/smp.c
 *
 * SMP support for the SuperH processors.
 *
 * Copyright (C) 2002 - 2010 Paul Mundt
 * Copyright (C) 2006 - 2007 Akio Idehara
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
|
2007-05-31 12:46:21 +08:00
|
|
|
#include <linux/err.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/cache.h>
|
|
|
|
#include <linux/cpumask.h>
|
|
|
|
#include <linux/delay.h>
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/spinlock.h>
|
2007-09-21 17:32:32 +08:00
|
|
|
#include <linux/mm.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/module.h>
|
2008-10-21 11:39:24 +08:00
|
|
|
#include <linux/cpu.h>
|
2007-09-21 17:32:32 +08:00
|
|
|
#include <linux/interrupt.h>
|
2011-04-05 23:23:39 +08:00
|
|
|
#include <linux/sched.h>
|
2011-07-27 07:09:06 +08:00
|
|
|
#include <linux/atomic.h>
|
2016-02-13 06:05:11 +08:00
|
|
|
#include <linux/clockchips.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <asm/processor.h>
|
|
|
|
#include <asm/mmu_context.h>
|
|
|
|
#include <asm/smp.h>
|
2007-09-21 17:32:32 +08:00
|
|
|
#include <asm/cacheflush.h>
|
|
|
|
#include <asm/sections.h>
|
2012-03-30 18:29:57 +08:00
|
|
|
#include <asm/setup.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2007-09-21 17:32:32 +08:00
|
|
|
int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */

/* Platform-specific SMP operations, installed via register_smp_ops(). */
struct plat_smp_ops *mp_ops = NULL;

/* State of each CPU (CPU_UP_PREPARE / CPU_ONLINE / CPU_DEAD, ...) */
DEFINE_PER_CPU(int, cpu_state) = { 0 };
|
|
|
|
|
2013-06-19 05:10:12 +08:00
|
|
|
void register_smp_ops(struct plat_smp_ops *ops)
|
2010-03-30 11:38:01 +08:00
|
|
|
{
|
|
|
|
if (mp_ops)
|
|
|
|
printk(KERN_WARNING "Overriding previously set SMP ops\n");
|
|
|
|
|
|
|
|
mp_ops = ops;
|
|
|
|
}
|
|
|
|
|
2013-06-19 05:10:12 +08:00
|
|
|
static inline void smp_store_cpu_info(unsigned int cpu)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2007-09-21 17:32:32 +08:00
|
|
|
struct sh_cpuinfo *c = cpu_data + cpu;
|
|
|
|
|
2009-10-14 13:14:30 +08:00
|
|
|
memcpy(c, &boot_cpu_data, sizeof(struct sh_cpuinfo));
|
|
|
|
|
2007-09-21 17:32:32 +08:00
|
|
|
c->loops_per_jiffy = loops_per_jiffy;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Prepare for SMP boot: set up the boot CPU's context and hand off to
 * the platform code to ready the secondaries.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu = smp_processor_id();

	init_new_context(current, &init_mm);
	current_thread_info()->cpu = cpu;
	/* Platform-specific preparation of secondary CPUs. */
	mp_ops->prepare_cpus(max_cpus);

#ifndef CONFIG_HOTPLUG_CPU
	/* Without hotplug, every possible CPU is also present. */
	init_cpu_present(cpu_possible_mask);
#endif
}
|
|
|
|
|
2010-04-26 17:55:01 +08:00
|
|
|
/*
 * Register the boot CPU in the CPU maps and mark it online.
 */
void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	/* The boot CPU is always logical CPU 0. */
	__cpu_number_map[0] = cpu;
	__cpu_logical_map[0] = cpu;

	set_cpu_online(cpu, true);
	set_cpu_possible(cpu, true);

	per_cpu(cpu_state, cpu) = CPU_ONLINE;
}
|
|
|
|
|
2010-04-26 18:08:55 +08:00
|
|
|
#ifdef CONFIG_HOTPLUG_CPU
|
|
|
|
void native_cpu_die(unsigned int cpu)
|
|
|
|
{
|
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
for (i = 0; i < 10; i++) {
|
|
|
|
smp_rmb();
|
|
|
|
if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
|
|
|
|
if (system_state == SYSTEM_RUNNING)
|
|
|
|
pr_info("CPU %u is now offline\n", cpu);
|
|
|
|
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
msleep(100);
|
|
|
|
}
|
|
|
|
|
|
|
|
pr_err("CPU %u didn't die...\n", cpu);
|
|
|
|
}
|
|
|
|
|
|
|
|
int native_cpu_disable(unsigned int cpu)
|
|
|
|
{
|
|
|
|
return cpu == 0 ? -EPERM : 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Common teardown for a CPU going offline: exit the idle task, tear
 * down the per-CPU IRQ stacks, publish CPU_DEAD, and disable IRQs.
 */
void play_dead_common(void)
{
	idle_task_exit();
	irq_ctx_exit(raw_smp_processor_id());
	mb();

	/* Publish CPU_DEAD so native_cpu_die() can observe it. */
	__this_cpu_write(cpu_state, CPU_DEAD);
	local_irq_disable();
}
|
|
|
|
|
|
|
|
/*
 * Default "play dead": just run the common teardown.  Platforms with a
 * real low-power dead state override this.
 */
void native_play_dead(void)
{
	play_dead_common();
}
|
|
|
|
|
|
|
|
/*
 * Take the current CPU offline on behalf of the hotplug core.
 * Returns 0 on success or a negative errno if the platform vetoes it.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	/* Give the platform a chance to veto offlining this CPU. */
	ret = mp_ops->cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	flush_cache_all();
#ifdef CONFIG_MMU
	local_flush_tlb_all();
#endif

	clear_tasks_mm_cpumask(cpu);

	return 0;
}
|
|
|
|
#else /* ... !CONFIG_HOTPLUG_CPU */
|
2010-04-28 19:11:30 +08:00
|
|
|
/* Hotplug disabled: offlining is never supported. */
int native_cpu_disable(unsigned int cpu)
{
	return -ENOSYS;
}
|
|
|
|
|
|
|
|
void native_cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}
|
|
|
|
|
|
|
|
/* Unreachable without hotplug support; a CPU can never go offline. */
void native_play_dead(void)
{
	BUG();
}
|
|
|
|
#endif
|
|
|
|
|
2013-06-19 05:10:12 +08:00
|
|
|
/*
 * C entry point for a secondary CPU, reached from head.S via
 * stack_start.start_kernel_fn.  Brings the CPU fully online and
 * enters the idle loop.  Statement order here is significant.
 */
asmlinkage void start_secondary(void)
{
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = &init_mm;

	enable_mmu();
	/* Borrow init_mm as the active mm; pin both count and users. */
	atomic_inc(&mm->mm_count);
	atomic_inc(&mm->mm_users);
	current->active_mm = mm;
#ifdef CONFIG_MMU
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();
#endif
	per_cpu_trap_init();

	preempt_disable();

	notify_cpu_starting(cpu);

	local_irq_enable();

	/* Calibrate before publishing this CPU's cpuinfo. */
	calibrate_delay();

	smp_store_cpu_info(cpu);

	/* Visible to __cpu_up()'s cpu_online() poll. */
	set_cpu_online(cpu, true);
	per_cpu(cpu_state, cpu) = CPU_ONLINE;

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
|
|
|
|
|
2007-09-21 17:32:32 +08:00
|
|
|
/*
 * Handshake area defined in head.S; __cpu_up() fills it in before
 * kicking a secondary CPU, which reads it very early in boot.
 */
extern struct {
	unsigned long sp;		/* initial stack pointer */
	unsigned long bss_start;	/* 0 = don't clear bss */
	unsigned long bss_end;
	void *start_kernel_fn;		/* C entry point (start_secondary) */
	void *cpu_init_fn;
	void *thread_info;		/* secondary's thread_info */
} stack_start;
|
|
|
|
|
2013-06-19 05:10:12 +08:00
|
|
|
/*
 * Boot secondary @cpu running @tsk as its idle task.  Returns 0 once
 * the CPU marks itself online, or -ENOENT if it fails to appear
 * within about one second.
 */
int __cpu_up(unsigned int cpu, struct task_struct *tsk)
{
	unsigned long timeout;

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* Fill in data in head.S for secondary cpus */
	stack_start.sp = tsk->thread.sp;
	stack_start.thread_info = tsk->stack;
	stack_start.bss_start = 0; /* don't clear bss for secondary cpus */
	stack_start.start_kernel_fn = start_secondary;

	/* Make the handshake area visible to the secondary's icache. */
	flush_icache_range((unsigned long)&stack_start,
			   (unsigned long)&stack_start + sizeof(stack_start));
	wmb();

	mp_ops->start_cpu(cpu, (unsigned long)_stext);

	/* Poll for up to HZ jiffies for the CPU to come online. */
	timeout = jiffies + HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu_online(cpu))
			break;

		udelay(10);
		barrier();
	}

	if (cpu_online(cpu))
		return 0;

	return -ENOENT;
}
|
|
|
|
|
|
|
|
/*
 * All CPUs are up: report the aggregate BogoMIPS of the online set.
 */
void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = 0;
	int cpu;

	for_each_online_cpu(cpu)
		bogosum += cpu_data[cpu].loops_per_jiffy;

	pr_info("SMP: Total of %d processors activated "
		"(%lu.%02lu BogoMIPS).\n", num_online_cpus(),
		bogosum / (500000/HZ),
		(bogosum / (5000/HZ)) % 100);
}
|
|
|
|
|
|
|
|
/* Ask @cpu to reschedule via a platform reschedule IPI. */
void smp_send_reschedule(int cpu)
{
	mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE);
}
|
|
|
|
|
|
|
|
/* Stop all other CPUs (e.g. on shutdown); does not wait for completion. */
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, 0, 0);
}
|
|
|
|
|
2009-06-12 21:02:35 +08:00
|
|
|
/* Send a call-function IPI to every CPU in @mask. */
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION);
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2008-06-11 02:52:59 +08:00
|
|
|
/* Send a single-target call-function IPI to @cpu. */
void arch_send_call_function_single_ipi(int cpu)
{
	mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
}
|
|
|
|
|
2016-02-13 06:05:11 +08:00
|
|
|
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
|
|
|
|
/* Relay a clockevents broadcast tick to every CPU in @mask via IPI. */
void tick_broadcast(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		mp_ops->send_ipi(cpu, SMP_MSG_TIMER);
}
|
|
|
|
|
|
|
|
/* Handle a received SMP_MSG_TIMER IPI inside IRQ context. */
static void ipi_timer(void)
{
	irq_enter();
	tick_receive_broadcast();
	irq_exit();
}
|
2016-02-13 06:05:11 +08:00
|
|
|
#endif
|
2008-08-06 17:21:03 +08:00
|
|
|
|
2008-08-06 17:02:48 +08:00
|
|
|
void smp_message_recv(unsigned int msg)
|
|
|
|
{
|
|
|
|
switch (msg) {
|
|
|
|
case SMP_MSG_FUNCTION:
|
|
|
|
generic_smp_call_function_interrupt();
|
|
|
|
break;
|
|
|
|
case SMP_MSG_RESCHEDULE:
|
2011-04-05 23:23:39 +08:00
|
|
|
scheduler_ipi();
|
2008-08-06 17:02:48 +08:00
|
|
|
break;
|
|
|
|
case SMP_MSG_FUNCTION_SINGLE:
|
|
|
|
generic_smp_call_function_single_interrupt();
|
|
|
|
break;
|
2016-02-13 06:05:11 +08:00
|
|
|
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
|
2008-08-06 17:21:03 +08:00
|
|
|
case SMP_MSG_TIMER:
|
|
|
|
ipi_timer();
|
|
|
|
break;
|
2016-02-13 06:05:11 +08:00
|
|
|
#endif
|
2008-08-06 17:02:48 +08:00
|
|
|
default:
|
|
|
|
printk(KERN_WARNING "SMP %d: %s(): unknown IPI %d\n",
|
|
|
|
smp_processor_id(), __func__, msg);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* Not really SMP stuff ... */
|
|
|
|
/* Profiling multiplier is not configurable on SH; accept and ignore. */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}
|
|
|
|
|
2016-02-13 06:11:55 +08:00
|
|
|
#ifdef CONFIG_MMU
|
|
|
|
|
2007-09-21 17:09:55 +08:00
|
|
|
/* IPI trampoline: flush the whole TLB on the receiving CPU. */
static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}
|
|
|
|
|
|
|
|
void flush_tlb_all(void)
|
|
|
|
{
|
2008-05-09 15:39:44 +08:00
|
|
|
on_each_cpu(flush_tlb_all_ipi, 0, 1);
|
2007-09-21 17:09:55 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* IPI trampoline: flush the TLB entries for @mm on the receiving CPU. */
static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}
|
|
|
|
|
|
|
|
/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debugees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		/* mm may be active elsewhere: IPI every other CPU. */
		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
	} else {
		/* Local-only mm: just invalidate its context elsewhere. */
		int i;
		for_each_online_cpu(i)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}
|
|
|
|
|
|
|
|
/* Argument bundle passed through smp_call_function() to TLB-flush IPIs. */
struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;	/* start address, page, or ASID */
	unsigned long addr2;	/* end address or vaddr */
};
|
|
|
|
|
|
|
|
/* IPI trampoline: flush a user address range on the receiving CPU. */
static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}
|
|
|
|
|
|
|
|
/*
 * Flush TLB entries for [start, end) of @vma's mm on all CPUs that may
 * hold them; other CPUs' contexts are invalidated when IPIs aren't needed.
 */
void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		/* Stack-allocated fd is safe: smp_call_function waits. */
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = start;
		fd.addr2 = end;
		smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1);
	} else {
		int i;
		for_each_online_cpu(i)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}
|
|
|
|
|
|
|
|
/* IPI trampoline: flush a kernel address range on the receiving CPU. */
static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}
|
|
|
|
|
|
|
|
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
|
|
|
|
{
|
|
|
|
struct flush_tlb_data fd;
|
|
|
|
|
|
|
|
fd.addr1 = start;
|
|
|
|
fd.addr2 = end;
|
2008-05-09 15:39:44 +08:00
|
|
|
on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1);
|
2007-09-21 17:09:55 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* IPI trampoline: flush a single user page on the receiving CPU. */
static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}
|
|
|
|
|
|
|
|
/*
 * Flush the TLB entry for @page in @vma's mm everywhere it may be cached;
 * contexts on other CPUs are invalidated when IPIs aren't required.
 */
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
	    (current->mm != vma->vm_mm)) {
		/* Stack-allocated fd is safe: smp_call_function waits. */
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = page;
		smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1);
	} else {
		int i;
		for_each_online_cpu(i)
			if (smp_processor_id() != i)
				cpu_context(i, vma->vm_mm) = 0;
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}
|
|
|
|
|
|
|
|
/* IPI trampoline: flush one (ASID, vaddr) entry on the receiving CPU. */
static void flush_tlb_one_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
	local_flush_tlb_one(fd->addr1, fd->addr2);
}
|
|
|
|
|
|
|
|
void flush_tlb_one(unsigned long asid, unsigned long vaddr)
|
|
|
|
{
|
|
|
|
struct flush_tlb_data fd;
|
|
|
|
|
|
|
|
fd.addr1 = asid;
|
|
|
|
fd.addr2 = vaddr;
|
|
|
|
|
2008-06-06 17:18:06 +08:00
|
|
|
smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1);
|
2007-09-21 17:09:55 +08:00
|
|
|
local_flush_tlb_one(asid, vaddr);
|
|
|
|
}
|
2016-02-13 06:11:55 +08:00
|
|
|
|
|
|
|
#endif
|