/*
 * Common SMP CPU bringup/teardown functions
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/kthread.h>
#include <linux/smpboot.h>

#include "smpboot.h"

#ifdef CONFIG_SMP

#ifdef CONFIG_GENERIC_SMP_IDLE_THREAD
/*
 * For the hotplug case we keep the task structs around and reuse
 * them.
 */
static DEFINE_PER_CPU(struct task_struct *, idle_threads);

struct task_struct * __cpuinit idle_thread_get(unsigned int cpu)
{
	struct task_struct *tsk = per_cpu(idle_threads, cpu);

	if (!tsk)
		return ERR_PTR(-ENOMEM);
	init_idle(tsk, cpu);
	return tsk;
}
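
/*
 * Sketch of the expected use by the hotplug core (see _cpu_up() in
 * kernel/cpu.c); not part of this file:
 *
 *	struct task_struct *idle = idle_thread_get(cpu);
 *	if (IS_ERR(idle))
 *		return PTR_ERR(idle);
 *	ret = __cpu_up(cpu, idle);
 */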

void __init idle_thread_set_boot_cpu(void)
{
	per_cpu(idle_threads, smp_processor_id()) = current;
}

/**
 * idle_init - Initialize the idle thread for a cpu
 * @cpu: The cpu for which the idle thread should be initialized
 *
 * Creates the thread if it does not exist.
 */
static inline void idle_init(unsigned int cpu)
{
	struct task_struct *tsk = per_cpu(idle_threads, cpu);

	if (!tsk) {
		tsk = fork_idle(cpu);
		if (IS_ERR(tsk))
			pr_err("SMP: fork_idle() failed for CPU %u\n", cpu);
		else
			per_cpu(idle_threads, cpu) = tsk;
	}
}

/**
 * idle_threads_init - Initialize idle threads for all cpus
 */
void __init idle_threads_init(void)
{
	unsigned int cpu, boot_cpu;

	boot_cpu = smp_processor_id();

	for_each_possible_cpu(cpu) {
		if (cpu != boot_cpu)
			idle_init(cpu);
	}
}
#endif

#endif /* #ifdef CONFIG_SMP */

static LIST_HEAD(hotplug_threads);
static DEFINE_MUTEX(smpboot_threads_lock);

struct smpboot_thread_data {
	unsigned int			cpu;
	unsigned int			status;
	struct smp_hotplug_thread	*ht;
};
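
/*
 * Lifecycle of td->status: a freshly created thread starts out as
 * HP_THREAD_NONE, runs ht->setup() on its first wakeup and becomes
 * HP_THREAD_ACTIVE; when its cpu goes down it runs ht->park() and
 * waits in HP_THREAD_PARKED until the cpu comes back online.
 */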
enum {
	HP_THREAD_NONE = 0,
	HP_THREAD_ACTIVE,
	HP_THREAD_PARKED,
};

/**
 * smpboot_thread_fn - percpu hotplug thread loop function
 * @data: thread data pointer
 *
 * Checks for thread stop and park conditions. Calls the necessary
 * setup, cleanup, park and unpark functions for the registered
 * thread.
 *
 * Returns 0 when the thread should exit.
 */
static int smpboot_thread_fn(void *data)
{
	struct smpboot_thread_data *td = data;
	struct smp_hotplug_thread *ht = td->ht;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		preempt_disable();
		if (kthread_should_stop()) {
			set_current_state(TASK_RUNNING);
			preempt_enable();
			if (ht->cleanup)
				ht->cleanup(td->cpu, cpu_online(td->cpu));
			kfree(td);
			return 0;
		}

		if (kthread_should_park()) {
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			if (ht->park && td->status == HP_THREAD_ACTIVE) {
				BUG_ON(td->cpu != smp_processor_id());
				ht->park(td->cpu);
				td->status = HP_THREAD_PARKED;
			}
			kthread_parkme();
			/* We might have been woken for stop */
			continue;
		}

		BUG_ON(td->cpu != smp_processor_id());

		/* Check for state change setup */
		switch (td->status) {
		case HP_THREAD_NONE:
			preempt_enable();
			if (ht->setup)
				ht->setup(td->cpu);
			td->status = HP_THREAD_ACTIVE;
			preempt_disable();
			break;
		case HP_THREAD_PARKED:
			preempt_enable();
			if (ht->unpark)
				ht->unpark(td->cpu);
			td->status = HP_THREAD_ACTIVE;
			preempt_disable();
			break;
		}

		if (!ht->thread_should_run(td->cpu)) {
			preempt_enable();
			schedule();
		} else {
			set_current_state(TASK_RUNNING);
			preempt_enable();
			ht->thread_fn(td->cpu);
		}
	}
}
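
/*
 * Note on the loop above: TASK_INTERRUPTIBLE is set before the stop,
 * park and thread_should_run() checks, so a wakeup arriving between a
 * check and schedule() is not lost; ht->thread_fn() itself runs in
 * TASK_RUNNING state with preemption enabled.
 */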

static int
__smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
	struct smpboot_thread_data *td;

	if (tsk)
		return 0;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, cpu_to_node(cpu));
	if (!td)
		return -ENOMEM;
	td->cpu = cpu;
	td->ht = ht;

	tsk = kthread_create_on_cpu(smpboot_thread_fn, td, cpu,
				    ht->thread_comm);
	if (IS_ERR(tsk)) {
		kfree(td);
		return PTR_ERR(tsk);
	}
	get_task_struct(tsk);
	*per_cpu_ptr(ht->store, cpu) = tsk;
	if (ht->create)
		ht->create(cpu);
	return 0;
}
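
/*
 * Note: kthread_create_on_cpu() hands the new thread back in parked
 * state, so nothing runs until smpboot_unpark_threads() or
 * smpboot_register_percpu_thread() unparks it on an online cpu.
 */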

int smpboot_create_threads(unsigned int cpu)
{
	struct smp_hotplug_thread *cur;
	int ret = 0;

	mutex_lock(&smpboot_threads_lock);
	list_for_each_entry(cur, &hotplug_threads, list) {
		ret = __smpboot_create_thread(cur, cpu);
		if (ret)
			break;
	}
	mutex_unlock(&smpboot_threads_lock);
	return ret;
}

static void smpboot_unpark_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

	kthread_unpark(tsk);
}

void smpboot_unpark_threads(unsigned int cpu)
{
	struct smp_hotplug_thread *cur;

	mutex_lock(&smpboot_threads_lock);
	list_for_each_entry(cur, &hotplug_threads, list)
		smpboot_unpark_thread(cur, cpu);
	mutex_unlock(&smpboot_threads_lock);
}

static void smpboot_park_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

	if (tsk && !ht->selfparking)
		kthread_park(tsk);
}

void smpboot_park_threads(unsigned int cpu)
{
	struct smp_hotplug_thread *cur;

	mutex_lock(&smpboot_threads_lock);
	list_for_each_entry_reverse(cur, &hotplug_threads, list)
		smpboot_park_thread(cur, cpu);
	mutex_unlock(&smpboot_threads_lock);
}
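
/*
 * The park/unpark calls above are driven by the cpu hotplug core:
 * roughly, _cpu_down() parks the threads of the dying cpu before it
 * is taken down, and _cpu_up() unparks them once the cpu is online.
 */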

static void smpboot_destroy_threads(struct smp_hotplug_thread *ht)
{
	unsigned int cpu;

	/* We need to destroy also the parked threads of offline cpus */
	for_each_possible_cpu(cpu) {
		struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

		if (tsk) {
			kthread_stop(tsk);
			put_task_struct(tsk);
			*per_cpu_ptr(ht->store, cpu) = NULL;
		}
	}
}

/**
 * smpboot_register_percpu_thread - Register a per_cpu thread related to hotplug
 * @plug_thread: Hotplug thread descriptor
 *
 * Creates and starts the threads on all online cpus.
 */
int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
{
	unsigned int cpu;
	int ret = 0;

	/*
	 * Hold the hotplug lock so the online cpu set cannot change
	 * while the threads are created and unparked.
	 */
	get_online_cpus();
	mutex_lock(&smpboot_threads_lock);
	for_each_online_cpu(cpu) {
		ret = __smpboot_create_thread(plug_thread, cpu);
		if (ret) {
			smpboot_destroy_threads(plug_thread);
			goto out;
		}
		smpboot_unpark_thread(plug_thread, cpu);
	}
	list_add(&plug_thread->list, &hotplug_threads);
out:
	mutex_unlock(&smpboot_threads_lock);
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread);
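
/*
 * Usage sketch (illustrative only, not compiled here; all "example_*"
 * names are hypothetical): a client fills in a struct smp_hotplug_thread
 * and registers it once, after which the smpboot core creates, parks and
 * unparks the per-cpu thread across hotplug, much like ksoftirqd does.
 *
 *	static DEFINE_PER_CPU(struct task_struct *, example_tasks);
 *	static DEFINE_PER_CPU(unsigned int, example_pending);
 *
 *	static int example_should_run(unsigned int cpu)
 *	{
 *		return __this_cpu_read(example_pending);
 *	}
 *
 *	static void example_fn(unsigned int cpu)
 *	{
 *		__this_cpu_write(example_pending, 0);
 *	}
 *
 *	static struct smp_hotplug_thread example_threads = {
 *		.store			= &example_tasks,
 *		.thread_should_run	= example_should_run,
 *		.thread_fn		= example_fn,
 *		.thread_comm		= "example/%u",
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return smpboot_register_percpu_thread(&example_threads);
 *	}
 */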

/**
 * smpboot_unregister_percpu_thread - Unregister a per_cpu thread related to hotplug
 * @plug_thread: Hotplug thread descriptor
 *
 * Stops all threads on all possible cpus.
 */
void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
{
	get_online_cpus();
	mutex_lock(&smpboot_threads_lock);
	list_del(&plug_thread->list);
	smpboot_destroy_threads(plug_thread);
	mutex_unlock(&smpboot_threads_lock);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(smpboot_unregister_percpu_thread);
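
/*
 * Counterpart to the registration sketch above (same hypothetical
 * "example_*" names): a module pairs the calls in its exit path.
 *
 *	static void __exit example_exit(void)
 *	{
 *		smpboot_unregister_percpu_thread(&example_threads);
 *	}
 */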