/*
 * NUMA irq-desc migration code
 *
 * Migrate IRQ data structures (irq_desc, chip_data, etc.) over to
 * the new "home node" of the IRQ.
 */
|
|
|
|
|
|
|
|
#include <linux/irq.h>
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/random.h>
|
|
|
|
#include <linux/interrupt.h>
|
|
|
|
#include <linux/kernel_stat.h>
|
|
|
|
|
|
|
|
#include "internals.h"
|
|
|
|
|
|
|
|
static void init_copy_kstat_irqs(struct irq_desc *old_desc,
|
|
|
|
struct irq_desc *desc,
|
2009-04-28 09:00:38 +08:00
|
|
|
int node, int nr)
|
2008-12-11 16:15:01 +08:00
|
|
|
{
|
2009-04-28 09:00:38 +08:00
|
|
|
init_kstat_irqs(desc, node, nr);
|
2008-12-11 16:15:01 +08:00
|
|
|
|
2009-02-09 08:18:03 +08:00
|
|
|
if (desc->kstat_irqs != old_desc->kstat_irqs)
|
|
|
|
memcpy(desc->kstat_irqs, old_desc->kstat_irqs,
|
|
|
|
nr * sizeof(*desc->kstat_irqs));
|
2008-12-11 16:15:01 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc)
|
|
|
|
{
|
|
|
|
if (old_desc->kstat_irqs == desc->kstat_irqs)
|
|
|
|
return;
|
|
|
|
|
|
|
|
kfree(old_desc->kstat_irqs);
|
|
|
|
old_desc->kstat_irqs = NULL;
|
|
|
|
}
|
|
|
|
|
2009-01-11 13:58:09 +08:00
|
|
|
/*
 * Initialize the freshly allocated @desc as a copy of @old_desc,
 * homed on @node.
 *
 * Starts from a bitwise copy and then fixes up every field that must
 * not be shared between the two descriptors: the cpumasks, the spinlock,
 * the node, the kstat counters and the arch chip_data.
 *
 * Returns true on success; false if the cpumask allocation failed, in
 * which case @desc is unusable and the caller must keep @old_desc.
 */
static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
				   struct irq_desc *desc, int node)
{
	/* Bitwise copy first; individual fields are fixed up below. */
	memcpy(desc, old_desc, sizeof(struct irq_desc));
	if (!alloc_desc_masks(desc, node, false)) {
		printk(KERN_ERR "irq %d: can not get new irq_desc cpumask "
				"for migration.\n", irq);
		return false;
	}
	/* The memcpy duplicated the live lock state; re-init a fresh lock. */
	raw_spin_lock_init(&desc->lock);
	desc->node = node;
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	/* Copy the accumulated per-CPU interrupt counts. */
	init_copy_kstat_irqs(old_desc, desc, node, nr_cpu_ids);
	init_copy_desc_masks(old_desc, desc);
	/* Let the architecture migrate its chip_data to the new node. */
	arch_init_copy_chip_data(old_desc, desc, node);
	return true;
}
|
|
|
|
|
|
|
|
/*
 * Release the parts of @old_desc that were duplicated (not shared) when
 * @desc was created by init_copy_one_irq_desc(): kstat counters,
 * cpumasks and arch chip_data.  @old_desc itself is kfree'd by the
 * caller.
 */
static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc)
{
	free_kstat_irqs(old_desc, desc);
	free_desc_masks(old_desc, desc);
	arch_free_chip_data(old_desc, desc);
}
|
|
|
|
|
|
|
|
/*
 * Allocate a new irq_desc on @node, copy @old_desc's state into it,
 * publish it in the sparse irq table and free the old descriptor.
 *
 * Serializes against concurrent lookups/moves via sparse_irq_lock.
 * On any failure the old descriptor stays in place and is returned,
 * so the caller always gets a valid descriptor back.
 */
static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
					     int node)
{
	struct irq_desc *desc;
	unsigned int irq;
	unsigned long flags;

	irq = old_desc->irq;

	raw_spin_lock_irqsave(&sparse_irq_lock, flags);

	/* We have to check it to avoid races with another CPU */
	desc = irq_to_desc(irq);

	/* Someone else already replaced this descriptor; keep theirs. */
	if (desc && old_desc != desc)
		goto out_unlock;

	/* GFP_ATOMIC: sparse_irq_lock is held with interrupts disabled. */
	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
	if (!desc) {
		printk(KERN_ERR "irq %d: can not get new irq_desc "
				"for migration.\n", irq);
		/* still use old one */
		desc = old_desc;
		goto out_unlock;
	}
	if (!init_copy_one_irq_desc(irq, old_desc, desc, node)) {
		/* still use old one */
		kfree(desc);
		desc = old_desc;
		goto out_unlock;
	}

	/* Publish the copy before dropping the lock ... */
	replace_irq_desc(irq, desc);
	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);

	/* ... then free the old one outside the critical section. */
	free_one_irq_desc(old_desc, desc);
	kfree(old_desc);

	return desc;

out_unlock:
	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);

	return desc;
}
|
|
|
|
|
2009-04-28 09:00:38 +08:00
|
|
|
struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
|
2008-12-11 16:15:01 +08:00
|
|
|
{
|
2009-08-05 00:01:33 +08:00
|
|
|
/* those static or target node is -1, do not move them */
|
|
|
|
if (desc->irq < NR_IRQS_LEGACY || node == -1)
|
2008-12-11 16:15:01 +08:00
|
|
|
return desc;
|
|
|
|
|
2009-04-28 09:00:38 +08:00
|
|
|
if (desc->node != node)
|
|
|
|
desc = __real_move_irq_desc(desc, node);
|
2008-12-11 16:15:01 +08:00
|
|
|
|
|
|
|
return desc;
|
|
|
|
}
|
|
|
|
|