Pull vector-domain into release branch
commit f4fbfb0dda
@@ -1885,6 +1885,9 @@ and is between 256 and 4096 characters. It is defined in the file
			vdso=1: enable VDSO (default)
			vdso=0: disable VDSO mapping

	vector=		[IA-64,SMP]
			vector=percpu: enable percpu vector domain

	video=		[FB] Frame buffer configuration
			See Documentation/fb/modedb.txt.
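As a usage sketch (not part of this patch): the new option is simply appended to the kernel boot line; the image path and root device below are hypothetical:

	kernel /boot/vmlinuz-2.6 root=/dev/sda1 vector=percpu

With vector=percpu each CPU gets its own vector space, and the option also turns on no_int_routing, as the parse_vector_domain() hunk further down shows.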
File diff suppressed because it is too large
@@ -35,7 +35,7 @@ void ack_bad_irq(unsigned int irq)

#ifdef CONFIG_IA64_GENERIC
unsigned int __ia64_local_vector_to_irq (ia64_vector vec)
{
	return (unsigned int) vec;
	return __get_cpu_var(vector_irq)[vec];
}
#endif
@@ -46,6 +46,12 @@

#define IRQ_DEBUG	0

#define IRQ_VECTOR_UNASSIGNED	(0)

#define IRQ_UNUSED	(0)
#define IRQ_USED	(1)
#define IRQ_RSVD	(2)

/* These can be overridden in platform_irq_init */
int ia64_first_device_vector = IA64_DEF_FIRST_DEVICE_VECTOR;
int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR;
@@ -54,6 +60,8 @@ int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR;

void __iomem *ipi_base_addr = ((void __iomem *)
	(__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR));

static cpumask_t vector_allocation_domain(int cpu);

/*
 * Legacy IRQ to IA-64 vector translation table.
 */
@@ -64,46 +72,269 @@ __u8 isa_irq_to_vector_map[16] = {
};
EXPORT_SYMBOL(isa_irq_to_vector_map);

static unsigned long ia64_vector_mask[BITS_TO_LONGS(IA64_MAX_DEVICE_VECTORS)];
DEFINE_SPINLOCK(vector_lock);

struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = {
	[0 ... NR_IRQS - 1] = {
		.vector = IRQ_VECTOR_UNASSIGNED,
		.domain = CPU_MASK_NONE
	}
};

DEFINE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq) = {
	[0 ... IA64_NUM_VECTORS - 1] = IA64_SPURIOUS_INT_VECTOR
};

static cpumask_t vector_table[IA64_MAX_DEVICE_VECTORS] = {
	[0 ... IA64_MAX_DEVICE_VECTORS - 1] = CPU_MASK_NONE
};

static int irq_status[NR_IRQS] = {
	[0 ... NR_IRQS -1] = IRQ_UNUSED
};

int check_irq_used(int irq)
{
	if (irq_status[irq] == IRQ_USED)
		return 1;

	return -1;
}

static void reserve_irq(unsigned int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&vector_lock, flags);
	irq_status[irq] = IRQ_RSVD;
	spin_unlock_irqrestore(&vector_lock, flags);
}

static inline int find_unassigned_irq(void)
{
	int irq;

	for (irq = IA64_FIRST_DEVICE_VECTOR; irq < NR_IRQS; irq++)
		if (irq_status[irq] == IRQ_UNUSED)
			return irq;
	return -ENOSPC;
}

static inline int find_unassigned_vector(cpumask_t domain)
{
	cpumask_t mask;
	int pos;

	cpus_and(mask, domain, cpu_online_map);
	if (cpus_empty(mask))
		return -EINVAL;

	for (pos = 0; pos < IA64_NUM_DEVICE_VECTORS; pos++) {
		cpus_and(mask, domain, vector_table[pos]);
		if (!cpus_empty(mask))
			continue;
		return IA64_FIRST_DEVICE_VECTOR + pos;
	}
	return -ENOSPC;
}

static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
{
	cpumask_t mask;
	int cpu, pos;
	struct irq_cfg *cfg = &irq_cfg[irq];

	cpus_and(mask, domain, cpu_online_map);
	if (cpus_empty(mask))
		return -EINVAL;
	if ((cfg->vector == vector) && cpus_equal(cfg->domain, domain))
		return 0;
	if (cfg->vector != IRQ_VECTOR_UNASSIGNED)
		return -EBUSY;
	for_each_cpu_mask(cpu, mask)
		per_cpu(vector_irq, cpu)[vector] = irq;
	cfg->vector = vector;
	cfg->domain = domain;
	irq_status[irq] = IRQ_USED;
	pos = vector - IA64_FIRST_DEVICE_VECTOR;
	cpus_or(vector_table[pos], vector_table[pos], domain);
	return 0;
}

int bind_irq_vector(int irq, int vector, cpumask_t domain)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&vector_lock, flags);
	ret = __bind_irq_vector(irq, vector, domain);
	spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

static void __clear_irq_vector(int irq)
{
	int vector, cpu, pos;
	cpumask_t mask;
	cpumask_t domain;
	struct irq_cfg *cfg = &irq_cfg[irq];

	BUG_ON((unsigned)irq >= NR_IRQS);
	BUG_ON(cfg->vector == IRQ_VECTOR_UNASSIGNED);
	vector = cfg->vector;
	domain = cfg->domain;
	cpus_and(mask, cfg->domain, cpu_online_map);
	for_each_cpu_mask(cpu, mask)
		per_cpu(vector_irq, cpu)[vector] = IA64_SPURIOUS_INT_VECTOR;
	cfg->vector = IRQ_VECTOR_UNASSIGNED;
	cfg->domain = CPU_MASK_NONE;
	irq_status[irq] = IRQ_UNUSED;
	pos = vector - IA64_FIRST_DEVICE_VECTOR;
	cpus_andnot(vector_table[pos], vector_table[pos], domain);
}

static void clear_irq_vector(int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq);
	spin_unlock_irqrestore(&vector_lock, flags);
}

int
assign_irq_vector (int irq)
{
	int pos, vector;
 again:
	pos = find_first_zero_bit(ia64_vector_mask, IA64_NUM_DEVICE_VECTORS);
	vector = IA64_FIRST_DEVICE_VECTOR + pos;
	if (vector > IA64_LAST_DEVICE_VECTOR)
		return -ENOSPC;
	if (test_and_set_bit(pos, ia64_vector_mask))
		goto again;
	unsigned long flags;
	int vector, cpu;
	cpumask_t domain;

	vector = -ENOSPC;

	spin_lock_irqsave(&vector_lock, flags);
	if (irq < 0) {
		goto out;
	}
	for_each_online_cpu(cpu) {
		domain = vector_allocation_domain(cpu);
		vector = find_unassigned_vector(domain);
		if (vector >= 0)
			break;
	}
	if (vector < 0)
		goto out;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
 out:
	spin_unlock_irqrestore(&vector_lock, flags);
	return vector;
}

void
free_irq_vector (int vector)
{
	int pos;

	if (vector < IA64_FIRST_DEVICE_VECTOR || vector > IA64_LAST_DEVICE_VECTOR)
	if (vector < IA64_FIRST_DEVICE_VECTOR ||
	    vector > IA64_LAST_DEVICE_VECTOR)
		return;

	pos = vector - IA64_FIRST_DEVICE_VECTOR;
	if (!test_and_clear_bit(pos, ia64_vector_mask))
		printk(KERN_WARNING "%s: double free!\n", __FUNCTION__);
	clear_irq_vector(vector);
}

int
reserve_irq_vector (int vector)
{
	int pos;

	if (vector < IA64_FIRST_DEVICE_VECTOR ||
	    vector > IA64_LAST_DEVICE_VECTOR)
		return -EINVAL;
	return !!bind_irq_vector(vector, vector, CPU_MASK_ALL);
}

	pos = vector - IA64_FIRST_DEVICE_VECTOR;
	return test_and_set_bit(pos, ia64_vector_mask);
/*
 * Initialize vector_irq on a new cpu. This function must be called
 * with vector_lock held.
 */
void __setup_vector_irq(int cpu)
{
	int irq, vector;

	/* Clear vector_irq */
	for (vector = 0; vector < IA64_NUM_VECTORS; ++vector)
		per_cpu(vector_irq, cpu)[vector] = IA64_SPURIOUS_INT_VECTOR;
	/* Mark the inuse vectors */
	for (irq = 0; irq < NR_IRQS; ++irq) {
		if (!cpu_isset(cpu, irq_cfg[irq].domain))
			continue;
		vector = irq_to_vector(irq);
		per_cpu(vector_irq, cpu)[vector] = irq;
	}
}

#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG))
static enum vector_domain_type {
	VECTOR_DOMAIN_NONE,
	VECTOR_DOMAIN_PERCPU
} vector_domain_type = VECTOR_DOMAIN_NONE;

static cpumask_t vector_allocation_domain(int cpu)
{
	if (vector_domain_type == VECTOR_DOMAIN_PERCPU)
		return cpumask_of_cpu(cpu);
	return CPU_MASK_ALL;
}

static int __init parse_vector_domain(char *arg)
{
	if (!arg)
		return -EINVAL;
	if (!strcmp(arg, "percpu")) {
		vector_domain_type = VECTOR_DOMAIN_PERCPU;
		no_int_routing = 1;
	}
	return 1;
}
early_param("vector", parse_vector_domain);
#else
static cpumask_t vector_allocation_domain(int cpu)
{
	return CPU_MASK_ALL;
}
#endif

void destroy_and_reserve_irq(unsigned int irq)
{
	dynamic_irq_cleanup(irq);

	clear_irq_vector(irq);
	reserve_irq(irq);
}

static int __reassign_irq_vector(int irq, int cpu)
{
	struct irq_cfg *cfg = &irq_cfg[irq];
	int vector;
	cpumask_t domain;

	if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
		return -EINVAL;
	if (cpu_isset(cpu, cfg->domain))
		return 0;
	domain = vector_allocation_domain(cpu);
	vector = find_unassigned_vector(domain);
	if (vector < 0)
		return -ENOSPC;
	__clear_irq_vector(irq);
	BUG_ON(__bind_irq_vector(irq, vector, domain));
	return 0;
}

int reassign_irq_vector(int irq, int cpu)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&vector_lock, flags);
	ret = __reassign_irq_vector(irq, cpu);
	spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

/*
@@ -111,18 +342,35 @@ reserve_irq_vector (int vector)
 */
int create_irq(void)
{
	int vector = assign_irq_vector(AUTO_ASSIGN);
	unsigned long flags;
	int irq, vector, cpu;
	cpumask_t domain;

	if (vector >= 0)
		dynamic_irq_init(vector);

	return vector;
	irq = vector = -ENOSPC;
	spin_lock_irqsave(&vector_lock, flags);
	for_each_online_cpu(cpu) {
		domain = vector_allocation_domain(cpu);
		vector = find_unassigned_vector(domain);
		if (vector >= 0)
			break;
	}
	if (vector < 0)
		goto out;
	irq = find_unassigned_irq();
	if (irq < 0)
		goto out;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
 out:
	spin_unlock_irqrestore(&vector_lock, flags);
	if (irq >= 0)
		dynamic_irq_init(irq);
	return irq;
}

void destroy_irq(unsigned int irq)
{
	dynamic_irq_cleanup(irq);
	free_irq_vector(irq);
	clear_irq_vector(irq);
}

#ifdef CONFIG_SMP
@@ -301,14 +549,13 @@ register_percpu_irq (ia64_vector vec, struct irqaction *action)
	irq_desc_t *desc;
	unsigned int irq;

	for (irq = 0; irq < NR_IRQS; ++irq)
		if (irq_to_vector(irq) == vec) {
			desc = irq_desc + irq;
			desc->status |= IRQ_PER_CPU;
			desc->chip = &irq_type_ia64_lsapic;
			if (action)
				setup_irq(irq, action);
		}
	irq = vec;
	BUG_ON(bind_irq_vector(irq, vec, CPU_MASK_ALL));
	desc = irq_desc + irq;
	desc->status |= IRQ_PER_CPU;
	desc->chip = &irq_type_ia64_lsapic;
	if (action)
		setup_irq(irq, action);
}

void __init
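With the allocator above in place, a dynamically created IRQ is used like any other. A minimal sketch of the driver-side flow (not from this patch; the handler, device name and cookie are hypothetical, and error handling is trimmed):

	#include <linux/interrupt.h>	/* request_irq(), free_irq() */
	#include <linux/irq.h>

	/* hypothetical interrupt handler */
	static irqreturn_t my_handler(int irq, void *dev_id)
	{
		/* acknowledge the device here */
		return IRQ_HANDLED;
	}

	/* hypothetical setup helper */
	static int my_setup(void *dev_cookie)
	{
		int irq, err;

		irq = create_irq();	/* finds an unused irq and binds a vector/domain */
		if (irq < 0)
			return irq;

		err = request_irq(irq, my_handler, 0, "my-device", dev_cookie);
		if (err) {
			destroy_irq(irq);	/* releases the vector, marks the irq unused */
			return err;
		}
		return irq;
	}

In this series the real consumer is the MSI path below: ia64_setup_msi_irq() calls create_irq() and then derives the destination CPU from irq_to_domain(irq).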
@@ -13,6 +13,7 @@

#define MSI_DATA_VECTOR_SHIFT		0
#define MSI_DATA_VECTOR(v)		(((u8)v) << MSI_DATA_VECTOR_SHIFT)
#define MSI_DATA_VECTOR_MASK		0xffffff00

#define MSI_DATA_DELIVERY_SHIFT		8
#define MSI_DATA_DELIVERY_FIXED		(0 << MSI_DATA_DELIVERY_SHIFT)
@@ -50,17 +51,29 @@ static struct irq_chip ia64_msi_chip;
static void ia64_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask)
{
	struct msi_msg msg;
	u32 addr;
	u32 addr, data;
	int cpu = first_cpu(cpu_mask);

	if (!cpu_online(cpu))
		return;

	if (reassign_irq_vector(irq, cpu))
		return;

	read_msi_msg(irq, &msg);

	addr = msg.address_lo;
	addr &= MSI_ADDR_DESTID_MASK;
	addr |= MSI_ADDR_DESTID_CPU(cpu_physical_id(first_cpu(cpu_mask)));
	addr |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu));
	msg.address_lo = addr;

	data = msg.data;
	data &= MSI_DATA_VECTOR_MASK;
	data |= MSI_DATA_VECTOR(irq_to_vector(irq));
	msg.data = data;

	write_msi_msg(irq, &msg);
	irq_desc[irq].affinity = cpu_mask;
	irq_desc[irq].affinity = cpumask_of_cpu(cpu);
}
#endif /* CONFIG_SMP */

@@ -69,13 +82,15 @@ int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
	struct msi_msg msg;
	unsigned long dest_phys_id;
	int irq, vector;
	cpumask_t mask;

	irq = create_irq();
	if (irq < 0)
		return irq;

	set_irq_msi(irq, desc);
	dest_phys_id = cpu_physical_id(first_cpu(cpu_online_map));
	cpus_and(mask, irq_to_domain(irq), cpu_online_map);
	dest_phys_id = cpu_physical_id(first_cpu(mask));
	vector = irq_to_vector(irq);

	msg.address_hi = 0;
@@ -395,9 +395,13 @@ smp_callin (void)
	fix_b0_for_bsp();

	lock_ipi_calllock();
	spin_lock(&vector_lock);
	/* Setup the per cpu irq handling data structures */
	__setup_vector_irq(cpuid);
	cpu_set(cpuid, cpu_online_map);
	unlock_ipi_calllock();
	per_cpu(cpu_state, cpuid) = CPU_ONLINE;
	spin_unlock(&vector_lock);

	smp_setup_percpu_timer();
@@ -90,13 +90,27 @@ enum {
extern __u8 isa_irq_to_vector_map[16];
#define isa_irq_to_vector(x)	isa_irq_to_vector_map[(x)]

struct irq_cfg {
	ia64_vector vector;
	cpumask_t domain;
};
extern spinlock_t vector_lock;
extern struct irq_cfg irq_cfg[NR_IRQS];
#define irq_to_domain(x)	irq_cfg[(x)].domain
DECLARE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq);

extern struct hw_interrupt_type irq_type_ia64_lsapic;	/* CPU-internal interrupt controller */

extern int bind_irq_vector(int irq, int vector, cpumask_t domain);
extern int assign_irq_vector (int irq);	/* allocate a free vector */
extern void free_irq_vector (int vector);
extern int reserve_irq_vector (int vector);
extern void __setup_vector_irq(int cpu);
extern int reassign_irq_vector(int irq, int cpu);
extern void ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect);
extern void register_percpu_irq (ia64_vector vec, struct irqaction *action);
extern int check_irq_used (int irq);
extern void destroy_and_reserve_irq (unsigned int irq);

static inline void ia64_resend_irq(unsigned int vector)
{
@@ -113,7 +127,7 @@ extern irq_desc_t irq_desc[NR_IRQS];
static inline unsigned int
__ia64_local_vector_to_irq (ia64_vector vec)
{
	return (unsigned int) vec;
	return __get_cpu_var(vector_irq)[vec];
}
#endif

@@ -131,7 +145,7 @@ __ia64_local_vector_to_irq (ia64_vector vec)
static inline ia64_vector
irq_to_vector (int irq)
{
	return (ia64_vector) irq;
	return irq_cfg[irq].vector;
}

/*
@@ -47,19 +47,21 @@
#define IOSAPIC_MASK_SHIFT	16
#define IOSAPIC_MASK		(1<<IOSAPIC_MASK_SHIFT)

#define IOSAPIC_VECTOR_MASK	0xffffff00

#ifndef __ASSEMBLY__

#ifdef CONFIG_IOSAPIC

#define NR_IOSAPICS		256

static inline unsigned int iosapic_read(char __iomem *iosapic, unsigned int reg)
static inline unsigned int __iosapic_read(char __iomem *iosapic, unsigned int reg)
{
	writel(reg, iosapic + IOSAPIC_REG_SELECT);
	return readl(iosapic + IOSAPIC_WINDOW);
}

static inline void iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
static inline void __iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
{
	writel(reg, iosapic + IOSAPIC_REG_SELECT);
	writel(val, iosapic + IOSAPIC_WINDOW);
@@ -14,8 +14,13 @@
#include <linux/types.h>
#include <linux/cpumask.h>

#define NR_IRQS		256
#define NR_IRQ_VECTORS	NR_IRQS
#define NR_VECTORS	256

#if (NR_VECTORS + 32 * NR_CPUS) < 1024
#define NR_IRQS		(NR_VECTORS + 32 * NR_CPUS)
#else
#define NR_IRQS		1024
#endif

static __inline__ int
irq_canonicalize (int irq)
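A quick worked example of the NR_IRQS sizing above: with NR_VECTORS = 256, a kernel built for NR_CPUS = 16 gets NR_IRQS = 256 + 32 * 16 = 768, while NR_CPUS = 64 would give 2304 and is therefore clamped to 1024 by the #else branch. The extra 32 entries per CPU give headroom for the per-cpu vector domains introduced by this series, where the same vector number can back different IRQs on different CPUs.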