Merge branch 'x86-apic-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 apic updates from Thomas Gleixner:
 "This update contains:

   - rework of the irq vector array to store a pointer to the irq
     descriptor instead of the irq number, to avoid a lookup of the irq
     descriptor in the irq entry path

   - lguest interrupt handling cleanups

   - conversion of the local apic timer to the new clockevent callbacks

   - preparatory changes for the irq argument removal of interrupt flow
     handlers"

* 'x86-apic-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/irq: Do not dereference irq descriptor before checking it
  tools/lguest: Clean up include dir
  tools/lguest: Fix redefinition of struct virtio_pci_cfg_cap
  x86/irq: Store irq descriptor in vector array
  genirq: Provide irq_desc_has_action
  x86/irq: Get rid of an indentation level
  x86/irq: Rename VECTOR_UNDEFINED to VECTOR_UNUSED
  x86/irq: Replace numeric constant
  x86/irq: Protect smp_cleanup_move
  x86/lguest: Do not setup unused irq vectors
  x86/lguest: Clean up lguest_setup_irq
  x86/apic: Drop local_irq_save/restore in timer callbacks
  x86/apic: Migrate apic timer to new set_state interface
  x86/irq: Use access helper irq_data_get_affinity_mask()
  x86/irq: Use accessor irq_data_get_irq_handler_data()
  x86/irq: Use accessor irq_data_get_node()
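The core of the series is the vector_irq conversion visible in the first hunk below: the per-cpu vector table stops holding irq numbers and holds struct irq_desc pointers instead, so the irq entry path no longer pays for an irq_to_desc() lookup, and the old sentinel range test becomes a single pointer check. A condensed before/after sketch of that access pattern (illustration only, assembled from the helpers and sentinels that appear in the diff; not a verbatim excerpt):

    /* Before: the table held irq numbers; the entry path had to map
     * number -> descriptor (a radix-tree walk with CONFIG_SPARSE_IRQ). */
    int irq = __this_cpu_read(vector_irq[vector]);
    if (irq > VECTOR_UNDEFINED) {                    /* -1/-2 sentinels */
            struct irq_desc *desc = irq_to_desc(irq);  /* extra lookup */
            if (desc)
                    generic_handle_irq_desc(irq, desc);
    }

    /* After: the table holds descriptor pointers.  VECTOR_UNUSED is NULL
     * and VECTOR_RETRIGGERED is ((void *)~0UL), an error-pointer value,
     * so one IS_ERR_OR_NULL() test filters both sentinels. */
    struct irq_desc *desc = __this_cpu_read(vector_irq[vector]);
    if (!IS_ERR_OR_NULL(desc))
            generic_handle_irq_desc(irq_desc_get_irq(desc), desc);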
commit 43af9872f5
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
@@ -182,10 +182,10 @@ extern char irq_entries_start[];
 #define trace_irq_entries_start irq_entries_start
 #endif
 
-#define VECTOR_UNDEFINED	(-1)
-#define VECTOR_RETRIGGERED	(-2)
+#define VECTOR_UNUSED		NULL
+#define VECTOR_RETRIGGERED	((void *)~0UL)
 
-typedef int vector_irq_t[NR_VECTORS];
+typedef struct irq_desc* vector_irq_t[NR_VECTORS];
 DECLARE_PER_CPU(vector_irq_t, vector_irq);
 
 #endif /* !ASSEMBLY_ */
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
@@ -36,7 +36,9 @@ extern void kvm_set_posted_intr_wakeup_handler(void (*handler)(void));
 
 extern void (*x86_platform_ipi_callback)(void);
 extern void native_init_IRQ(void);
-extern bool handle_irq(unsigned irq, struct pt_regs *regs);
+
+struct irq_desc;
+extern bool handle_irq(struct irq_desc *desc, struct pt_regs *regs);
 
 extern __visible unsigned int do_IRQ(struct pt_regs *regs);
 
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
@@ -462,40 +462,40 @@ static int lapic_next_deadline(unsigned long delta,
 	return 0;
 }
 
-/*
- * Setup the lapic timer in periodic or oneshot mode
- */
-static void lapic_timer_setup(enum clock_event_mode mode,
-			      struct clock_event_device *evt)
+static int lapic_timer_shutdown(struct clock_event_device *evt)
 {
-	unsigned long flags;
 	unsigned int v;
 
 	/* Lapic used as dummy for broadcast ? */
 	if (evt->features & CLOCK_EVT_FEAT_DUMMY)
-		return;
+		return 0;
 
-	local_irq_save(flags);
+	v = apic_read(APIC_LVTT);
+	v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
+	apic_write(APIC_LVTT, v);
+	apic_write(APIC_TMICT, 0);
+	return 0;
+}
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-	case CLOCK_EVT_MODE_ONESHOT:
-		__setup_APIC_LVTT(lapic_timer_frequency,
-				  mode != CLOCK_EVT_MODE_PERIODIC, 1);
-		break;
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		v = apic_read(APIC_LVTT);
-		v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
-		apic_write(APIC_LVTT, v);
-		apic_write(APIC_TMICT, 0);
-		break;
-	case CLOCK_EVT_MODE_RESUME:
-		/* Nothing to do here */
-		break;
-	}
+static inline int
+lapic_timer_set_periodic_oneshot(struct clock_event_device *evt, bool oneshot)
+{
+	/* Lapic used as dummy for broadcast ? */
+	if (evt->features & CLOCK_EVT_FEAT_DUMMY)
+		return 0;
 
-	local_irq_restore(flags);
+	__setup_APIC_LVTT(lapic_timer_frequency, oneshot, 1);
+	return 0;
 }
+
+static int lapic_timer_set_periodic(struct clock_event_device *evt)
+{
+	return lapic_timer_set_periodic_oneshot(evt, false);
+}
+
+static int lapic_timer_set_oneshot(struct clock_event_device *evt)
+{
+	return lapic_timer_set_periodic_oneshot(evt, true);
+}
 
 /*
@@ -513,15 +513,18 @@ static void lapic_timer_broadcast(const struct cpumask *mask)
  * The local apic timer can be used for any function which is CPU local.
  */
 static struct clock_event_device lapic_clockevent = {
-	.name		= "lapic",
-	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT
-			| CLOCK_EVT_FEAT_C3STOP | CLOCK_EVT_FEAT_DUMMY,
-	.shift		= 32,
-	.set_mode	= lapic_timer_setup,
-	.set_next_event	= lapic_next_event,
-	.broadcast	= lapic_timer_broadcast,
-	.rating		= 100,
-	.irq		= -1,
+	.name			= "lapic",
+	.features		= CLOCK_EVT_FEAT_PERIODIC |
+				  CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP
+				  | CLOCK_EVT_FEAT_DUMMY,
+	.shift			= 32,
+	.set_state_shutdown	= lapic_timer_shutdown,
+	.set_state_periodic	= lapic_timer_set_periodic,
+	.set_state_oneshot	= lapic_timer_set_oneshot,
+	.set_next_event		= lapic_next_event,
+	.broadcast		= lapic_timer_broadcast,
+	.rating			= 100,
+	.irq			= -1,
 };
 static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
 
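The two apic.c hunks above show the clockevent conversion: the single set_mode(enum clock_event_mode, ...) callback is replaced by per-state callbacks that return an int. A minimal sketch of the shape a driver takes on the new interface (hypothetical "my_timer" device and stub bodies; only the callback names and feature flags are taken from the diff):

    static int my_timer_shutdown(struct clock_event_device *evt)
    {
            /* stop the hardware counter here */
            return 0;
    }

    static int my_timer_set_periodic(struct clock_event_device *evt)
    {
            /* program the counter to auto-reload here */
            return 0;
    }

    static struct clock_event_device my_timer = {
            .name               = "my_timer",
            .features           = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
            .set_state_shutdown = my_timer_shutdown,
            .set_state_periodic = my_timer_set_periodic,
            /* .set_state_oneshot, .set_next_event, .broadcast as needed */
    };

One consequence, visible in the calibrate_APIC_clock() hunks below: the callbacks no longer save/restore interrupt flags themselves, so the caller now brackets them with local_irq_disable()/local_irq_enable() where needed.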
@@ -778,7 +781,7 @@ static int __init calibrate_APIC_clock(void)
 	 * Setup the apic timer manually
 	 */
 	levt->event_handler = lapic_cal_handler;
-	lapic_timer_setup(CLOCK_EVT_MODE_PERIODIC, levt);
+	lapic_timer_set_periodic(levt);
 	lapic_cal_loops = -1;
 
 	/* Let the interrupts run */
@@ -788,7 +791,8 @@ static int __init calibrate_APIC_clock(void)
 		cpu_relax();
 
 	/* Stop the lapic timer */
-	lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, levt);
+	local_irq_disable();
+	lapic_timer_shutdown(levt);
 
 	/* Jiffies delta */
 	deltaj = lapic_cal_j2 - lapic_cal_j1;
@@ -799,8 +803,8 @@ static int __init calibrate_APIC_clock(void)
 			apic_printk(APIC_VERBOSE, "... jiffies result ok\n");
 		else
 			levt->features |= CLOCK_EVT_FEAT_DUMMY;
-	} else
-		local_irq_enable();
+	}
+	local_irq_enable();
 
 	if (levt->features & CLOCK_EVT_FEAT_DUMMY) {
 		pr_warning("APIC timer disabled due to verification failure\n");
@@ -878,7 +882,7 @@ static void local_apic_timer_interrupt(void)
 	if (!evt->event_handler) {
 		pr_warning("Spurious LAPIC timer interrupt on cpu %d\n", cpu);
 		/* Switch it off */
-		lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, evt);
+		lapic_timer_shutdown(evt);
 		return;
 	}
 
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
@@ -2541,7 +2541,7 @@ void __init setup_ioapic_dest(void)
 		 * Honour affinities which have been set in early boot
 		 */
 		if (!irqd_can_balance(idata) || irqd_affinity_was_set(idata))
-			mask = idata->affinity;
+			mask = irq_data_get_affinity_mask(idata);
 		else
 			mask = apic->target_cpus();
 
diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c
@@ -264,7 +264,7 @@ static inline int hpet_dev_id(struct irq_domain *domain)
 
 static void hpet_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
 {
-	hpet_msi_write(data->handler_data, msg);
+	hpet_msi_write(irq_data_get_irq_handler_data(data), msg);
 }
 
 static struct irq_chip hpet_msi_controller = {
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
@@ -169,8 +169,7 @@ next:
 			goto next;
 
 		for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask) {
-			if (per_cpu(vector_irq, new_cpu)[vector] >
-			    VECTOR_UNDEFINED)
+			if (!IS_ERR_OR_NULL(per_cpu(vector_irq, new_cpu)[vector]))
 				goto next;
 		}
 		/* Found one! */
@@ -182,7 +181,7 @@ next:
 			cpumask_intersects(d->old_domain, cpu_online_mask);
 		}
 		for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask)
-			per_cpu(vector_irq, new_cpu)[vector] = irq;
+			per_cpu(vector_irq, new_cpu)[vector] = irq_to_desc(irq);
 		d->cfg.vector = vector;
 		cpumask_copy(d->domain, vector_cpumask);
 		err = 0;
@@ -224,15 +223,16 @@ static int assign_irq_vector_policy(int irq, int node,
 
 static void clear_irq_vector(int irq, struct apic_chip_data *data)
 {
-	int cpu, vector;
+	struct irq_desc *desc;
 	unsigned long flags;
+	int cpu, vector;
 
 	raw_spin_lock_irqsave(&vector_lock, flags);
 	BUG_ON(!data->cfg.vector);
 
 	vector = data->cfg.vector;
 	for_each_cpu_and(cpu, data->domain, cpu_online_mask)
-		per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
+		per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
 
 	data->cfg.vector = 0;
 	cpumask_clear(data->domain);
@@ -242,12 +242,13 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
 		return;
 	}
 
+	desc = irq_to_desc(irq);
 	for_each_cpu_and(cpu, data->old_domain, cpu_online_mask) {
 		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
 		     vector++) {
-			if (per_cpu(vector_irq, cpu)[vector] != irq)
+			if (per_cpu(vector_irq, cpu)[vector] != desc)
 				continue;
-			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
+			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
 			break;
 		}
 	}
@@ -296,7 +297,7 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
 	struct irq_alloc_info *info = arg;
 	struct apic_chip_data *data;
 	struct irq_data *irq_data;
-	int i, err;
+	int i, err, node;
 
 	if (disable_apic)
 		return -ENXIO;
@@ -308,12 +309,13 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
 	for (i = 0; i < nr_irqs; i++) {
 		irq_data = irq_domain_get_irq_data(domain, virq + i);
 		BUG_ON(!irq_data);
+		node = irq_data_get_node(irq_data);
 #ifdef CONFIG_X86_IO_APIC
 		if (virq + i < nr_legacy_irqs() && legacy_irq_data[virq + i])
 			data = legacy_irq_data[virq + i];
 		else
 #endif
-			data = alloc_apic_chip_data(irq_data->node);
+			data = alloc_apic_chip_data(node);
 		if (!data) {
 			err = -ENOMEM;
 			goto error;
@@ -322,8 +324,7 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
 		irq_data->chip = &lapic_controller;
 		irq_data->chip_data = data;
 		irq_data->hwirq = virq + i;
-		err = assign_irq_vector_policy(virq + i, irq_data->node, data,
-					       info);
+		err = assign_irq_vector_policy(virq + i, node, data, info);
 		if (err)
 			goto error;
 	}
@@ -403,32 +404,32 @@ int __init arch_early_irq_init(void)
 	return arch_early_ioapic_init();
 }
 
+/* Initialize vector_irq on a new cpu */
 static void __setup_vector_irq(int cpu)
 {
-	/* Initialize vector_irq on a new cpu */
-	int irq, vector;
 	struct apic_chip_data *data;
+	struct irq_desc *desc;
+	int irq, vector;
 
 	/* Mark the inuse vectors */
-	for_each_active_irq(irq) {
-		data = apic_chip_data(irq_get_irq_data(irq));
-		if (!data)
-			continue;
+	for_each_irq_desc(irq, desc) {
+		struct irq_data *idata = irq_desc_get_irq_data(desc);
 
-		if (!cpumask_test_cpu(cpu, data->domain))
+		data = apic_chip_data(idata);
+		if (!data || !cpumask_test_cpu(cpu, data->domain))
 			continue;
 		vector = data->cfg.vector;
-		per_cpu(vector_irq, cpu)[vector] = irq;
+		per_cpu(vector_irq, cpu)[vector] = desc;
 	}
 	/* Mark the free vectors */
 	for (vector = 0; vector < NR_VECTORS; ++vector) {
-		irq = per_cpu(vector_irq, cpu)[vector];
-		if (irq <= VECTOR_UNDEFINED)
+		desc = per_cpu(vector_irq, cpu)[vector];
+		if (IS_ERR_OR_NULL(desc))
 			continue;
 
-		data = apic_chip_data(irq_get_irq_data(irq));
+		data = apic_chip_data(irq_desc_get_irq_data(desc));
 		if (!cpumask_test_cpu(cpu, data->domain))
-			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
+			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
 	}
 }
 
@@ -448,7 +449,7 @@ void setup_vector_irq(int cpu)
 	 * legacy vector to irq mapping:
 	 */
 	for (irq = 0; irq < nr_legacy_irqs(); irq++)
-		per_cpu(vector_irq, cpu)[ISA_IRQ_VECTOR(irq)] = irq;
+		per_cpu(vector_irq, cpu)[ISA_IRQ_VECTOR(irq)] = irq_to_desc(irq);
 
 	__setup_vector_irq(cpu);
 }
@@ -490,7 +491,8 @@ static int apic_set_affinity(struct irq_data *irq_data,
 	if (err) {
 		struct irq_data *top = irq_get_irq_data(irq);
 
-		if (assign_irq_vector(irq, data, top->affinity))
+		if (assign_irq_vector(irq, data,
+				      irq_data_get_affinity_mask(top)))
 			pr_err("Failed to recover vector for irq %d\n", irq);
 		return err;
 	}
@@ -538,27 +540,30 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
 
 	entering_ack_irq();
 
+	/* Prevent vectors vanishing under us */
+	raw_spin_lock(&vector_lock);
+
 	me = smp_processor_id();
 	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
-		int irq;
-		unsigned int irr;
-		struct irq_desc *desc;
 		struct apic_chip_data *data;
+		struct irq_desc *desc;
+		unsigned int irr;
 
-		irq = __this_cpu_read(vector_irq[vector]);
-
-		if (irq <= VECTOR_UNDEFINED)
+	retry:
+		desc = __this_cpu_read(vector_irq[vector]);
+		if (IS_ERR_OR_NULL(desc))
 			continue;
 
-		desc = irq_to_desc(irq);
-		if (!desc)
-			continue;
+		if (!raw_spin_trylock(&desc->lock)) {
+			raw_spin_unlock(&vector_lock);
+			cpu_relax();
+			raw_spin_lock(&vector_lock);
+			goto retry;
+		}
 
-		data = apic_chip_data(&desc->irq_data);
+		data = apic_chip_data(irq_desc_get_irq_data(desc));
 		if (!data)
-			continue;
-
-		raw_spin_lock(&desc->lock);
+			goto unlock;
 
 		/*
 		 * Check if the irq migration is in progress. If so, we
@@ -583,11 +588,13 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
 			apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
 			goto unlock;
 		}
-		__this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
+		__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
 unlock:
 		raw_spin_unlock(&desc->lock);
 	}
 
+	raw_spin_unlock(&vector_lock);
+
 	exiting_irq();
 }
 
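The smp_irq_move_cleanup_interrupt() hunks above nest desc->lock inside the newly held vector_lock, while other code paths can hold desc->lock and then contend on vector_lock; the raw_spin_trylock()/retry dance avoids the resulting ABBA deadlock. A condensed sketch of that locking pattern (taken from the diff itself, bodies elided):

    raw_spin_lock(&vector_lock);            /* outer lock held for the scan */
    for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
    retry:
            desc = __this_cpu_read(vector_irq[vector]);
            if (IS_ERR_OR_NULL(desc))
                    continue;
            if (!raw_spin_trylock(&desc->lock)) {
                    /* Someone holds desc->lock and may be spinning on
                     * vector_lock: back off and retry instead of deadlocking. */
                    raw_spin_unlock(&vector_lock);
                    cpu_relax();
                    raw_spin_lock(&vector_lock);
                    goto retry;
            }
            /* ... cleanup work under both locks ... */
            raw_spin_unlock(&desc->lock);
    }
    raw_spin_unlock(&vector_lock);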
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
@@ -426,7 +426,7 @@ static struct irq_domain *hpet_domain;
 
 void hpet_msi_unmask(struct irq_data *data)
 {
-	struct hpet_dev *hdev = data->handler_data;
+	struct hpet_dev *hdev = irq_data_get_irq_handler_data(data);
 	unsigned int cfg;
 
 	/* unmask it */
@@ -437,7 +437,7 @@ void hpet_msi_unmask(struct irq_data *data)
 
 void hpet_msi_mask(struct irq_data *data)
 {
-	struct hpet_dev *hdev = data->handler_data;
+	struct hpet_dev *hdev = irq_data_get_irq_handler_data(data);
 	unsigned int cfg;
 
 	/* mask it */
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
@@ -214,10 +214,9 @@ u64 arch_irq_stat(void)
 __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
-
+	struct irq_desc * desc;
 	/* high bit used in ret_from_ code  */
 	unsigned vector = ~regs->orig_ax;
-	unsigned irq;
 
 	/*
 	 * NB: Unlike exception entries, IRQ entries do not reliably
@@ -236,17 +235,17 @@ __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
 	/* entering_irq() tells RCU that we're not quiescent.  Check it. */
 	RCU_LOCKDEP_WARN(!rcu_is_watching(), "IRQ failed to wake up RCU");
 
-	irq = __this_cpu_read(vector_irq[vector]);
+	desc = __this_cpu_read(vector_irq[vector]);
 
-	if (!handle_irq(irq, regs)) {
+	if (!handle_irq(desc, regs)) {
 		ack_APIC_irq();
 
-		if (irq != VECTOR_RETRIGGERED) {
-			pr_emerg_ratelimited("%s: %d.%d No irq handler for vector (irq %d)\n",
+		if (desc != VECTOR_RETRIGGERED) {
+			pr_emerg_ratelimited("%s: %d.%d No irq handler for vector\n",
 					     __func__, smp_processor_id(),
-					     vector, irq);
+					     vector);
 		} else {
-			__this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
+			__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
 		}
 	}
 
@@ -348,10 +347,10 @@ static struct cpumask affinity_new, online_new;
  */
 int check_irq_vectors_for_cpu_disable(void)
 {
-	int irq, cpu;
 	unsigned int this_cpu, vector, this_count, count;
 	struct irq_desc *desc;
 	struct irq_data *data;
+	int cpu;
 
 	this_cpu = smp_processor_id();
 	cpumask_copy(&online_new, cpu_online_mask);
@@ -359,47 +358,43 @@ int check_irq_vectors_for_cpu_disable(void)
 
 	this_count = 0;
 	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
-		irq = __this_cpu_read(vector_irq[vector]);
-		if (irq >= 0) {
-			desc = irq_to_desc(irq);
-			if (!desc)
-				continue;
-
-			/*
-			 * Protect against concurrent action removal,
-			 * affinity changes etc.
-			 */
-			raw_spin_lock(&desc->lock);
-			data = irq_desc_get_irq_data(desc);
-			cpumask_copy(&affinity_new, data->affinity);
-			cpumask_clear_cpu(this_cpu, &affinity_new);
-
-			/* Do not count inactive or per-cpu irqs. */
-			if (!irq_has_action(irq) || irqd_is_per_cpu(data)) {
-				raw_spin_unlock(&desc->lock);
-				continue;
-			}
-
-			raw_spin_unlock(&desc->lock);
-			/*
-			 * A single irq may be mapped to multiple
-			 * cpu's vector_irq[] (for example IOAPIC cluster
-			 * mode).  In this case we have two
-			 * possibilities:
-			 *
-			 * 1) the resulting affinity mask is empty; that is
-			 * this the down'd cpu is the last cpu in the irq's
-			 * affinity mask, or
-			 *
-			 * 2) the resulting affinity mask is no longer
-			 * a subset of the online cpus but the affinity
-			 * mask is not zero; that is the down'd cpu is the
-			 * last online cpu in a user set affinity mask.
-			 */
-			if (cpumask_empty(&affinity_new) ||
-			    !cpumask_subset(&affinity_new, &online_new))
-				this_count++;
+		desc = __this_cpu_read(vector_irq[vector]);
+		if (IS_ERR_OR_NULL(desc))
+			continue;
+		/*
+		 * Protect against concurrent action removal, affinity
+		 * changes etc.
+		 */
+		raw_spin_lock(&desc->lock);
+		data = irq_desc_get_irq_data(desc);
+		cpumask_copy(&affinity_new,
+			     irq_data_get_affinity_mask(data));
+		cpumask_clear_cpu(this_cpu, &affinity_new);
+
+		/* Do not count inactive or per-cpu irqs. */
+		if (!irq_desc_has_action(desc) || irqd_is_per_cpu(data)) {
+			raw_spin_unlock(&desc->lock);
+			continue;
 		}
+
+		raw_spin_unlock(&desc->lock);
+		/*
+		 * A single irq may be mapped to multiple cpu's
+		 * vector_irq[] (for example IOAPIC cluster mode).  In
+		 * this case we have two possibilities:
+		 *
+		 * 1) the resulting affinity mask is empty; that is
+		 * this the down'd cpu is the last cpu in the irq's
+		 * affinity mask, or
+		 *
+		 * 2) the resulting affinity mask is no longer a
+		 * subset of the online cpus but the affinity mask is
+		 * not zero; that is the down'd cpu is the last online
+		 * cpu in a user set affinity mask.
+		 */
+		if (cpumask_empty(&affinity_new) ||
+		    !cpumask_subset(&affinity_new, &online_new))
+			this_count++;
 	}
 
 	count = 0;
@@ -418,8 +413,8 @@ int check_irq_vectors_for_cpu_disable(void)
 		for (vector = FIRST_EXTERNAL_VECTOR;
 		     vector < first_system_vector; vector++) {
 			if (!test_bit(vector, used_vectors) &&
-			    per_cpu(vector_irq, cpu)[vector] < 0)
-				count++;
+			    IS_ERR_OR_NULL(per_cpu(vector_irq, cpu)[vector]))
+				count++;
 		}
 	}
 
@@ -455,7 +450,7 @@ void fixup_irqs(void)
 		raw_spin_lock(&desc->lock);
 
 		data = irq_desc_get_irq_data(desc);
-		affinity = data->affinity;
+		affinity = irq_data_get_affinity_mask(data);
 		if (!irq_has_action(irq) || irqd_is_per_cpu(data) ||
 		    cpumask_subset(affinity, cpu_online_mask)) {
 			raw_spin_unlock(&desc->lock);
@@ -523,14 +518,13 @@ void fixup_irqs(void)
 	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
 		unsigned int irr;
 
-		if (__this_cpu_read(vector_irq[vector]) <= VECTOR_UNDEFINED)
+		if (IS_ERR_OR_NULL(__this_cpu_read(vector_irq[vector])))
 			continue;
 
 		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
 		if (irr & (1 << (vector % 32))) {
-			irq = __this_cpu_read(vector_irq[vector]);
+			desc = __this_cpu_read(vector_irq[vector]);
 
-			desc = irq_to_desc(irq);
 			raw_spin_lock(&desc->lock);
 			data = irq_desc_get_irq_data(desc);
 			chip = irq_data_get_irq_chip(data);
@@ -541,7 +535,7 @@ void fixup_irqs(void)
 			raw_spin_unlock(&desc->lock);
 		}
 		if (__this_cpu_read(vector_irq[vector]) != VECTOR_RETRIGGERED)
-			__this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
+			__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
 	}
 }
 #endif
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
@@ -148,21 +148,21 @@ void do_softirq_own_stack(void)
 	call_on_stack(__do_softirq, isp);
 }
 
-bool handle_irq(unsigned irq, struct pt_regs *regs)
+bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
 {
-	struct irq_desc *desc;
+	unsigned int irq;
 	int overflow;
 
 	overflow = check_stack_overflow();
 
-	desc = irq_to_desc(irq);
-	if (unlikely(!desc))
+	if (IS_ERR_OR_NULL(desc))
 		return false;
 
+	irq = irq_desc_get_irq(desc);
 	if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
 		if (unlikely(overflow))
 			print_stack_overflow();
-		desc->handle_irq(irq, desc);
+		generic_handle_irq_desc(irq, desc);
 	}
 
 	return true;
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
@@ -68,16 +68,13 @@ static inline void stack_overflow_check(struct pt_regs *regs)
 #endif
 }
 
-bool handle_irq(unsigned irq, struct pt_regs *regs)
+bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
 {
-	struct irq_desc *desc;
-
 	stack_overflow_check(regs);
 
-	desc = irq_to_desc(irq);
-	if (unlikely(!desc))
+	if (unlikely(IS_ERR_OR_NULL(desc)))
 		return false;
 
-	generic_handle_irq_desc(irq, desc);
+	generic_handle_irq_desc(irq_desc_get_irq(desc), desc);
 	return true;
 }
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
@@ -52,7 +52,7 @@ static struct irqaction irq2 = {
 };
 
 DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
-	[0 ... NR_VECTORS - 1] = VECTOR_UNDEFINED,
+	[0 ... NR_VECTORS - 1] = VECTOR_UNUSED,
 };
 
 int vector_used_by_percpu_irq(unsigned int vector)
@@ -60,7 +60,7 @@ int vector_used_by_percpu_irq(unsigned int vector)
 	int cpu;
 
 	for_each_online_cpu(cpu) {
-		if (per_cpu(vector_irq, cpu)[vector] > VECTOR_UNDEFINED)
+		if (!IS_ERR_OR_NULL(per_cpu(vector_irq, cpu)[vector]))
 			return 1;
 	}
 
@@ -94,7 +94,7 @@ void __init init_IRQ(void)
 	 * irq's migrate etc.
 	 */
 	for (i = 0; i < nr_legacy_irqs(); i++)
-		per_cpu(vector_irq, 0)[ISA_IRQ_VECTOR(i)] = i;
+		per_cpu(vector_irq, 0)[ISA_IRQ_VECTOR(i)] = irq_to_desc(i);
 
 	x86_init.irqs.intr_init();
 }
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
@@ -835,16 +835,46 @@ static struct irq_chip lguest_irq_controller = {
 	.irq_unmask	= enable_lguest_irq,
 };
 
+/*
+ * Interrupt descriptors are allocated as-needed, but low-numbered ones are
+ * reserved by the generic x86 code.  So we ignore irq_alloc_desc_at if it
+ * tells us the irq is already used: other errors (ie. ENOMEM) we take
+ * seriously.
+ */
+static int lguest_setup_irq(unsigned int irq)
+{
+	struct irq_desc *desc;
+	int err;
+
+	/* Returns -ve error or vector number. */
+	err = irq_alloc_desc_at(irq, 0);
+	if (err < 0 && err != -EEXIST)
+		return err;
+
+	/*
+	 * Tell the Linux infrastructure that the interrupt is
+	 * controlled by our level-based lguest interrupt controller.
+	 */
+	irq_set_chip_and_handler_name(irq, &lguest_irq_controller,
+				      handle_level_irq, "level");
+
+	/* Some systems map "vectors" to interrupts weirdly.  Not us! */
+	desc = irq_to_desc(irq);
+	__this_cpu_write(vector_irq[FIRST_EXTERNAL_VECTOR + irq], desc);
+	return 0;
+}
+
 static int lguest_enable_irq(struct pci_dev *dev)
 {
+	int err;
 	u8 line = 0;
 
 	/* We literally use the PCI interrupt line as the irq number. */
 	pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &line);
-	irq_set_chip_and_handler_name(line, &lguest_irq_controller,
-				      handle_level_irq, "level");
-	dev->irq = line;
-	return 0;
+	err = lguest_setup_irq(line);
+	if (!err)
+		dev->irq = line;
+	return err;
 }
 
 /* We don't do hotplug PCI, so this shouldn't be called. */
@@ -855,17 +885,13 @@ static void lguest_disable_irq(struct pci_dev *dev)
 
 /*
  * This sets up the Interrupt Descriptor Table (IDT) entry for each hardware
- * interrupt (except 128, which is used for system calls), and then tells the
- * Linux infrastructure that each interrupt is controlled by our level-based
- * lguest interrupt controller.
+ * interrupt (except 128, which is used for system calls).
  */
 static void __init lguest_init_IRQ(void)
 {
 	unsigned int i;
 
 	for (i = FIRST_EXTERNAL_VECTOR; i < FIRST_SYSTEM_VECTOR; i++) {
-		/* Some systems map "vectors" to interrupts weirdly.  Not us! */
-		__this_cpu_write(vector_irq[i], i - FIRST_EXTERNAL_VECTOR);
 		if (i != IA32_SYSCALL_VECTOR)
 			set_intr_gate(i, irq_entries_start +
 				      8 * (i - FIRST_EXTERNAL_VECTOR));
@@ -878,26 +904,6 @@ static void __init lguest_init_IRQ(void)
 	irq_ctx_init(smp_processor_id());
 }
 
-/*
- * Interrupt descriptors are allocated as-needed, but low-numbered ones are
- * reserved by the generic x86 code.  So we ignore irq_alloc_desc_at if it
- * tells us the irq is already used: other errors (ie. ENOMEM) we take
- * seriously.
- */
-int lguest_setup_irq(unsigned int irq)
-{
-	int err;
-
-	/* Returns -ve error or vector number. */
-	err = irq_alloc_desc_at(irq, 0);
-	if (err < 0 && err != -EEXIST)
-		return err;
-
-	irq_set_chip_and_handler_name(irq, &lguest_irq_controller,
-				      handle_level_irq, "level");
-	return 0;
-}
-
 /*
  * Time.
  *
@@ -1028,7 +1034,8 @@ static void lguest_time_irq(unsigned int irq, struct irq_desc *desc)
 static void lguest_time_init(void)
 {
 	/* Set up the timer interrupt (0) to go to our simple timer routine */
-	lguest_setup_irq(0);
+	if (lguest_setup_irq(0) != 0)
+		panic("Could not set up timer irq");
 	irq_set_handler(0, lguest_time_irq);
 
 	clocksource_register_hz(&lguest_clock, NSEC_PER_SEC);
diff --git a/arch/x86/platform/uv/uv_irq.c b/arch/x86/platform/uv/uv_irq.c
@@ -89,7 +89,7 @@ static int uv_domain_alloc(struct irq_domain *domain, unsigned int virq,
 		return -EINVAL;
 
 	chip_data = kmalloc_node(sizeof(*chip_data), GFP_KERNEL,
-				 irq_data->node);
+				 irq_data_get_node(irq_data));
 	if (!chip_data)
 		return -ENOMEM;
 
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
@@ -336,7 +336,7 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
 
 	BUG_ON(irq == -1);
 #ifdef CONFIG_SMP
-	cpumask_copy(irq_get_irq_data(irq)->affinity, cpumask_of(cpu));
+	cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(cpu));
 #endif
 	xen_evtchn_port_bind_to_cpu(info, cpu);
 
@@ -373,7 +373,7 @@ static void xen_irq_init(unsigned irq)
 	struct irq_info *info;
 #ifdef CONFIG_SMP
 	/* By default all event channels notify CPU#0. */
-	cpumask_copy(irq_get_irq_data(irq)->affinity, cpumask_of(0));
+	cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(0));
 #endif
 
 	info = kzalloc(sizeof(*info), GFP_KERNEL);
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
@@ -166,10 +166,14 @@ static inline int handle_domain_irq(struct irq_domain *domain,
 
 #endif
 
+/* Test to see if a driver has successfully requested an irq */
+static inline int irq_desc_has_action(struct irq_desc *desc)
+{
+	return desc->action != NULL;
+}
+
 static inline int irq_has_action(unsigned int irq)
 {
-	struct irq_desc *desc = irq_to_desc(irq);
-	return desc->action != NULL;
+	return irq_desc_has_action(irq_to_desc(irq));
 }
 
 /* caller has locked the irq_desc and both params are valid */
diff --git a/tools/lguest/.gitignore b/tools/lguest/.gitignore
@@ -1 +1,2 @@
 lguest
+include
diff --git a/tools/lguest/Makefile b/tools/lguest/Makefile
@@ -11,3 +11,4 @@ lguest: include/linux/virtio_types.h
 
 clean:
 	rm -f lguest
+	rm -rf include
diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c
@@ -125,7 +125,11 @@ struct device_list {
 /* The list of Guest devices, based on command line arguments. */
 static struct device_list devices;
 
-struct virtio_pci_cfg_cap {
+/*
+ * Just like struct virtio_pci_cfg_cap in uapi/linux/virtio_pci.h,
+ * but uses a u32 explicitly for the data.
+ */
+struct virtio_pci_cfg_cap_u32 {
 	struct virtio_pci_cap cap;
 	u32 pci_cfg_data; /* Data for BAR access. */
 };
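Context for the rename above: the tool now ships the kernel's uapi headers (see the Makefile and .gitignore hunks), so its local struct collided with the uapi definition of the same name. The uapi layout declares the BAR access window as a byte array, while the tool wants word-sized access; a side-by-side sketch (the uapi shape is quoted from memory of uapi/linux/virtio_pci.h, shown for comparison only):

    /* uapi/linux/virtio_pci.h (kernel-side definition): */
    struct virtio_pci_cfg_cap {
            struct virtio_pci_cap cap;
            __u8 pci_cfg_data[4];   /* Data for BAR access. */
    };

    /* tools/lguest copy after this patch: same size, explicit u32 access: */
    struct virtio_pci_cfg_cap_u32 {
            struct virtio_pci_cap cap;
            u32 pci_cfg_data;       /* Data for BAR access. */
    };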
@@ -157,7 +161,7 @@ struct pci_config {
 	struct virtio_pci_notify_cap notify;
 	struct virtio_pci_cap isr;
 	struct virtio_pci_cap device;
-	struct virtio_pci_cfg_cap cfg_access;
+	struct virtio_pci_cfg_cap_u32 cfg_access;
 };
 
 /* The device structure describes a single device. */
@@ -1291,7 +1295,7 @@ static struct device *dev_and_reg(u32 *reg)
  * only fault if they try to write with some invalid bar/offset/length.
  */
 static bool valid_bar_access(struct device *d,
-			     struct virtio_pci_cfg_cap *cfg_access)
+			     struct virtio_pci_cfg_cap_u32 *cfg_access)
 {
 	/* We only have 1 bar (BAR0) */
 	if (cfg_access->cap.bar != 0)