2010-05-29 11:09:12 +08:00
|
|
|
/*
|
|
|
|
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU General Public License
|
|
|
|
* as published by the Free Software Foundation, version 2.
|
|
|
|
*
|
|
|
|
* This program is distributed in the hope that it will be useful, but
|
|
|
|
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
|
|
* NON INFRINGEMENT. See the GNU General Public License for
|
|
|
|
* more details.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/seq_file.h>
|
|
|
|
#include <linux/interrupt.h>
|
|
|
|
#include <linux/irq.h>
|
|
|
|
#include <linux/kernel_stat.h>
|
|
|
|
#include <linux/uaccess.h>
|
|
|
|
#include <hv/drv_pcie_rc_intf.h>
|
2010-06-26 04:41:11 +08:00
|
|
|
#include <arch/spr_def.h>
|
|
|
|
#include <asm/traps.h>
|
2014-01-28 10:03:50 +08:00
|
|
|
#include <linux/perf_event.h>
|
2010-06-26 04:41:11 +08:00
|
|
|
|
|
|
|
/* Bit-flag stored in irq_desc->chip_data to indicate HW-cleared irqs. */
|
|
|
|
#define IS_HW_CLEARED 1
|
2010-05-29 11:09:12 +08:00
|
|
|
|
|
|
|
/*
|
2010-11-02 03:24:29 +08:00
|
|
|
* The set of interrupts we enable for arch_local_irq_enable().
|
2010-05-29 11:09:12 +08:00
|
|
|
* This is initialized to have just a single interrupt that the kernel
|
|
|
|
* doesn't actually use as a sentinel. During kernel init,
|
|
|
|
* interrupts are added as the kernel gets prepared to support them.
|
|
|
|
* NOTE: we could probably initialize them all statically up front.
|
|
|
|
*/
|
|
|
|
DEFINE_PER_CPU(unsigned long long, interrupts_enabled_mask) =
|
|
|
|
INITIAL_INTERRUPTS_ENABLED;
|
|
|
|
EXPORT_PER_CPU_SYMBOL(interrupts_enabled_mask);
|
|
|
|
|
2010-06-26 04:41:11 +08:00
|
|
|
/* Define per-tile device interrupt statistics state. */
|
2010-05-29 11:09:12 +08:00
|
|
|
DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp;
|
|
|
|
EXPORT_PER_CPU_SYMBOL(irq_stat);
|
|
|
|
|
2010-06-26 04:41:11 +08:00
|
|
|
/*
|
|
|
|
* Define per-tile irq disable mask; the hardware/HV only has a single
|
|
|
|
* mask that we use to implement both masking and disabling.
|
|
|
|
*/
|
|
|
|
static DEFINE_PER_CPU(unsigned long, irq_disable_mask)
|
|
|
|
____cacheline_internodealigned_in_smp;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Per-tile IRQ nesting depth. Used to make sure we enable newly
|
|
|
|
* enabled IRQs before exiting the outermost interrupt.
|
|
|
|
*/
|
|
|
|
static DEFINE_PER_CPU(int, irq_depth);
|
|
|
|
|
|
|
|
#if CHIP_HAS_IPI()
|
|
|
|
/* Use SPRs to manipulate device interrupts. */
|
2010-10-15 04:23:03 +08:00
|
|
|
#define mask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_SET_K, irq_mask)
|
|
|
|
#define unmask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_RESET_K, irq_mask)
|
|
|
|
#define clear_irqs(irq_mask) __insn_mtspr(SPR_IPI_EVENT_RESET_K, irq_mask)
|
2010-06-26 04:41:11 +08:00
|
|
|
#else
|
|
|
|
/* Use HV to manipulate device interrupts. */
|
|
|
|
#define mask_irqs(irq_mask) hv_disable_intr(irq_mask)
|
|
|
|
#define unmask_irqs(irq_mask) hv_enable_intr(irq_mask)
|
|
|
|
#define clear_irqs(irq_mask) hv_clear_intr(irq_mask)
|
|
|
|
#endif
|
2010-05-29 11:09:12 +08:00
|
|
|
|
|
|
|
/*
 * The interrupt handling path, implemented in terms of HV interrupt
 * emulation on TILEPro, and IPI hardware on TILE-Gx.
 * Entered with interrupts disabled.
 */
void tile_dev_intr(struct pt_regs *regs, int intnum)
{
	/*
	 * Bump the per-cpu nesting depth first, so nested invocations
	 * can see depth > 1 and skip the final unmask step below.
	 */
	int depth = __this_cpu_inc_return(irq_depth);
	unsigned long original_irqs;	/* irqs we claimed on entry */
	unsigned long remaining_irqs;	/* irqs still to be dispatched */
	struct pt_regs *old_regs;

#if CHIP_HAS_IPI()
	/*
	 * Pending interrupts are listed in an SPR.  We might be
	 * nested, so be sure to only handle irqs that weren't already
	 * masked by a previous interrupt.  Then, mask out the ones
	 * we're going to handle.
	 */
	unsigned long masked = __insn_mfspr(SPR_IPI_MASK_K);
	original_irqs = __insn_mfspr(SPR_IPI_EVENT_K) & ~masked;
	__insn_mtspr(SPR_IPI_MASK_SET_K, original_irqs);
#else
	/*
	 * Hypervisor performs the equivalent of the Gx code above and
	 * then puts the pending interrupt mask into a system save reg
	 * for us to find.
	 */
	original_irqs = __insn_mfspr(SPR_SYSTEM_SAVE_K_3);
#endif
	remaining_irqs = original_irqs;

	/* Track time spent here in an interrupt context. */
	old_regs = set_irq_regs(regs);
	irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* Debugging check for stack overflow: less than 1/8th stack free? */
	{
		long sp = stack_pointer - (long) current_thread_info();
		if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
			pr_emerg("%s: stack overflow: %ld\n",
				 __func__, sp - sizeof(struct thread_info));
			dump_stack();
		}
	}
#endif
	/* Dispatch each pending irq, lowest bit first. */
	while (remaining_irqs) {
		unsigned long irq = __ffs(remaining_irqs);
		remaining_irqs &= ~(1UL << irq);

		/* Count device irqs; Linux IPIs are counted elsewhere. */
		if (irq != IRQ_RESCHEDULE)
			__this_cpu_inc(irq_stat.irq_dev_intr_count);

		generic_handle_irq(irq);
	}

	/*
	 * If we weren't nested, turn on all enabled interrupts,
	 * including any that were reenabled during interrupt
	 * handling.
	 */
	if (depth == 1)
		unmask_irqs(~__this_cpu_read(irq_disable_mask));

	__this_cpu_dec(irq_depth);

	/*
	 * Track time spent against the current process again and
	 * process any softirqs if they are waiting.
	 */
	irq_exit();
	set_irq_regs(old_regs);
}
|
|
|
|
|
|
|
|
|
2010-06-26 04:41:11 +08:00
|
|
|
/*
 * Remove an irq from the disabled mask.  If we're in an interrupt
 * context, defer enabling the HW interrupt until we leave.
 */
static void tile_irq_chip_enable(struct irq_data *d)
{
	/* get_cpu_var() disables preemption until put_cpu_var() below. */
	get_cpu_var(irq_disable_mask) &= ~(1UL << d->irq);
	/*
	 * Only unmask in hardware when we're not nested inside
	 * tile_dev_intr(); the interrupt-exit path there unmasks
	 * everything not in irq_disable_mask for us.
	 */
	if (__this_cpu_read(irq_depth) == 0)
		unmask_irqs(1UL << d->irq);
	put_cpu_var(irq_disable_mask);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Add an irq to the disabled mask. We disable the HW interrupt
|
|
|
|
* immediately so that there's no possibility of it firing. If we're
|
|
|
|
* in an interrupt context, the return path is careful to avoid
|
|
|
|
* unmasking a newly disabled interrupt.
|
|
|
|
*/
|
2011-12-02 01:58:19 +08:00
|
|
|
static void tile_irq_chip_disable(struct irq_data *d)
|
2010-06-26 04:41:11 +08:00
|
|
|
{
|
2011-12-02 01:58:19 +08:00
|
|
|
get_cpu_var(irq_disable_mask) |= (1UL << d->irq);
|
|
|
|
mask_irqs(1UL << d->irq);
|
2010-06-26 04:41:11 +08:00
|
|
|
put_cpu_var(irq_disable_mask);
|
|
|
|
}
|
|
|
|
|
2010-05-29 11:09:12 +08:00
|
|
|
/* Mask an interrupt. */
|
2011-02-07 07:04:40 +08:00
|
|
|
static void tile_irq_chip_mask(struct irq_data *d)
|
2010-05-29 11:09:12 +08:00
|
|
|
{
|
2011-02-07 07:04:40 +08:00
|
|
|
mask_irqs(1UL << d->irq);
|
2010-05-29 11:09:12 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Unmask an interrupt. */
|
2011-02-07 07:04:40 +08:00
|
|
|
static void tile_irq_chip_unmask(struct irq_data *d)
|
2010-05-29 11:09:12 +08:00
|
|
|
{
|
2011-02-07 07:04:40 +08:00
|
|
|
unmask_irqs(1UL << d->irq);
|
2010-05-29 11:09:12 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2010-06-26 04:41:11 +08:00
|
|
|
* Clear an interrupt before processing it so that any new assertions
|
|
|
|
* will trigger another irq.
|
2010-05-29 11:09:12 +08:00
|
|
|
*/
|
2011-02-07 07:04:40 +08:00
|
|
|
static void tile_irq_chip_ack(struct irq_data *d)
|
2010-05-29 11:09:12 +08:00
|
|
|
{
|
2011-02-07 07:04:40 +08:00
|
|
|
if ((unsigned long)irq_data_get_irq_chip_data(d) != IS_HW_CLEARED)
|
|
|
|
clear_irqs(1UL << d->irq);
|
2010-05-29 11:09:12 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * For per-cpu interrupts, we need to avoid unmasking any interrupts
 * that we disabled via disable_percpu_irq().
 */
static void tile_irq_chip_eoi(struct irq_data *d)
{
	/* Re-enable the source only if it hasn't been disabled meanwhile. */
	if (!(__this_cpu_read(irq_disable_mask) & (1UL << d->irq)))
		unmask_irqs(1UL << d->irq);
}
|
|
|
|
|
2010-06-26 04:41:11 +08:00
|
|
|
/* irq_chip callbacks shared by all tile device interrupts. */
static struct irq_chip tile_irq_chip = {
	.name = "tile_irq_chip",
	.irq_enable = tile_irq_chip_enable,
	.irq_disable = tile_irq_chip_disable,
	.irq_ack = tile_irq_chip_ack,
	.irq_eoi = tile_irq_chip_eoi,
	.irq_mask = tile_irq_chip_mask,
	.irq_unmask = tile_irq_chip_unmask,
};
|
|
|
|
|
|
|
|
/* Arch entry point for interrupt setup: initialize the IPI machinery. */
void __init init_IRQ(void)
{
	ipi_init();
}
|
|
|
|
|
2013-06-19 05:28:07 +08:00
|
|
|
/* Per-cpu interrupt-delivery setup. */
void setup_irq_regs(void)
{
	/* Enable interrupt delivery. */
	unmask_irqs(~0UL);
#if CHIP_HAS_IPI()
	/* On chips with IPI hardware, also unmask the IPI interrupt. */
	arch_local_irq_unmask(INT_IPI_K);
#endif
}
|
|
|
|
|
2010-06-26 04:41:11 +08:00
|
|
|
/*
 * Bind @irq to the tile irq_chip, choosing a flow handler based on
 * @tile_irq_type and flagging hardware-cleared irqs in the chip data.
 */
void tile_irq_activate(unsigned int irq, int tile_irq_type)
{
	/*
	 * We use handle_level_irq() by default because the pending
	 * interrupt vector (whether modeled by the HV on
	 * TILEPro or implemented in hardware on TILE-Gx) has
	 * level-style semantics for each bit.  An interrupt fires
	 * whenever a bit is high, not just at edges.
	 */
	irq_flow_handler_t handle = handle_level_irq;
	if (tile_irq_type == TILE_IRQ_PERCPU)
		handle = handle_percpu_irq;
	irq_set_chip_and_handler(irq, &tile_irq_chip, handle);

	/*
	 * Flag interrupts that are hardware-cleared so that ack()
	 * won't clear them.
	 */
	if (tile_irq_type == TILE_IRQ_HW_CLEAR)
		irq_set_chip_data(irq, (void *)IS_HW_CLEARED);
}
EXPORT_SYMBOL(tile_irq_activate);
|
|
|
|
|
2010-05-29 11:09:12 +08:00
|
|
|
|
|
|
|
/* Called by the generic irq code for a spurious/unexpected vector. */
void ack_bad_irq(unsigned int irq)
{
	pr_err("unexpected IRQ trap at vector %02x\n", irq);
}
|
|
|
|
|
2014-01-28 10:03:50 +08:00
|
|
|
/*
 * /proc/interrupts printing:
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
#ifdef CONFIG_PERF_EVENTS
	/* Report per-cpu perf-event interrupt counts as a "PMI" row. */
	int i;

	seq_printf(p, "%*s: ", prec, "PMI");

	for_each_online_cpu(i)
		seq_printf(p, "%10llu ", per_cpu(perf_irqs, i));
	seq_puts(p, " perf_events\n");
#endif
	return 0;
}
|
|
|
|
|
2010-06-26 04:41:11 +08:00
|
|
|
#if CHIP_HAS_IPI()
|
2014-05-07 23:44:16 +08:00
|
|
|
int arch_setup_hwirq(unsigned int irq, int node)
|
2014-05-07 23:44:13 +08:00
|
|
|
{
|
2014-05-07 23:44:16 +08:00
|
|
|
return irq >= NR_IRQS ? -EINVAL : 0;
|
2014-05-07 23:44:13 +08:00
|
|
|
}
|
|
|
|
|
2014-05-07 23:44:16 +08:00
|
|
|
void arch_teardown_hwirq(unsigned int irq) { }
|
2010-06-26 04:41:11 +08:00
|
|
|
#endif
|