Merge branch 'irqdomain/next' into gpio/next

Grant Likely 2012-02-28 13:48:58 -06:00
commit b3950d50cf
662 changed files with 5737 additions and 46630 deletions

View File

@ -102,9 +102,12 @@ X!Iinclude/linux/kobject.h
!Iinclude/linux/device.h !Iinclude/linux/device.h
</sect1> </sect1>
<sect1><title>Device Drivers Base</title> <sect1><title>Device Drivers Base</title>
!Idrivers/base/init.c
!Edrivers/base/driver.c !Edrivers/base/driver.c
!Edrivers/base/core.c !Edrivers/base/core.c
!Edrivers/base/syscore.c
!Edrivers/base/class.c !Edrivers/base/class.c
!Idrivers/base/node.c
!Edrivers/base/firmware_class.c !Edrivers/base/firmware_class.c
!Edrivers/base/transport_class.c !Edrivers/base/transport_class.c
<!-- Cannot be included, because <!-- Cannot be included, because
@ -113,13 +116,18 @@ X!Iinclude/linux/kobject.h
exceed allowed 44 characters maximum exceed allowed 44 characters maximum
X!Edrivers/base/attribute_container.c X!Edrivers/base/attribute_container.c
--> -->
!Edrivers/base/sys.c !Edrivers/base/dd.c
<!-- <!--
X!Edrivers/base/interface.c X!Edrivers/base/interface.c
--> -->
!Iinclude/linux/platform_device.h !Iinclude/linux/platform_device.h
!Edrivers/base/platform.c !Edrivers/base/platform.c
!Edrivers/base/bus.c !Edrivers/base/bus.c
</sect1>
<sect1><title>Device Drivers DMA Management</title>
!Edrivers/base/dma-buf.c
!Edrivers/base/dma-coherent.c
!Edrivers/base/dma-mapping.c
</sect1> </sect1>
<sect1><title>Device Drivers Power Management</title> <sect1><title>Device Drivers Power Management</title>
!Edrivers/base/power/main.c !Edrivers/base/power/main.c
@ -219,7 +227,7 @@ X!Isound/sound_firmware.c
<chapter id="uart16x50"> <chapter id="uart16x50">
<title>16x50 UART Driver</title> <title>16x50 UART Driver</title>
!Edrivers/tty/serial/serial_core.c !Edrivers/tty/serial/serial_core.c
!Edrivers/tty/serial/8250.c !Edrivers/tty/serial/8250/8250.c
</chapter> </chapter>
<chapter id="fbdev"> <chapter id="fbdev">

View File

@ -0,0 +1,117 @@
irq_domain interrupt number mapping library
The current design of the Linux kernel uses a single large number
space where each separate IRQ source is assigned a different number.
This is simple when there is only one interrupt controller, but in
systems with multiple interrupt controllers the kernel must ensure
that each one gets assigned non-overlapping allocations of Linux
IRQ numbers.
The irq_alloc_desc*() and irq_free_desc*() APIs provide allocation of
irq numbers, but they don't provide any support for reverse mapping of
the controller-local IRQ (hwirq) number into the Linux IRQ number
space.
The irq_domain library adds mapping between hwirq and IRQ numbers on
top of the irq_alloc_desc*() API. Using an irq_domain to manage the
mapping is preferred over having interrupt controller drivers open code
their own reverse mapping scheme.
irq_domain also implements translation from Device Tree interrupt
specifiers to hwirq numbers, and can be easily extended to support
other IRQ topology data sources.
=== irq_domain usage ===
An interrupt controller driver creates and registers an irq_domain by
calling one of the irq_domain_add_*() functions (each mapping method
has a different allocator function, more on that later). The function
will return a pointer to the irq_domain on success. The caller must
provide the allocator function with an irq_domain_ops structure that
has, at a minimum, the .map callback populated.
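As a sketch of the registration step (the foo_* names, the chip
structure, and the hwirq count of 32 are hypothetical; the .map
signature matches the one used by the GIC conversion later in this
commit):

        static int foo_irq_map(struct irq_domain *d, unsigned int irq,
                               irq_hw_number_t hwirq)
        {
                /* per-IRQ setup: flow handler, chip, chip data */
                irq_set_chip_and_handler(irq, &foo_irq_chip, handle_level_irq);
                irq_set_chip_data(irq, d->host_data);
                return 0;
        }

        static const struct irq_domain_ops foo_irq_domain_ops = {
                .map = foo_irq_map,
        };

        /* e.g. a linear domain covering 32 hypothetical hwirqs */
        domain = irq_domain_add_linear(node, 32, &foo_irq_domain_ops, priv);
        if (!domain)
                return -ENOMEM;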
In most cases, the irq_domain will begin empty without any mappings
between hwirq and IRQ numbers. Mappings are added to the irq_domain
by calling irq_create_mapping() which accepts the irq_domain and a
hwirq number as arguments. If a mapping for the hwirq doesn't already
exist then it will allocate a new Linux irq_desc, associate it with
the hwirq, and call the .map() callback so the driver can perform any
required hardware setup.
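Continuing the hypothetical driver above, a mapping could be created up
front for a known hwirq:

        unsigned int virq;

        /* returns 0 on failure, otherwise the allocated Linux IRQ number */
        virq = irq_create_mapping(domain, 3);   /* hwirq 3 is illustrative */
        if (!virq)
                return -EINVAL;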
When an interrupt is received, the irq_find_mapping() function should
be used to find the Linux IRQ number from the hwirq number.
If the driver has the Linux IRQ number or the irq_data pointer, and
needs to know the associated hwirq number (such as in the irq_chip
callbacks) then it can be directly obtained from irq_data->hwirq.
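A sketch of both lookups, assuming a hypothetical foo_priv structure
that stores the domain pointer and hypothetical register helpers:

        /* demux handler: hardware gives us a hwirq, find the Linux IRQ */
        static irqreturn_t foo_demux(int irq, void *data)
        {
                struct foo_priv *priv = data;
                irq_hw_number_t hwirq = foo_read_pending(priv); /* hypothetical */

                generic_handle_irq(irq_find_mapping(priv->domain, hwirq));
                return IRQ_HANDLED;
        }

        /* irq_chip callback: the hwirq is already available in irq_data */
        static void foo_mask(struct irq_data *d)
        {
                struct foo_priv *priv = irq_data_get_irq_chip_data(d);

                foo_write_mask(priv, d->hwirq); /* hypothetical register write */
        }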
=== Types of irq_domain mappings ===
There are several mechanisms available for reverse mapping from hwirq
to Linux irq, and each mechanism uses a different allocation function.
Which reverse map type should be used depends on the use case. Each
of the reverse map types is described below:
==== Linear ====
irq_domain_add_linear()
The linear reverse map maintains a fixed-size table indexed by the
hwirq number. When a hwirq is mapped, an irq_desc is allocated for
the hwirq, and the IRQ number is stored in the table.
The linear map is a good choice when the maximum number of hwirqs is
fixed and relatively small (less than about 256). The advantages of
this map are fixed-time lookup of IRQ numbers, and that irq_descs are
only allocated for in-use IRQs. The disadvantage is that the table
must be as large as the largest possible hwirq number.
The majority of drivers should use the linear map.
==== Tree ====
irq_domain_add_tree()
The irq_domain maintains a radix tree map from hwirq numbers to Linux
IRQs. When a hwirq is mapped, an irq_desc is allocated and the
hwirq is used as the lookup key for the radix tree.
The tree map is a good choice if the hwirq number can be very large
since it doesn't need to allocate a table as large as the largest
hwirq number. The disadvantage is that hwirq-to-IRQ lookup time
depends on how many entries are in the tree.
Very few drivers should need this mapping. At the moment, powerpc
iseries is the only user.
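A sketch of the allocator call, reusing the hypothetical ops structure
from the usage section:

        /* no size argument: the radix tree grows as mappings are added */
        domain = irq_domain_add_tree(node, &foo_irq_domain_ops, priv);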
==== No Map ====
irq_domain_add_nomap()
The No Map mapping is to be used when the hwirq number is
programmable in the hardware. In this case it is best to program the
Linux IRQ number into the hardware itself so that no mapping is
required. Calling irq_create_direct_mapping() will allocate a Linux
IRQ number and call the .map() callback so that the driver can program the
Linux IRQ number into the hardware.
Most drivers cannot use this mapping.
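A sketch, assuming the nomap allocator of this kernel series (no size
argument) and the hypothetical ops structure from above:

        domain = irq_domain_add_nomap(node, &foo_irq_domain_ops, priv);

        /* later: allocate a Linux IRQ and let .map() program it into hw */
        virq = irq_create_direct_mapping(domain);
        if (!virq)
                return -EINVAL;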
==== Legacy ====
irq_domain_add_legacy()
irq_domain_add_legacy_isa()
The Legacy mapping is a special case for drivers that already have a
range of irq_descs allocated for the hwirqs. It is used when the
driver cannot be immediately converted to use the linear mapping. For
example, many embedded system board support files use a set of #defines
for IRQ numbers that are passed to struct device registrations. In that
case the Linux IRQ numbers cannot be dynamically assigned and the legacy
mapping should be used.
The legacy map assumes a contiguous range of IRQ numbers has already
been allocated for the controller and that the IRQ number can be
calculated by adding a fixed offset to the hwirq number, and
vice versa. The disadvantage is that it requires the interrupt
controller to manage IRQ allocations and it requires an irq_desc to be
allocated for every hwirq, even if it is unused.
The legacy map should only be used if fixed IRQ mappings must be
supported. For example, ISA controllers would use the legacy map for
mapping Linux IRQs 0-15 so that existing ISA drivers get the correct IRQ
numbers.
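For example, the GIC conversion later in this commit registers its
pre-allocated IRQ range this way (abridged from the
arch/arm/common/gic.c hunk below):

        gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base,
                                            hwirq_base, &gic_irq_domain_ops,
                                            gic);
        if (WARN_ON(!gic->domain))
                return;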

View File

@ -17,11 +17,11 @@ reports supported by a device are also provided by sysfs in
class/input/event*/device/capabilities/, and the properties of a device are class/input/event*/device/capabilities/, and the properties of a device are
provided in class/input/event*/device/properties. provided in class/input/event*/device/properties.
Types: Event types:
========== ===========
Types are groupings of codes under a logical input construct. Each type has a Event types are groupings of codes under a logical input construct. Each
set of applicable codes to be used in generating events. See the Codes section type has a set of applicable codes to be used in generating events. See the
for details on valid codes for each type. Codes section for details on valid codes for each type.
* EV_SYN: * EV_SYN:
- Used as markers to separate events. Events may be separated in time or in - Used as markers to separate events. Events may be separated in time or in
@ -63,9 +63,9 @@ for details on valid codes for each type.
* EV_FF_STATUS: * EV_FF_STATUS:
- Used to receive force feedback device status. - Used to receive force feedback device status.
Codes: Event codes:
========== ===========
Codes define the precise type of event. Event codes define the precise type of event.
EV_SYN: EV_SYN:
---------- ----------
@ -220,6 +220,56 @@ EV_PWR:
EV_PWR events are a special type of event used specifically for power EV_PWR events are a special type of event used specifically for power
management. Its usage is not well defined. To be addressed later. management. Its usage is not well defined. To be addressed later.
Device properties:
=================
Normally, userspace sets up an input device based on the data it emits,
i.e., the event types. In the case of two devices emitting the same event
types, additional information can be provided in the form of device
properties.
INPUT_PROP_DIRECT + INPUT_PROP_POINTER:
--------------------------------------
The INPUT_PROP_DIRECT property indicates that device coordinates should be
directly mapped to screen coordinates (not taking into account trivial
transformations, such as scaling, flipping and rotating). Non-direct input
devices require non-trivial transformation, such as absolute to relative
transformation for touchpads. Typical direct input devices: touchscreens,
drawing tablets; non-direct devices: touchpads, mice.
The INPUT_PROP_POINTER property indicates that the device is not transposed
on the screen and thus requires the use of an on-screen pointer to trace
the user's movements. Typical pointer devices: touchpads, tablets, mice;
non-pointer devices: touchscreens.
If neither INPUT_PROP_DIRECT nor INPUT_PROP_POINTER is set, the property is
considered undefined and the device type should be deduced in the
traditional way, using emitted event types.
INPUT_PROP_BUTTONPAD:
--------------------
For touchpads where the button is placed beneath the surface, such that
pressing down on the pad causes a button click, this property should be
set. This is common in clickpad notebooks and MacBooks from 2009 onwards.
Originally, the buttonpad property was coded into the bcm5974 driver
version field under the name integrated button. For backwards
compatibility, both methods need to be checked in userspace.
INPUT_PROP_SEMI_MT:
------------------
Some touchpads, most common between 2008 and 2011, can detect the presence
of multiple contacts without resolving the individual positions; only the
number of contacts and a rectangular shape are known. For such
touchpads, the semi-mt property should be set.
Depending on the device, the rectangle may enclose all touches, like a
bounding box, or just some of them, for instance the two most recent
touches. The diversity makes the rectangle of limited use, but some
gestures can normally be extracted from it.
If INPUT_PROP_SEMI_MT is not set, the device is assumed to be a true MT
device.
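As a sketch of how userspace might query these property bits via the
EVIOCGPROP ioctl (the device path is illustrative):

        #include <linux/input.h>
        #include <sys/ioctl.h>
        #include <fcntl.h>
        #include <stdio.h>

        #define NLONGS(nbits) \
                (((nbits) + 8 * sizeof(long) - 1) / (8 * sizeof(long)))
        #define HAS_PROP(bits, p) \
                ((bits)[(p) / (8 * sizeof(long))] & \
                 (1UL << ((p) % (8 * sizeof(long)))))

        int main(void)
        {
                unsigned long props[NLONGS(INPUT_PROP_MAX + 1)] = { 0 };
                int fd = open("/dev/input/event0", O_RDONLY); /* illustrative */

                if (fd < 0 || ioctl(fd, EVIOCGPROP(sizeof(props)), props) < 0)
                        return 1;
                if (HAS_PROP(props, INPUT_PROP_DIRECT))
                        printf("direct input device (e.g. touchscreen)\n");
                if (HAS_PROP(props, INPUT_PROP_POINTER))
                        printf("pointer device (e.g. touchpad)\n");
                if (HAS_PROP(props, INPUT_PROP_SEMI_MT))
                        printf("semi-mt touchpad\n");
                return 0;
        }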
Guidelines: Guidelines:
========== ==========
The guidelines below ensure proper single-touch and multi-finger functionality. The guidelines below ensure proper single-touch and multi-finger functionality.
@ -240,6 +290,8 @@ used to report when a touch is active on the screen.
BTN_{MOUSE,LEFT,MIDDLE,RIGHT} must not be reported as the result of touch BTN_{MOUSE,LEFT,MIDDLE,RIGHT} must not be reported as the result of touch
contact. BTN_TOOL_<name> events should be reported where possible. contact. BTN_TOOL_<name> events should be reported where possible.
For new hardware, INPUT_PROP_DIRECT should be set.
Trackpads: Trackpads:
---------- ----------
Legacy trackpads that only provide relative position information must report Legacy trackpads that only provide relative position information must report
@ -250,6 +302,8 @@ location of the touch. BTN_TOUCH should be used to report when a touch is active
on the trackpad. Where multi-finger support is available, BTN_TOOL_<name> should on the trackpad. Where multi-finger support is available, BTN_TOOL_<name> should
be used to report the number of touches active on the trackpad. be used to report the number of touches active on the trackpad.
For new hardware, INPUT_PROP_POINTER should be set.
Tablets: Tablets:
---------- ----------
BTN_TOOL_<name> events must be reported when a stylus or other tool is active on BTN_TOOL_<name> events must be reported when a stylus or other tool is active on
@ -260,3 +314,5 @@ button may be used for buttons on the tablet except BTN_{MOUSE,LEFT}.
BTN_{0,1,2,etc} are good generic codes for unlabeled buttons. Do not use BTN_{0,1,2,etc} are good generic codes for unlabeled buttons. Do not use
meaningful buttons, like BTN_FORWARD, unless the button is labeled for that meaningful buttons, like BTN_FORWARD, unless the button is labeled for that
purpose on the device. purpose on the device.
For new hardware, both INPUT_PROP_DIRECT and INPUT_PROP_POINTER should be set.

View File

@ -601,6 +601,8 @@ can be ORed together:
instead of using the one provided by the hardware. instead of using the one provided by the hardware.
512 - A kernel warning has occurred. 512 - A kernel warning has occurred.
1024 - A module from drivers/staging was loaded. 1024 - A module from drivers/staging was loaded.
2048 - The system is working around a severe firmware bug.
4096 - An out-of-tree module has been loaded.
============================================================== ==============================================================
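A minimal userspace sketch that tests the two new bits by reading
/proc/sys/kernel/tainted:

        #include <stdio.h>

        int main(void)
        {
                unsigned long tainted = 0;
                FILE *f = fopen("/proc/sys/kernel/tainted", "r");

                if (!f)
                        return 1;
                if (fscanf(f, "%lu", &tainted) != 1) {
                        fclose(f);
                        return 1;
                }
                fclose(f);
                if (tainted & 2048)
                        printf("firmware bug workaround in effect\n");
                if (tainted & 4096)
                        printf("out-of-tree module loaded\n");
                return 0;
        }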

View File

@ -159,7 +159,7 @@ S: Maintained
F: drivers/net/ethernet/realtek/r8169.c F: drivers/net/ethernet/realtek/r8169.c
8250/16?50 (AND CLONE UARTS) SERIAL DRIVER 8250/16?50 (AND CLONE UARTS) SERIAL DRIVER
M: Greg Kroah-Hartman <gregkh@suse.de> M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
L: linux-serial@vger.kernel.org L: linux-serial@vger.kernel.org
W: http://serial.sourceforge.net W: http://serial.sourceforge.net
S: Maintained S: Maintained
@ -789,12 +789,6 @@ F: arch/arm/mach-mx*/
F: arch/arm/mach-imx/ F: arch/arm/mach-imx/
F: arch/arm/plat-mxc/ F: arch/arm/plat-mxc/
ARM/FREESCALE IMX51
M: Amit Kucheria <amit.kucheria@canonical.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-mx5/
ARM/FREESCALE IMX6 ARM/FREESCALE IMX6
M: Shawn Guo <shawn.guo@linaro.org> M: Shawn Guo <shawn.guo@linaro.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@ -1783,9 +1777,9 @@ X: net/wireless/wext*
CHAR and MISC DRIVERS CHAR and MISC DRIVERS
M: Arnd Bergmann <arnd@arndb.de> M: Arnd Bergmann <arnd@arndb.de>
M: Greg Kroah-Hartman <greg@kroah.com> M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git
S: Maintained S: Supported
F: drivers/char/* F: drivers/char/*
F: drivers/misc/* F: drivers/misc/*
@ -2287,7 +2281,7 @@ F: drivers/acpi/dock.c
DOCUMENTATION DOCUMENTATION
M: Randy Dunlap <rdunlap@xenotime.net> M: Randy Dunlap <rdunlap@xenotime.net>
L: linux-doc@vger.kernel.org L: linux-doc@vger.kernel.org
T: quilt http://userweb.kernel.org/~rdunlap/kernel-doc-patches/current/ T: quilt http://xenotime.net/kernel-doc-patches/current/
S: Maintained S: Maintained
F: Documentation/ F: Documentation/
@ -2320,7 +2314,7 @@ F: lib/lru_cache.c
F: Documentation/blockdev/drbd/ F: Documentation/blockdev/drbd/
DRIVER CORE, KOBJECTS, DEBUGFS AND SYSFS DRIVER CORE, KOBJECTS, DEBUGFS AND SYSFS
M: Greg Kroah-Hartman <gregkh@suse.de> M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core-2.6.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core-2.6.git
S: Supported S: Supported
F: Documentation/kobject.txt F: Documentation/kobject.txt
@ -3324,6 +3318,12 @@ S: Maintained
F: net/ieee802154/ F: net/ieee802154/
F: drivers/ieee802154/ F: drivers/ieee802154/
IIO SUBSYSTEM AND DRIVERS
M: Jonathan Cameron <jic23@cam.ac.uk>
L: linux-iio@vger.kernel.org
S: Maintained
F: drivers/staging/iio/
IKANOS/ADI EAGLE ADSL USB DRIVER IKANOS/ADI EAGLE ADSL USB DRIVER
M: Matthieu Castet <castet.matthieu@free.fr> M: Matthieu Castet <castet.matthieu@free.fr>
M: Stanislaw Gruszka <stf_xl@wp.pl> M: Stanislaw Gruszka <stf_xl@wp.pl>
@ -3640,6 +3640,15 @@ S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
F: kernel/irq/ F: kernel/irq/
IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY)
M: Benjamin Herrenschmidt <benh@kernel.crashing.org>
M: Grant Likely <grant.likely@secretlab.ca>
T: git git://git.secretlab.ca/git/linux-2.6.git irqdomain/next
S: Maintained
F: Documentation/IRQ-domain.txt
F: include/linux/irqdomain.h
F: kernel/irq/irqdomain.c
ISAPNP ISAPNP
M: Jaroslav Kysela <perex@perex.cz> M: Jaroslav Kysela <perex@perex.cz>
S: Maintained S: Maintained
@ -3992,11 +4001,11 @@ M: Rusty Russell <rusty@rustcorp.com.au>
L: lguest@lists.ozlabs.org L: lguest@lists.ozlabs.org
W: http://lguest.ozlabs.org/ W: http://lguest.ozlabs.org/
S: Odd Fixes S: Odd Fixes
F: Documentation/virtual/lguest/ F: arch/x86/include/asm/lguest*.h
F: arch/x86/lguest/ F: arch/x86/lguest/
F: drivers/lguest/ F: drivers/lguest/
F: include/linux/lguest*.h F: include/linux/lguest*.h
F: arch/x86/include/asm/lguest*.h F: tools/lguest/
LINUX FOR IBM pSERIES (RS/6000) LINUX FOR IBM pSERIES (RS/6000)
M: Paul Mackerras <paulus@au.ibm.com> M: Paul Mackerras <paulus@au.ibm.com>
@ -4136,7 +4145,7 @@ L: linux-ntfs-dev@lists.sourceforge.net
W: http://www.linux-ntfs.org/content/view/19/37/ W: http://www.linux-ntfs.org/content/view/19/37/
S: Maintained S: Maintained
F: Documentation/ldm.txt F: Documentation/ldm.txt
F: fs/partitions/ldm.* F: block/partitions/ldm.*
LogFS LogFS
M: Joern Engel <joern@logfs.org> M: Joern Engel <joern@logfs.org>
@ -5633,7 +5642,7 @@ W: http://www.ibm.com/developerworks/linux/linux390/
S: Supported S: Supported
F: arch/s390/ F: arch/s390/
F: drivers/s390/ F: drivers/s390/
F: fs/partitions/ibm.c F: block/partitions/ibm.c
F: Documentation/s390/ F: Documentation/s390/
F: Documentation/DocBook/s390* F: Documentation/DocBook/s390*
@ -6276,15 +6285,15 @@ S: Maintained
F: arch/alpha/kernel/srm_env.c F: arch/alpha/kernel/srm_env.c
STABLE BRANCH STABLE BRANCH
M: Greg Kroah-Hartman <greg@kroah.com> M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
L: stable@vger.kernel.org L: stable@vger.kernel.org
S: Maintained S: Supported
STAGING SUBSYSTEM STAGING SUBSYSTEM
M: Greg Kroah-Hartman <gregkh@suse.de> M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
L: devel@driverdev.osuosl.org L: devel@driverdev.osuosl.org
S: Maintained S: Supported
F: drivers/staging/ F: drivers/staging/
STAGING - AGERE HERMES II and II.5 WIRELESS DRIVERS STAGING - AGERE HERMES II and II.5 WIRELESS DRIVERS
@ -6396,11 +6405,6 @@ M: Omar Ramirez Luna <omar.ramirez@ti.com>
S: Odd Fixes S: Odd Fixes
F: drivers/staging/tidspbridge/ F: drivers/staging/tidspbridge/
STAGING - TRIDENT TVMASTER TMxxxx USB VIDEO CAPTURE DRIVERS
L: linux-media@vger.kernel.org
S: Odd Fixes
F: drivers/staging/tm6000/
STAGING - USB ENE SM/MS CARD READER DRIVER STAGING - USB ENE SM/MS CARD READER DRIVER
M: Al Cho <acho@novell.com> M: Al Cho <acho@novell.com>
S: Odd Fixes S: Odd Fixes
@ -6669,8 +6673,8 @@ S: Maintained
K: ^Subject:.*(?i)trivial K: ^Subject:.*(?i)trivial
TTY LAYER TTY LAYER
M: Greg Kroah-Hartman <gregkh@suse.de> M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
S: Maintained S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty-2.6.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty-2.6.git
F: drivers/tty/ F: drivers/tty/
F: drivers/tty/serial/serial_core.c F: drivers/tty/serial/serial_core.c
@ -6958,7 +6962,7 @@ S: Maintained
F: drivers/usb/serial/digi_acceleport.c F: drivers/usb/serial/digi_acceleport.c
USB SERIAL DRIVER USB SERIAL DRIVER
M: Greg Kroah-Hartman <gregkh@suse.de> M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
L: linux-usb@vger.kernel.org L: linux-usb@vger.kernel.org
S: Supported S: Supported
F: Documentation/usb/usb-serial.txt F: Documentation/usb/usb-serial.txt
@ -6973,9 +6977,8 @@ S: Maintained
F: drivers/usb/serial/empeg.c F: drivers/usb/serial/empeg.c
USB SERIAL KEYSPAN DRIVER USB SERIAL KEYSPAN DRIVER
M: Greg Kroah-Hartman <greg@kroah.com> M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
L: linux-usb@vger.kernel.org L: linux-usb@vger.kernel.org
W: http://www.kroah.com/linux/
S: Maintained S: Maintained
F: drivers/usb/serial/*keyspan* F: drivers/usb/serial/*keyspan*
@ -7003,7 +7006,7 @@ F: Documentation/video4linux/sn9c102.txt
F: drivers/media/video/sn9c102/ F: drivers/media/video/sn9c102/
USB SUBSYSTEM USB SUBSYSTEM
M: Greg Kroah-Hartman <gregkh@suse.de> M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
L: linux-usb@vger.kernel.org L: linux-usb@vger.kernel.org
W: http://www.linux-usb.org W: http://www.linux-usb.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6.git
@ -7090,7 +7093,7 @@ F: fs/hppfs/
USERSPACE I/O (UIO) USERSPACE I/O (UIO)
M: "Hans J. Koch" <hjk@hansjkoch.de> M: "Hans J. Koch" <hjk@hansjkoch.de>
M: Greg Kroah-Hartman <gregkh@suse.de> M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
S: Maintained S: Maintained
F: Documentation/DocBook/uio-howto.tmpl F: Documentation/DocBook/uio-howto.tmpl
F: drivers/uio/ F: drivers/uio/

View File

@ -1,7 +1,7 @@
VERSION = 3 VERSION = 3
PATCHLEVEL = 3 PATCHLEVEL = 3
SUBLEVEL = 0 SUBLEVEL = 0
EXTRAVERSION = -rc2 EXTRAVERSION = -rc3
NAME = Saber-toothed Squirrel NAME = Saber-toothed Squirrel
# *DOCUMENTATION* # *DOCUMENTATION*

View File

@ -51,7 +51,6 @@ union gic_base {
}; };
struct gic_chip_data { struct gic_chip_data {
unsigned int irq_offset;
union gic_base dist_base; union gic_base dist_base;
union gic_base cpu_base; union gic_base cpu_base;
#ifdef CONFIG_CPU_PM #ifdef CONFIG_CPU_PM
@ -61,9 +60,7 @@ struct gic_chip_data {
u32 __percpu *saved_ppi_enable; u32 __percpu *saved_ppi_enable;
u32 __percpu *saved_ppi_conf; u32 __percpu *saved_ppi_conf;
#endif #endif
#ifdef CONFIG_IRQ_DOMAIN struct irq_domain *domain;
struct irq_domain domain;
#endif
unsigned int gic_irqs; unsigned int gic_irqs;
#ifdef CONFIG_GIC_NON_BANKED #ifdef CONFIG_GIC_NON_BANKED
void __iomem *(*get_base)(union gic_base *); void __iomem *(*get_base)(union gic_base *);
@ -282,7 +279,7 @@ asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
irqnr = irqstat & ~0x1c00; irqnr = irqstat & ~0x1c00;
if (likely(irqnr > 15 && irqnr < 1021)) { if (likely(irqnr > 15 && irqnr < 1021)) {
irqnr = irq_domain_to_irq(&gic->domain, irqnr); irqnr = irq_find_mapping(gic->domain, irqnr);
handle_IRQ(irqnr, regs); handle_IRQ(irqnr, regs);
continue; continue;
} }
@ -314,8 +311,8 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
if (gic_irq == 1023) if (gic_irq == 1023)
goto out; goto out;
cascade_irq = irq_domain_to_irq(&chip_data->domain, gic_irq); cascade_irq = irq_find_mapping(chip_data->domain, gic_irq);
if (unlikely(gic_irq < 32 || gic_irq > 1020 || cascade_irq >= NR_IRQS)) if (unlikely(gic_irq < 32 || gic_irq > 1020))
do_bad_IRQ(cascade_irq, desc); do_bad_IRQ(cascade_irq, desc);
else else
generic_handle_irq(cascade_irq); generic_handle_irq(cascade_irq);
@ -348,10 +345,9 @@ void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
static void __init gic_dist_init(struct gic_chip_data *gic) static void __init gic_dist_init(struct gic_chip_data *gic)
{ {
unsigned int i, irq; unsigned int i;
u32 cpumask; u32 cpumask;
unsigned int gic_irqs = gic->gic_irqs; unsigned int gic_irqs = gic->gic_irqs;
struct irq_domain *domain = &gic->domain;
void __iomem *base = gic_data_dist_base(gic); void __iomem *base = gic_data_dist_base(gic);
u32 cpu = cpu_logical_map(smp_processor_id()); u32 cpu = cpu_logical_map(smp_processor_id());
@ -386,23 +382,6 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
for (i = 32; i < gic_irqs; i += 32) for (i = 32; i < gic_irqs; i += 32)
writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32); writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
/*
* Setup the Linux IRQ subsystem.
*/
irq_domain_for_each_irq(domain, i, irq) {
if (i < 32) {
irq_set_percpu_devid(irq);
irq_set_chip_and_handler(irq, &gic_chip,
handle_percpu_devid_irq);
set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
} else {
irq_set_chip_and_handler(irq, &gic_chip,
handle_fasteoi_irq);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
irq_set_chip_data(irq, gic);
}
writel_relaxed(1, base + GIC_DIST_CTRL); writel_relaxed(1, base + GIC_DIST_CTRL);
} }
@ -618,11 +597,27 @@ static void __init gic_pm_init(struct gic_chip_data *gic)
} }
#endif #endif
#ifdef CONFIG_OF static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
static int gic_irq_domain_dt_translate(struct irq_domain *d, irq_hw_number_t hw)
struct device_node *controller, {
const u32 *intspec, unsigned int intsize, if (hw < 32) {
unsigned long *out_hwirq, unsigned int *out_type) irq_set_percpu_devid(irq);
irq_set_chip_and_handler(irq, &gic_chip,
handle_percpu_devid_irq);
set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
} else {
irq_set_chip_and_handler(irq, &gic_chip,
handle_fasteoi_irq);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
irq_set_chip_data(irq, d->host_data);
return 0;
}
static int gic_irq_domain_xlate(struct irq_domain *d,
struct device_node *controller,
const u32 *intspec, unsigned int intsize,
unsigned long *out_hwirq, unsigned int *out_type)
{ {
if (d->of_node != controller) if (d->of_node != controller)
return -EINVAL; return -EINVAL;
@ -639,26 +634,23 @@ static int gic_irq_domain_dt_translate(struct irq_domain *d,
*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK; *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
return 0; return 0;
} }
#endif
const struct irq_domain_ops gic_irq_domain_ops = { const struct irq_domain_ops gic_irq_domain_ops = {
#ifdef CONFIG_OF .map = gic_irq_domain_map,
.dt_translate = gic_irq_domain_dt_translate, .xlate = gic_irq_domain_xlate,
#endif
}; };
void __init gic_init_bases(unsigned int gic_nr, int irq_start, void __init gic_init_bases(unsigned int gic_nr, int irq_start,
void __iomem *dist_base, void __iomem *cpu_base, void __iomem *dist_base, void __iomem *cpu_base,
u32 percpu_offset) u32 percpu_offset, struct device_node *node)
{ {
irq_hw_number_t hwirq_base;
struct gic_chip_data *gic; struct gic_chip_data *gic;
struct irq_domain *domain; int gic_irqs, irq_base;
int gic_irqs;
BUG_ON(gic_nr >= MAX_GIC_NR); BUG_ON(gic_nr >= MAX_GIC_NR);
gic = &gic_data[gic_nr]; gic = &gic_data[gic_nr];
domain = &gic->domain;
#ifdef CONFIG_GIC_NON_BANKED #ifdef CONFIG_GIC_NON_BANKED
if (percpu_offset) { /* Frankein-GIC without banked registers... */ if (percpu_offset) { /* Frankein-GIC without banked registers... */
unsigned int cpu; unsigned int cpu;
@ -694,10 +686,10 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
* For primary GICs, skip over SGIs. * For primary GICs, skip over SGIs.
* For secondary GICs, skip over PPIs, too. * For secondary GICs, skip over PPIs, too.
*/ */
domain->hwirq_base = 32; hwirq_base = 32;
if (gic_nr == 0) { if (gic_nr == 0) {
if ((irq_start & 31) > 0) { if ((irq_start & 31) > 0) {
domain->hwirq_base = 16; hwirq_base = 16;
if (irq_start != -1) if (irq_start != -1)
irq_start = (irq_start & ~31) + 16; irq_start = (irq_start & ~31) + 16;
} }
@ -713,17 +705,17 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
gic_irqs = 1020; gic_irqs = 1020;
gic->gic_irqs = gic_irqs; gic->gic_irqs = gic_irqs;
domain->nr_irq = gic_irqs - domain->hwirq_base; gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */
domain->irq_base = irq_alloc_descs(irq_start, 16, domain->nr_irq, irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, numa_node_id());
numa_node_id()); if (IS_ERR_VALUE(irq_base)) {
if (IS_ERR_VALUE(domain->irq_base)) {
WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n", WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
irq_start); irq_start);
domain->irq_base = irq_start; irq_base = irq_start;
} }
domain->priv = gic; gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base,
domain->ops = &gic_irq_domain_ops; hwirq_base, &gic_irq_domain_ops, gic);
irq_domain_add(domain); if (WARN_ON(!gic->domain))
return;
gic_chip.flags |= gic_arch_extn.flags; gic_chip.flags |= gic_arch_extn.flags;
gic_dist_init(gic); gic_dist_init(gic);
@ -768,7 +760,6 @@ int __init gic_of_init(struct device_node *node, struct device_node *parent)
void __iomem *dist_base; void __iomem *dist_base;
u32 percpu_offset; u32 percpu_offset;
int irq; int irq;
struct irq_domain *domain = &gic_data[gic_cnt].domain;
if (WARN_ON(!node)) if (WARN_ON(!node))
return -ENODEV; return -ENODEV;
@ -782,9 +773,7 @@ int __init gic_of_init(struct device_node *node, struct device_node *parent)
if (of_property_read_u32(node, "cpu-offset", &percpu_offset)) if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
percpu_offset = 0; percpu_offset = 0;
domain->of_node = of_node_get(node); gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node);
gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset);
if (parent) { if (parent) {
irq = irq_of_parse_and_map(node, 0); irq = irq_of_parse_and_map(node, 0);

View File

@ -56,7 +56,7 @@ struct vic_device {
u32 int_enable; u32 int_enable;
u32 soft_int; u32 soft_int;
u32 protect; u32 protect;
struct irq_domain domain; struct irq_domain *domain;
}; };
/* we cannot allocate memory when VICs are initially registered */ /* we cannot allocate memory when VICs are initially registered */
@ -192,14 +192,8 @@ static void __init vic_register(void __iomem *base, unsigned int irq,
v->resume_sources = resume_sources; v->resume_sources = resume_sources;
v->irq = irq; v->irq = irq;
vic_id++; vic_id++;
v->domain = irq_domain_add_legacy(node, 32, irq, 0,
v->domain.irq_base = irq; &irq_domain_simple_ops, v);
v->domain.nr_irq = 32;
#ifdef CONFIG_OF_IRQ
v->domain.of_node = of_node_get(node);
#endif /* CONFIG_OF */
v->domain.ops = &irq_domain_simple_ops;
irq_domain_add(&v->domain);
} }
static void vic_ack_irq(struct irq_data *d) static void vic_ack_irq(struct irq_data *d)
@ -348,7 +342,7 @@ static void __init vic_init_st(void __iomem *base, unsigned int irq_start,
vic_register(base, irq_start, 0, node); vic_register(base, irq_start, 0, node);
} }
static void __init __vic_init(void __iomem *base, unsigned int irq_start, void __init __vic_init(void __iomem *base, unsigned int irq_start,
u32 vic_sources, u32 resume_sources, u32 vic_sources, u32 resume_sources,
struct device_node *node) struct device_node *node)
{ {
@ -444,7 +438,7 @@ static int handle_one_vic(struct vic_device *vic, struct pt_regs *regs)
stat = readl_relaxed(vic->base + VIC_IRQ_STATUS); stat = readl_relaxed(vic->base + VIC_IRQ_STATUS);
while (stat) { while (stat) {
irq = ffs(stat) - 1; irq = ffs(stat) - 1;
handle_IRQ(irq_domain_to_irq(&vic->domain, irq), regs); handle_IRQ(irq_find_mapping(vic->domain, irq), regs);
stat &= ~(1 << irq); stat &= ~(1 << irq);
handled = 1; handled = 1;
} }

View File

@ -39,7 +39,7 @@ struct device_node;
extern struct irq_chip gic_arch_extn; extern struct irq_chip gic_arch_extn;
void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *, void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
u32 offset); u32 offset, struct device_node *);
int gic_of_init(struct device_node *node, struct device_node *parent); int gic_of_init(struct device_node *node, struct device_node *parent);
void gic_secondary_init(unsigned int); void gic_secondary_init(unsigned int);
void gic_handle_irq(struct pt_regs *regs); void gic_handle_irq(struct pt_regs *regs);
@ -49,7 +49,7 @@ void gic_raise_softirq(const struct cpumask *mask, unsigned int irq);
static inline void gic_init(unsigned int nr, int start, static inline void gic_init(unsigned int nr, int start,
void __iomem *dist , void __iomem *cpu) void __iomem *dist , void __iomem *cpu)
{ {
gic_init_bases(nr, start, dist, cpu, 0); gic_init_bases(nr, start, dist, cpu, 0, NULL);
} }
#endif #endif

View File

@ -47,6 +47,8 @@
struct device_node; struct device_node;
struct pt_regs; struct pt_regs;
void __vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources,
u32 resume_sources, struct device_node *node);
void vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources, u32 resume_sources); void vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources, u32 resume_sources);
int vic_of_init(struct device_node *node, struct device_node *parent); int vic_of_init(struct device_node *node, struct device_node *parent);
void vic_handle_irq(struct pt_regs *regs); void vic_handle_irq(struct pt_regs *regs);

View File

@ -198,7 +198,15 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
unsigned long addr) unsigned long addr)
{ {
pgtable_page_dtor(pte); pgtable_page_dtor(pte);
tlb_add_flush(tlb, addr);
/*
* With the classic ARM MMU, a pte page has two corresponding pmd
* entries, each covering 1MB.
*/
addr &= PMD_MASK;
tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE);
tlb_add_flush(tlb, addr + SZ_1M);
tlb_remove_page(tlb, pte); tlb_remove_page(tlb, pte);
} }

View File

@ -790,7 +790,7 @@ __kuser_cmpxchg64: @ 0xffff0f60
smp_dmb arm smp_dmb arm
rsbs r0, r3, #0 @ set returned val and C flag rsbs r0, r3, #0 @ set returned val and C flag
ldmfd sp!, {r4, r5, r6, r7} ldmfd sp!, {r4, r5, r6, r7}
bx lr usr_ret lr
#elif !defined(CONFIG_SMP) #elif !defined(CONFIG_SMP)

View File

@ -469,6 +469,20 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
}, },
}, },
[C(NODE)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
}; };
/* /*
@ -579,6 +593,20 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
}, },
}, },
[C(NODE)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
}; };
/* /*

View File

@ -699,10 +699,13 @@ static int vfp_set(struct task_struct *target,
{ {
int ret; int ret;
struct thread_info *thread = task_thread_info(target); struct thread_info *thread = task_thread_info(target);
struct vfp_hard_struct new_vfp = thread->vfpstate.hard; struct vfp_hard_struct new_vfp;
const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs); const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs);
const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr); const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr);
vfp_sync_hwstate(thread);
new_vfp = thread->vfpstate.hard;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&new_vfp.fpregs, &new_vfp.fpregs,
user_fpregs_offset, user_fpregs_offset,
@ -723,9 +726,8 @@ static int vfp_set(struct task_struct *target,
if (ret) if (ret)
return ret; return ret;
vfp_sync_hwstate(thread);
thread->vfpstate.hard = new_vfp;
vfp_flush_hwstate(thread); vfp_flush_hwstate(thread);
thread->vfpstate.hard = new_vfp;
return 0; return 0;
} }

View File

@ -227,6 +227,8 @@ static int restore_vfp_context(struct vfp_sigframe __user *frame)
if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE) if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
return -EINVAL; return -EINVAL;
vfp_flush_hwstate(thread);
/* /*
* Copy the floating point registers. There can be unused * Copy the floating point registers. There can be unused
* registers see asm/hwcap.h for details. * registers see asm/hwcap.h for details.
@ -251,9 +253,6 @@ static int restore_vfp_context(struct vfp_sigframe __user *frame)
__get_user_error(h->fpinst, &frame->ufp_exc.fpinst, err); __get_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
__get_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err); __get_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
if (!err)
vfp_flush_hwstate(thread);
return err ? -EFAULT : 0; return err ? -EFAULT : 0;
} }

View File

@ -266,6 +266,7 @@ void die(const char *str, struct pt_regs *regs, int err)
{ {
struct thread_info *thread = current_thread_info(); struct thread_info *thread = current_thread_info();
int ret; int ret;
enum bug_trap_type bug_type = BUG_TRAP_TYPE_NONE;
oops_enter(); oops_enter();
@ -273,7 +274,9 @@ void die(const char *str, struct pt_regs *regs, int err)
console_verbose(); console_verbose();
bust_spinlocks(1); bust_spinlocks(1);
if (!user_mode(regs)) if (!user_mode(regs))
report_bug(regs->ARM_pc, regs); bug_type = report_bug(regs->ARM_pc, regs);
if (bug_type != BUG_TRAP_TYPE_NONE)
str = "Oops - BUG";
ret = __die(str, err, thread, regs); ret = __die(str, err, thread, regs);
if (regs && kexec_should_crash(thread->task)) if (regs && kexec_should_crash(thread->task))

View File

@ -10,6 +10,7 @@
#include <asm/page.h> #include <asm/page.h>
#define PROC_INFO \ #define PROC_INFO \
. = ALIGN(4); \
VMLINUX_SYMBOL(__proc_info_begin) = .; \ VMLINUX_SYMBOL(__proc_info_begin) = .; \
*(.proc.info.init) \ *(.proc.info.init) \
VMLINUX_SYMBOL(__proc_info_end) = .; VMLINUX_SYMBOL(__proc_info_end) = .;

View File

@ -194,6 +194,6 @@ MACHINE_START(BCMRING, "BCMRING")
.init_early = bcmring_init_early, .init_early = bcmring_init_early,
.init_irq = bcmring_init_irq, .init_irq = bcmring_init_irq,
.timer = &bcmring_timer, .timer = &bcmring_timer,
.init_machine = bcmring_init_machine .init_machine = bcmring_init_machine,
.restart = bcmring_restart, .restart = bcmring_restart,
MACHINE_END MACHINE_END

View File

@ -33,17 +33,11 @@
#include <mach/timer.h> #include <mach/timer.h>
#include <linux/mm.h>
#include <linux/pfn.h> #include <linux/pfn.h>
#include <linux/atomic.h> #include <linux/atomic.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <mach/dma.h> #include <mach/dma.h>
/* I don't quite understand why dc4 fails when this is set to 1 and DMA is enabled */
/* especially since dc4 doesn't use kmalloc'd memory. */
#define ALLOW_MAP_OF_KMALLOC_MEMORY 0
/* ---- Public Variables ------------------------------------------------- */ /* ---- Public Variables ------------------------------------------------- */
/* ---- Private Constants and Types -------------------------------------- */ /* ---- Private Constants and Types -------------------------------------- */
@ -53,58 +47,18 @@
#define CONTROLLER_FROM_HANDLE(handle) (((handle) >> 4) & 0x0f) #define CONTROLLER_FROM_HANDLE(handle) (((handle) >> 4) & 0x0f)
#define CHANNEL_FROM_HANDLE(handle) ((handle) & 0x0f) #define CHANNEL_FROM_HANDLE(handle) ((handle) & 0x0f)
#define DMA_MAP_DEBUG 0
#if DMA_MAP_DEBUG
# define DMA_MAP_PRINT(fmt, args...) printk("%s: " fmt, __func__, ## args)
#else
# define DMA_MAP_PRINT(fmt, args...)
#endif
/* ---- Private Variables ------------------------------------------------ */ /* ---- Private Variables ------------------------------------------------ */
static DMA_Global_t gDMA; static DMA_Global_t gDMA;
static struct proc_dir_entry *gDmaDir; static struct proc_dir_entry *gDmaDir;
static atomic_t gDmaStatMemTypeKmalloc = ATOMIC_INIT(0);
static atomic_t gDmaStatMemTypeVmalloc = ATOMIC_INIT(0);
static atomic_t gDmaStatMemTypeUser = ATOMIC_INIT(0);
static atomic_t gDmaStatMemTypeCoherent = ATOMIC_INIT(0);
#include "dma_device.c" #include "dma_device.c"
/* ---- Private Function Prototypes -------------------------------------- */ /* ---- Private Function Prototypes -------------------------------------- */
/* ---- Functions ------------------------------------------------------- */ /* ---- Functions ------------------------------------------------------- */
/****************************************************************************/
/**
* Displays information for /proc/dma/mem-type
*/
/****************************************************************************/
static int dma_proc_read_mem_type(char *buf, char **start, off_t offset,
int count, int *eof, void *data)
{
int len = 0;
len += sprintf(buf + len, "dma_map_mem statistics\n");
len +=
sprintf(buf + len, "coherent: %d\n",
atomic_read(&gDmaStatMemTypeCoherent));
len +=
sprintf(buf + len, "kmalloc: %d\n",
atomic_read(&gDmaStatMemTypeKmalloc));
len +=
sprintf(buf + len, "vmalloc: %d\n",
atomic_read(&gDmaStatMemTypeVmalloc));
len +=
sprintf(buf + len, "user: %d\n",
atomic_read(&gDmaStatMemTypeUser));
return len;
}
/****************************************************************************/ /****************************************************************************/
/** /**
* Displays information for /proc/dma/channels * Displays information for /proc/dma/channels
@ -846,8 +800,6 @@ int dma_init(void)
dma_proc_read_channels, NULL); dma_proc_read_channels, NULL);
create_proc_read_entry("devices", 0, gDmaDir, create_proc_read_entry("devices", 0, gDmaDir,
dma_proc_read_devices, NULL); dma_proc_read_devices, NULL);
create_proc_read_entry("mem-type", 0, gDmaDir,
dma_proc_read_mem_type, NULL);
} }
out: out:
@ -1565,767 +1517,3 @@ int dma_set_device_handler(DMA_Device_t dev, /* Device to set the callback for.
} }
EXPORT_SYMBOL(dma_set_device_handler); EXPORT_SYMBOL(dma_set_device_handler);
/****************************************************************************/
/**
* Initializes a memory mapping structure
*/
/****************************************************************************/
int dma_init_mem_map(DMA_MemMap_t *memMap)
{
memset(memMap, 0, sizeof(*memMap));
sema_init(&memMap->lock, 1);
return 0;
}
EXPORT_SYMBOL(dma_init_mem_map);
/****************************************************************************/
/**
* Releases any memory currently being held by a memory mapping structure.
*/
/****************************************************************************/
int dma_term_mem_map(DMA_MemMap_t *memMap)
{
down(&memMap->lock); /* Just being paranoid */
/* Free up any allocated memory */
up(&memMap->lock);
memset(memMap, 0, sizeof(*memMap));
return 0;
}
EXPORT_SYMBOL(dma_term_mem_map);
/****************************************************************************/
/**
* Looks at a memory address and categorizes it.
*
* @return One of the values from the DMA_MemType_t enumeration.
*/
/****************************************************************************/
DMA_MemType_t dma_mem_type(void *addr)
{
unsigned long addrVal = (unsigned long)addr;
if (addrVal >= CONSISTENT_BASE) {
/* NOTE: DMA virtual memory space starts at 0xFFxxxxxx */
/* dma_alloc_xxx pages are physically and virtually contiguous */
return DMA_MEM_TYPE_DMA;
}
/* Technically, we could add one more classification. Addresses between VMALLOC_END */
/* and the beginning of the DMA virtual address could be considered to be I/O space. */
/* Right now, nobody cares about this particular classification, so we ignore it. */
if (is_vmalloc_addr(addr)) {
/* Address comes from the vmalloc'd region. Pages are virtually */
/* contiguous but NOT physically contiguous */
return DMA_MEM_TYPE_VMALLOC;
}
if (addrVal >= PAGE_OFFSET) {
/* PAGE_OFFSET is typically 0xC0000000 */
/* kmalloc'd pages are physically contiguous */
return DMA_MEM_TYPE_KMALLOC;
}
return DMA_MEM_TYPE_USER;
}
EXPORT_SYMBOL(dma_mem_type);
/****************************************************************************/
/**
* Looks at a memory address and determines if we support DMA'ing to/from
* that type of memory.
*
* @return boolean -
* return value != 0 means dma supported
* return value == 0 means dma not supported
*/
/****************************************************************************/
int dma_mem_supports_dma(void *addr)
{
DMA_MemType_t memType = dma_mem_type(addr);
return (memType == DMA_MEM_TYPE_DMA)
#if ALLOW_MAP_OF_KMALLOC_MEMORY
|| (memType == DMA_MEM_TYPE_KMALLOC)
#endif
|| (memType == DMA_MEM_TYPE_USER);
}
EXPORT_SYMBOL(dma_mem_supports_dma);
/****************************************************************************/
/**
* Maps in a memory region such that it can be used for performing a DMA.
*
* @return
*/
/****************************************************************************/
int dma_map_start(DMA_MemMap_t *memMap, /* Stores state information about the map */
enum dma_data_direction dir /* Direction that the mapping will be going */
) {
int rc;
down(&memMap->lock);
DMA_MAP_PRINT("memMap: %p\n", memMap);
if (memMap->inUse) {
printk(KERN_ERR "%s: memory map %p is already being used\n",
__func__, memMap);
rc = -EBUSY;
goto out;
}
memMap->inUse = 1;
memMap->dir = dir;
memMap->numRegionsUsed = 0;
rc = 0;
out:
DMA_MAP_PRINT("returning %d", rc);
up(&memMap->lock);
return rc;
}
EXPORT_SYMBOL(dma_map_start);
/****************************************************************************/
/**
* Adds a segment of memory to a memory map. Each segment is both
* physically and virtually contiguous.
*
* @return 0 on success, error code otherwise.
*/
/****************************************************************************/
static int dma_map_add_segment(DMA_MemMap_t *memMap, /* Stores state information about the map */
DMA_Region_t *region, /* Region that the segment belongs to */
void *virtAddr, /* Virtual address of the segment being added */
dma_addr_t physAddr, /* Physical address of the segment being added */
size_t numBytes /* Number of bytes of the segment being added */
) {
DMA_Segment_t *segment;
DMA_MAP_PRINT("memMap:%p va:%p pa:0x%x #:%d\n", memMap, virtAddr,
physAddr, numBytes);
/* Sanity check */
if (((unsigned long)virtAddr < (unsigned long)region->virtAddr)
|| (((unsigned long)virtAddr + numBytes)) >
((unsigned long)region->virtAddr + region->numBytes)) {
printk(KERN_ERR
"%s: virtAddr %p is outside region @ %p len: %d\n",
__func__, virtAddr, region->virtAddr, region->numBytes);
return -EINVAL;
}
if (region->numSegmentsUsed > 0) {
/* Check to see if this segment is physically contiguous with the previous one */
segment = &region->segment[region->numSegmentsUsed - 1];
if ((segment->physAddr + segment->numBytes) == physAddr) {
/* It is - just add on to the end */
DMA_MAP_PRINT("appending %d bytes to last segment\n",
numBytes);
segment->numBytes += numBytes;
return 0;
}
}
/* Reallocate to hold more segments, if required. */
if (region->numSegmentsUsed >= region->numSegmentsAllocated) {
DMA_Segment_t *newSegment;
size_t oldSize =
region->numSegmentsAllocated * sizeof(*newSegment);
int newAlloc = region->numSegmentsAllocated + 4;
size_t newSize = newAlloc * sizeof(*newSegment);
newSegment = kmalloc(newSize, GFP_KERNEL);
if (newSegment == NULL) {
return -ENOMEM;
}
memcpy(newSegment, region->segment, oldSize);
memset(&((uint8_t *) newSegment)[oldSize], 0,
newSize - oldSize);
kfree(region->segment);
region->numSegmentsAllocated = newAlloc;
region->segment = newSegment;
}
segment = &region->segment[region->numSegmentsUsed];
region->numSegmentsUsed++;
segment->virtAddr = virtAddr;
segment->physAddr = physAddr;
segment->numBytes = numBytes;
DMA_MAP_PRINT("returning success\n");
return 0;
}
/****************************************************************************/
/**
* Adds a region of memory to a memory map. Each region is virtually
* contiguous, but not necessarily physically contiguous.
*
* @return 0 on success, error code otherwise.
*/
/****************************************************************************/
int dma_map_add_region(DMA_MemMap_t *memMap, /* Stores state information about the map */
void *mem, /* Virtual address that we want to get a map of */
size_t numBytes /* Number of bytes being mapped */
) {
unsigned long addr = (unsigned long)mem;
unsigned int offset;
int rc = 0;
DMA_Region_t *region;
dma_addr_t physAddr;
down(&memMap->lock);
DMA_MAP_PRINT("memMap:%p va:%p #:%d\n", memMap, mem, numBytes);
if (!memMap->inUse) {
printk(KERN_ERR "%s: Make sure you call dma_map_start first\n",
__func__);
rc = -EINVAL;
goto out;
}
/* Reallocate to hold more regions. */
if (memMap->numRegionsUsed >= memMap->numRegionsAllocated) {
DMA_Region_t *newRegion;
size_t oldSize =
memMap->numRegionsAllocated * sizeof(*newRegion);
int newAlloc = memMap->numRegionsAllocated + 4;
size_t newSize = newAlloc * sizeof(*newRegion);
newRegion = kmalloc(newSize, GFP_KERNEL);
if (newRegion == NULL) {
rc = -ENOMEM;
goto out;
}
memcpy(newRegion, memMap->region, oldSize);
memset(&((uint8_t *) newRegion)[oldSize], 0, newSize - oldSize);
kfree(memMap->region);
memMap->numRegionsAllocated = newAlloc;
memMap->region = newRegion;
}
region = &memMap->region[memMap->numRegionsUsed];
memMap->numRegionsUsed++;
offset = addr & ~PAGE_MASK;
region->memType = dma_mem_type(mem);
region->virtAddr = mem;
region->numBytes = numBytes;
region->numSegmentsUsed = 0;
region->numLockedPages = 0;
region->lockedPages = NULL;
switch (region->memType) {
case DMA_MEM_TYPE_VMALLOC:
{
atomic_inc(&gDmaStatMemTypeVmalloc);
/* printk(KERN_ERR "%s: vmalloc'd pages are not supported\n", __func__); */
/* vmalloc'd pages are not physically contiguous */
rc = -EINVAL;
break;
}
case DMA_MEM_TYPE_KMALLOC:
{
atomic_inc(&gDmaStatMemTypeKmalloc);
/* kmalloc'd pages are physically contiguous, so they'll have exactly */
/* one segment */
#if ALLOW_MAP_OF_KMALLOC_MEMORY
physAddr =
dma_map_single(NULL, mem, numBytes, memMap->dir);
rc = dma_map_add_segment(memMap, region, mem, physAddr,
numBytes);
#else
rc = -EINVAL;
#endif
break;
}
case DMA_MEM_TYPE_DMA:
{
/* dma_alloc_xxx pages are physically contiguous */
atomic_inc(&gDmaStatMemTypeCoherent);
physAddr = (vmalloc_to_pfn(mem) << PAGE_SHIFT) + offset;
dma_sync_single_for_cpu(NULL, physAddr, numBytes,
memMap->dir);
rc = dma_map_add_segment(memMap, region, mem, physAddr,
numBytes);
break;
}
case DMA_MEM_TYPE_USER:
{
size_t firstPageOffset;
size_t firstPageSize;
struct page **pages;
struct task_struct *userTask;
atomic_inc(&gDmaStatMemTypeUser);
#if 1
/* If the pages are user pages, then the dma_mem_map_set_user_task function */
/* must have been previously called. */
if (memMap->userTask == NULL) {
printk(KERN_ERR
"%s: must call dma_mem_map_set_user_task when using user-mode memory\n",
__func__);
return -EINVAL;
}
/* User pages need to be locked. */
firstPageOffset =
(unsigned long)region->virtAddr & (PAGE_SIZE - 1);
firstPageSize = PAGE_SIZE - firstPageOffset;
region->numLockedPages = (firstPageOffset
+ region->numBytes +
PAGE_SIZE - 1) / PAGE_SIZE;
pages =
kmalloc(region->numLockedPages *
sizeof(struct page *), GFP_KERNEL);
if (pages == NULL) {
region->numLockedPages = 0;
return -ENOMEM;
}
userTask = memMap->userTask;
down_read(&userTask->mm->mmap_sem);
rc = get_user_pages(userTask, /* task */
userTask->mm, /* mm */
(unsigned long)region->virtAddr, /* start */
region->numLockedPages, /* len */
memMap->dir == DMA_FROM_DEVICE, /* write */
0, /* force */
pages, /* pages (array of pointers to page) */
NULL); /* vmas */
up_read(&userTask->mm->mmap_sem);
if (rc != region->numLockedPages) {
kfree(pages);
region->numLockedPages = 0;
if (rc >= 0) {
rc = -EINVAL;
}
} else {
uint8_t *virtAddr = region->virtAddr;
size_t bytesRemaining;
int pageIdx;
rc = 0; /* Since get_user_pages returns +ve number */
region->lockedPages = pages;
/* We've locked the user pages. Now we need to walk them and figure */
/* out the physical addresses. */
/* The first page may be partial */
dma_map_add_segment(memMap,
region,
virtAddr,
PFN_PHYS(page_to_pfn
(pages[0])) +
firstPageOffset,
firstPageSize);
virtAddr += firstPageSize;
bytesRemaining =
region->numBytes - firstPageSize;
for (pageIdx = 1;
pageIdx < region->numLockedPages;
pageIdx++) {
size_t bytesThisPage =
(bytesRemaining >
PAGE_SIZE ? PAGE_SIZE :
bytesRemaining);
DMA_MAP_PRINT
("pageIdx:%d pages[pageIdx]=%p pfn=%u phys=%u\n",
pageIdx, pages[pageIdx],
page_to_pfn(pages[pageIdx]),
PFN_PHYS(page_to_pfn
(pages[pageIdx])));
dma_map_add_segment(memMap,
region,
virtAddr,
PFN_PHYS(page_to_pfn
(pages
[pageIdx])),
bytesThisPage);
virtAddr += bytesThisPage;
bytesRemaining -= bytesThisPage;
}
}
#else
printk(KERN_ERR
"%s: User mode pages are not yet supported\n",
__func__);
/* user pages are not physically contiguous */
rc = -EINVAL;
#endif
break;
}
default:
{
printk(KERN_ERR "%s: Unsupported memory type: %d\n",
__func__, region->memType);
rc = -EINVAL;
break;
}
}
if (rc != 0) {
memMap->numRegionsUsed--;
}
out:
DMA_MAP_PRINT("returning %d\n", rc);
up(&memMap->lock);
return rc;
}
EXPORT_SYMBOL(dma_map_add_segment);
/****************************************************************************/
/**
* Maps in a memory region such that it can be used for performing a DMA.
*
* @return 0 on success, error code otherwise.
*/
/****************************************************************************/
int dma_map_mem(DMA_MemMap_t *memMap, /* Stores state information about the map */
void *mem, /* Virtual address that we want to get a map of */
size_t numBytes, /* Number of bytes being mapped */
enum dma_data_direction dir /* Direction that the mapping will be going */
) {
int rc;
rc = dma_map_start(memMap, dir);
if (rc == 0) {
rc = dma_map_add_region(memMap, mem, numBytes);
if (rc < 0) {
/* Since the add fails, this function will fail, and the caller won't */
/* call unmap, so we need to do it here. */
dma_unmap(memMap, 0);
}
}
return rc;
}
EXPORT_SYMBOL(dma_map_mem);
/****************************************************************************/
/**
* Setup a descriptor ring for a given memory map.
*
* It is assumed that the descriptor ring has already been initialized, and
* this routine will only reallocate a new descriptor ring if the existing
* one is too small.
*
* @return 0 on success, error code otherwise.
*/
/****************************************************************************/
int dma_map_create_descriptor_ring(DMA_Device_t dev, /* DMA device (where the ring is stored) */
DMA_MemMap_t *memMap, /* Memory map that will be used */
dma_addr_t devPhysAddr /* Physical address of device */
) {
int rc;
int numDescriptors;
DMA_DeviceAttribute_t *devAttr;
DMA_Region_t *region;
DMA_Segment_t *segment;
dma_addr_t srcPhysAddr;
dma_addr_t dstPhysAddr;
int regionIdx;
int segmentIdx;
devAttr = &DMA_gDeviceAttribute[dev];
down(&memMap->lock);
/* Figure out how many descriptors we need */
numDescriptors = 0;
for (regionIdx = 0; regionIdx < memMap->numRegionsUsed; regionIdx++) {
region = &memMap->region[regionIdx];
for (segmentIdx = 0; segmentIdx < region->numSegmentsUsed;
segmentIdx++) {
segment = &region->segment[segmentIdx];
if (memMap->dir == DMA_TO_DEVICE) {
srcPhysAddr = segment->physAddr;
dstPhysAddr = devPhysAddr;
} else {
srcPhysAddr = devPhysAddr;
dstPhysAddr = segment->physAddr;
}
rc =
dma_calculate_descriptor_count(dev, srcPhysAddr,
dstPhysAddr,
segment->
numBytes);
if (rc < 0) {
printk(KERN_ERR
"%s: dma_calculate_descriptor_count failed: %d\n",
__func__, rc);
goto out;
}
numDescriptors += rc;
}
}
/* Adjust the size of the ring, if it isn't big enough */
if (numDescriptors > devAttr->ring.descriptorsAllocated) {
dma_free_descriptor_ring(&devAttr->ring);
rc =
dma_alloc_descriptor_ring(&devAttr->ring,
numDescriptors);
if (rc < 0) {
printk(KERN_ERR
"%s: dma_alloc_descriptor_ring failed: %d\n",
__func__, rc);
goto out;
}
} else {
rc =
dma_init_descriptor_ring(&devAttr->ring,
numDescriptors);
if (rc < 0) {
printk(KERN_ERR
"%s: dma_init_descriptor_ring failed: %d\n",
__func__, rc);
goto out;
}
}
/* Populate the descriptors */
for (regionIdx = 0; regionIdx < memMap->numRegionsUsed; regionIdx++) {
region = &memMap->region[regionIdx];
for (segmentIdx = 0; segmentIdx < region->numSegmentsUsed;
segmentIdx++) {
segment = &region->segment[segmentIdx];
if (memMap->dir == DMA_TO_DEVICE) {
srcPhysAddr = segment->physAddr;
dstPhysAddr = devPhysAddr;
} else {
srcPhysAddr = devPhysAddr;
dstPhysAddr = segment->physAddr;
}
rc =
dma_add_descriptors(&devAttr->ring, dev,
srcPhysAddr, dstPhysAddr,
segment->numBytes);
if (rc < 0) {
printk(KERN_ERR
"%s: dma_add_descriptors failed: %d\n",
__func__, rc);
goto out;
}
}
}
rc = 0;
out:
up(&memMap->lock);
return rc;
}
EXPORT_SYMBOL(dma_map_create_descriptor_ring);
/****************************************************************************/
/**
* Tears down a memory map set up by dma_map_mem() (or by dma_map_start()
* plus dma_map_add_region()), releasing any user pages that were locked
* for the transfer.
*
* @return 0 on success, error code otherwise.
*/
/****************************************************************************/
int dma_unmap(DMA_MemMap_t *memMap, /* Stores state information about the map */
int dirtied /* non-zero if any of the pages were modified */
) {
int rc = 0;
int regionIdx;
int segmentIdx;
DMA_Region_t *region;
DMA_Segment_t *segment;
down(&memMap->lock);
for (regionIdx = 0; regionIdx < memMap->numRegionsUsed; regionIdx++) {
region = &memMap->region[regionIdx];
for (segmentIdx = 0; segmentIdx < region->numSegmentsUsed;
segmentIdx++) {
segment = &region->segment[segmentIdx];
switch (region->memType) {
case DMA_MEM_TYPE_VMALLOC:
{
printk(KERN_ERR
"%s: vmalloc'd pages are not yet supported\n",
__func__);
rc = -EINVAL;
goto out;
}
case DMA_MEM_TYPE_KMALLOC:
{
#if ALLOW_MAP_OF_KMALLOC_MEMORY
dma_unmap_single(NULL,
segment->physAddr,
segment->numBytes,
memMap->dir);
#endif
break;
}
case DMA_MEM_TYPE_DMA:
{
					dma_sync_single_for_cpu(NULL,
								segment->physAddr,
								segment->numBytes,
								memMap->dir);
break;
}
case DMA_MEM_TYPE_USER:
{
/* Nothing to do here. */
break;
}
default:
{
printk(KERN_ERR
"%s: Unsupported memory type: %d\n",
__func__, region->memType);
rc = -EINVAL;
goto out;
}
}
segment->virtAddr = NULL;
segment->physAddr = 0;
segment->numBytes = 0;
}
if (region->numLockedPages > 0) {
int pageIdx;
/* Some user pages were locked. We need to go and unlock them now. */
for (pageIdx = 0; pageIdx < region->numLockedPages;
pageIdx++) {
struct page *page =
region->lockedPages[pageIdx];
if (memMap->dir == DMA_FROM_DEVICE) {
SetPageDirty(page);
}
page_cache_release(page);
}
kfree(region->lockedPages);
region->numLockedPages = 0;
region->lockedPages = NULL;
}
region->memType = DMA_MEM_TYPE_NONE;
region->virtAddr = NULL;
region->numBytes = 0;
region->numSegmentsUsed = 0;
}
memMap->userTask = NULL;
memMap->numRegionsUsed = 0;
memMap->inUse = 0;
out:
up(&memMap->lock);
return rc;
}
EXPORT_SYMBOL(dma_unmap);
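/****************************************************************************/
/*
*   End-to-end sketch (an assumption, not in the original source): mapping
*   user memory requires recording the owning task first so that the right
*   mm locks can be taken when the pages are pinned; dirtied=1 tells
*   dma_unmap() that the device wrote to the pages.
*/
/****************************************************************************/
#if 0
	DMA_MemMap_t memMap;

	dma_init_mem_map(&memMap);
	dma_mem_map_set_user_task(&memMap, current);

	if (dma_map_mem(&memMap, userBuf, len, DMA_FROM_DEVICE) == 0) {
		/* ... perform the transfer ... */
		dma_unmap(&memMap, 1);	/* pages were written by the device */
	}
	dma_term_mem_map(&memMap);
#endif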
@ -26,15 +26,9 @@
/* ---- Include Files ---------------------------------------------------- */ /* ---- Include Files ---------------------------------------------------- */
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/semaphore.h> #include <linux/semaphore.h>
#include <csp/dmacHw.h> #include <csp/dmacHw.h>
#include <mach/timer.h> #include <mach/timer.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
/* ---- Constants and Types ---------------------------------------------- */ /* ---- Constants and Types ---------------------------------------------- */
@ -111,78 +105,6 @@ typedef struct {
} DMA_DescriptorRing_t; } DMA_DescriptorRing_t;
/****************************************************************************
*
* The DMA_MemType_t and DMA_MemMap_t are helper structures used to setup
* DMA chains from a variety of memory sources.
*
*****************************************************************************/
#define DMA_MEM_MAP_MIN_SIZE 4096 /* Pages less than this size are better */
/* off not being DMA'd. */
typedef enum {
DMA_MEM_TYPE_NONE, /* Not a valid setting */
DMA_MEM_TYPE_VMALLOC, /* Memory came from vmalloc call */
DMA_MEM_TYPE_KMALLOC, /* Memory came from kmalloc call */
DMA_MEM_TYPE_DMA, /* Memory came from dma_alloc_xxx call */
DMA_MEM_TYPE_USER, /* Memory came from user space. */
} DMA_MemType_t;
/* A segment represents a physically and virtually contiguous chunk of memory. */
/* i.e. each segment can be DMA'd */
/* A user of the DMA code will add memory regions. Each region may need to be */
/* represented by one or more segments. */
typedef struct {
void *virtAddr; /* Virtual address used for this segment */
dma_addr_t physAddr; /* Physical address this segment maps to */
size_t numBytes; /* Size of the segment, in bytes */
} DMA_Segment_t;
/* A region represents a virtually contiguous chunk of memory, which may be */
/* made up of multiple segments. */
typedef struct {
DMA_MemType_t memType;
void *virtAddr;
size_t numBytes;
/* Each region (virtually contiguous) consists of one or more segments. Each */
/* segment is virtually and physically contiguous. */
int numSegmentsUsed;
int numSegmentsAllocated;
DMA_Segment_t *segment;
/* When a region corresponds to user memory, we need to lock all of the pages */
/* down before we can figure out the physical addresses. The lockedPages array contains */
/* the pages that were locked, and which subsequently need to be unlocked once the */
/* memory is unmapped. */
unsigned numLockedPages;
struct page **lockedPages;
} DMA_Region_t;
typedef struct {
int inUse; /* Is this mapping currently being used? */
struct semaphore lock; /* Acquired when using this structure */
enum dma_data_direction dir; /* Direction this transfer is intended for */
/* In the event that we're mapping user memory, we need to know which task */
/* the memory is for, so that we can obtain the correct mm locks. */
struct task_struct *userTask;
int numRegionsUsed;
int numRegionsAllocated;
DMA_Region_t *region;
} DMA_MemMap_t;
/**************************************************************************** /****************************************************************************
* *
* The DMA_DeviceAttribute_t contains information which describes a * The DMA_DeviceAttribute_t contains information which describes a
@ -568,124 +490,6 @@ int dma_alloc_double_dst_descriptors(DMA_Handle_t handle, /* DMA Handle */
size_t numBytes /* Number of bytes in each destination buffer */ size_t numBytes /* Number of bytes in each destination buffer */
); );
/****************************************************************************/
/**
* Initializes a DMA_MemMap_t data structure
*/
/****************************************************************************/
int dma_init_mem_map(DMA_MemMap_t *memMap /* Stores state information about the map */
);
/****************************************************************************/
/**
* Releases any memory currently being held by a memory mapping structure.
*/
/****************************************************************************/
int dma_term_mem_map(DMA_MemMap_t *memMap /* Stores state information about the map */
);
/****************************************************************************/
/**
* Looks at a memory address and categorizes it.
*
* @return One of the values from the DMA_MemType_t enumeration.
*/
/****************************************************************************/
DMA_MemType_t dma_mem_type(void *addr);
/****************************************************************************/
/**
* Sets the process (aka userTask) associated with a mem map. This is
* required if user-mode segments will be added to the mapping.
*/
/****************************************************************************/
static inline void dma_mem_map_set_user_task(DMA_MemMap_t *memMap,
struct task_struct *task)
{
memMap->userTask = task;
}
/****************************************************************************/
/**
* Looks at a memory address and determines if we support DMA'ing to/from
* that type of memory.
*
* @return boolean -
* return value != 0 means dma supported
* return value == 0 means dma not supported
*/
/****************************************************************************/
int dma_mem_supports_dma(void *addr);
/****************************************************************************/
/**
* Initializes a memory map for use. Since this function acquires a
* semaphore within the memory map, it is VERY important that dma_unmap
* be called when you're finished using the map.
*/
/****************************************************************************/
int dma_map_start(DMA_MemMap_t *memMap, /* Stores state information about the map */
enum dma_data_direction dir /* Direction that the mapping will be going */
);
/****************************************************************************/
/**
* Adds a segment of memory to a memory map.
*
* @return 0 on success, error code otherwise.
*/
/****************************************************************************/
int dma_map_add_region(DMA_MemMap_t *memMap, /* Stores state information about the map */
void *mem, /* Virtual address that we want to get a map of */
size_t numBytes /* Number of bytes being mapped */
);
/****************************************************************************/
/**
* Creates a descriptor ring from a memory mapping.
*
* @return 0 on success, error code otherwise.
*/
/****************************************************************************/
int dma_map_create_descriptor_ring(DMA_Device_t dev, /* DMA device (where the ring is stored) */
DMA_MemMap_t *memMap, /* Memory map that will be used */
dma_addr_t devPhysAddr /* Physical address of device */
);
/****************************************************************************/
/**
* Maps in a memory region such that it can be used for performing a DMA.
*
* @return 0 on success, error code otherwise.
*/
/****************************************************************************/
int dma_map_mem(DMA_MemMap_t *memMap, /* Stores state information about the map */
void *addr, /* Virtual address that we want to get a map of */
size_t count, /* Number of bytes being mapped */
enum dma_data_direction dir /* Direction that the mapping will be going */
);
/****************************************************************************/
/**
* Unmaps a memory region previously mapped for DMA, releasing any locked
* user pages.
*
* @return 0 on success, error code otherwise.
*/
/****************************************************************************/
int dma_unmap(DMA_MemMap_t *memMap, /* Stores state information about the map */
int dirtied /* non-zero if any of the pages were modified */
);
/****************************************************************************/ /****************************************************************************/
/** /**
* Initiates a transfer when the descriptors have already been setup. * Initiates a transfer when the descriptors have already been setup.
@ -44,7 +44,7 @@
#include <mach/aemif.h> #include <mach/aemif.h>
#include <mach/spi.h> #include <mach/spi.h>
#define DA850_EVM_PHY_ID "0:00" #define DA850_EVM_PHY_ID "davinci_mdio-0:00"
#define DA850_LCD_PWR_PIN GPIO_TO_PIN(2, 8) #define DA850_LCD_PWR_PIN GPIO_TO_PIN(2, 8)
#define DA850_LCD_BL_PIN GPIO_TO_PIN(2, 15) #define DA850_LCD_BL_PIN GPIO_TO_PIN(2, 15)
@ -54,7 +54,7 @@ static inline int have_tvp7002(void)
return 0; return 0;
} }
#define DM365_EVM_PHY_ID "0:01" #define DM365_EVM_PHY_ID "davinci_mdio-0:01"
/* /*
* A MAX-II CPLD is used for various board control functions. * A MAX-II CPLD is used for various board control functions.
*/ */
@ -40,7 +40,7 @@
#include <mach/usb.h> #include <mach/usb.h>
#include <mach/aemif.h> #include <mach/aemif.h>
#define DM644X_EVM_PHY_ID "0:01" #define DM644X_EVM_PHY_ID "davinci_mdio-0:01"
#define LXT971_PHY_ID (0x001378e2) #define LXT971_PHY_ID (0x001378e2)
#define LXT971_PHY_MASK (0xfffffff0) #define LXT971_PHY_MASK (0xfffffff0)
@ -736,7 +736,7 @@ static struct davinci_uart_config uart_config __initdata = {
.enabled_uarts = (1 << 0), .enabled_uarts = (1 << 0),
}; };
#define DM646X_EVM_PHY_ID "0:01" #define DM646X_EVM_PHY_ID "davinci_mdio-0:01"
/* /*
* The following EDMA channels/slots are not being used by drivers (for * The following EDMA channels/slots are not being used by drivers (for
* example: Timer, GPIO, UART events etc) on dm646x, hence they are being * example: Timer, GPIO, UART events etc) on dm646x, hence they are being
@ -39,7 +39,7 @@
#include <mach/mmc.h> #include <mach/mmc.h>
#include <mach/usb.h> #include <mach/usb.h>
#define NEUROS_OSD2_PHY_ID "0:01" #define NEUROS_OSD2_PHY_ID "davinci_mdio-0:01"
#define LXT971_PHY_ID 0x001378e2 #define LXT971_PHY_ID 0x001378e2
#define LXT971_PHY_MASK 0xfffffff0 #define LXT971_PHY_MASK 0xfffffff0
@ -21,7 +21,7 @@
#include <mach/da8xx.h> #include <mach/da8xx.h>
#include <mach/mux.h> #include <mach/mux.h>
#define HAWKBOARD_PHY_ID "0:07" #define HAWKBOARD_PHY_ID "davinci_mdio-0:07"
#define DA850_HAWK_MMCSD_CD_PIN GPIO_TO_PIN(3, 12) #define DA850_HAWK_MMCSD_CD_PIN GPIO_TO_PIN(3, 12)
#define DA850_HAWK_MMCSD_WP_PIN GPIO_TO_PIN(3, 13) #define DA850_HAWK_MMCSD_WP_PIN GPIO_TO_PIN(3, 13)
@ -42,7 +42,7 @@
#include <mach/mux.h> #include <mach/mux.h>
#include <mach/usb.h> #include <mach/usb.h>
#define SFFSDR_PHY_ID "0:01" #define SFFSDR_PHY_ID "davinci_mdio-0:01"
static struct mtd_partition davinci_sffsdr_nandflash_partition[] = { static struct mtd_partition davinci_sffsdr_nandflash_partition[] = {
/* U-Boot Environment: Block 0 /* U-Boot Environment: Block 0
* UBL: Block 1 * UBL: Block 1
@ -153,34 +153,6 @@ static struct clk pll1_sysclk3 = {
.div_reg = PLLDIV3, .div_reg = PLLDIV3,
}; };
static struct clk pll1_sysclk4 = {
.name = "pll1_sysclk4",
.parent = &pll1_clk,
.flags = CLK_PLL,
.div_reg = PLLDIV4,
};
static struct clk pll1_sysclk5 = {
.name = "pll1_sysclk5",
.parent = &pll1_clk,
.flags = CLK_PLL,
.div_reg = PLLDIV5,
};
static struct clk pll1_sysclk6 = {
.name = "pll0_sysclk6",
.parent = &pll0_clk,
.flags = CLK_PLL,
.div_reg = PLLDIV6,
};
static struct clk pll1_sysclk7 = {
.name = "pll1_sysclk7",
.parent = &pll1_clk,
.flags = CLK_PLL,
.div_reg = PLLDIV7,
};
static struct clk i2c0_clk = { static struct clk i2c0_clk = {
.name = "i2c0", .name = "i2c0",
.parent = &pll0_aux_clk, .parent = &pll0_aux_clk,
@ -397,10 +369,6 @@ static struct clk_lookup da850_clks[] = {
CLK(NULL, "pll1_aux", &pll1_aux_clk), CLK(NULL, "pll1_aux", &pll1_aux_clk),
CLK(NULL, "pll1_sysclk2", &pll1_sysclk2), CLK(NULL, "pll1_sysclk2", &pll1_sysclk2),
CLK(NULL, "pll1_sysclk3", &pll1_sysclk3), CLK(NULL, "pll1_sysclk3", &pll1_sysclk3),
CLK(NULL, "pll1_sysclk4", &pll1_sysclk4),
CLK(NULL, "pll1_sysclk5", &pll1_sysclk5),
CLK(NULL, "pll1_sysclk6", &pll1_sysclk6),
CLK(NULL, "pll1_sysclk7", &pll1_sysclk7),
CLK("i2c_davinci.1", NULL, &i2c0_clk), CLK("i2c_davinci.1", NULL, &i2c0_clk),
CLK(NULL, "timer0", &timerp64_0_clk), CLK(NULL, "timer0", &timerp64_0_clk),
CLK("watchdog", NULL, &timerp64_1_clk), CLK("watchdog", NULL, &timerp64_1_clk),
@ -402,7 +402,7 @@ void __init exynos4_init_irq(void)
gic_bank_offset = soc_is_exynos4412() ? 0x4000 : 0x8000; gic_bank_offset = soc_is_exynos4412() ? 0x4000 : 0x8000;
if (!of_have_populated_dt()) if (!of_have_populated_dt())
gic_init_bases(0, IRQ_PPI(0), S5P_VA_GIC_DIST, S5P_VA_GIC_CPU, gic_bank_offset); gic_init_bases(0, IRQ_PPI(0), S5P_VA_GIC_DIST, S5P_VA_GIC_CPU, gic_bank_offset, NULL);
#ifdef CONFIG_OF #ifdef CONFIG_OF
else else
of_irq_init(exynos4_dt_irq_match); of_irq_init(exynos4_dt_irq_match);
@ -47,7 +47,7 @@ static const struct of_dev_auxdata imx51_auxdata_lookup[] __initconst = {
static int __init imx51_tzic_add_irq_domain(struct device_node *np, static int __init imx51_tzic_add_irq_domain(struct device_node *np,
struct device_node *interrupt_parent) struct device_node *interrupt_parent)
{ {
irq_domain_add_simple(np, 0); irq_domain_add_legacy(np, 128, 0, 0, &irq_domain_simple_ops, NULL);
return 0; return 0;
} }
@ -57,7 +57,7 @@ static int __init imx51_gpio_add_irq_domain(struct device_node *np,
static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS; static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;
gpio_irq_base -= 32; gpio_irq_base -= 32;
irq_domain_add_simple(np, gpio_irq_base); irq_domain_add_legacy(np, 32, gpio_irq_base, 0, &irq_domain_simple_ops, NULL);
return 0; return 0;
} }
@ -51,7 +51,7 @@ static const struct of_dev_auxdata imx53_auxdata_lookup[] __initconst = {
static int __init imx53_tzic_add_irq_domain(struct device_node *np, static int __init imx53_tzic_add_irq_domain(struct device_node *np,
struct device_node *interrupt_parent) struct device_node *interrupt_parent)
{ {
irq_domain_add_simple(np, 0); irq_domain_add_legacy(np, 128, 0, 0, &irq_domain_simple_ops, NULL);
return 0; return 0;
} }
@ -61,7 +61,7 @@ static int __init imx53_gpio_add_irq_domain(struct device_node *np,
static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS; static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;
gpio_irq_base -= 32; gpio_irq_base -= 32;
irq_domain_add_simple(np, gpio_irq_base); irq_domain_add_legacy(np, 32, gpio_irq_base, 0, &irq_domain_simple_ops, NULL);
return 0; return 0;
} }
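For reference, the TZIC/GPIO conversions above (and the matching ones for imx6q, OMAP2+, and sirfsoc later in this series) all follow the same pattern. A minimal sketch of the new legacy-domain registration, with an illustrative controller name and hwirq count not taken from the patch:

static int __init example_intc_add_irq_domain(struct device_node *np,
					      struct device_node *interrupt_parent)
{
	/* Map 128 hwirqs 1:1 onto Linux irq numbers starting at 0 */
	irq_domain_add_legacy(np, 128, 0, 0, &irq_domain_simple_ops, NULL);
	return 0;
}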
@ -97,7 +97,8 @@ static int __init imx6q_gpio_add_irq_domain(struct device_node *np,
static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS; static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;
gpio_irq_base -= 32; gpio_irq_base -= 32;
irq_domain_add_simple(np, gpio_irq_base); irq_domain_add_legacy(np, 32, gpio_irq_base, 0, &irq_domain_simple_ops,
NULL);
return 0; return 0;
} }
@ -80,12 +80,8 @@ static struct of_device_id msm_dt_gic_match[] __initdata = {
static void __init msm8x60_dt_init(void) static void __init msm8x60_dt_init(void)
{ {
struct device_node *node; irq_domain_generate_simple(msm_dt_gic_match, MSM8X60_QGIC_DIST_PHYS,
GIC_SPI_START);
node = of_find_matching_node_by_address(NULL, msm_dt_gic_match,
MSM8X60_QGIC_DIST_PHYS);
if (node)
irq_domain_add_simple(node, GIC_SPI_START);
if (of_machine_is_compatible("qcom,msm8660-surf")) { if (of_machine_is_compatible("qcom,msm8660-surf")) {
printk(KERN_INFO "Init surf UART registers\n"); printk(KERN_INFO "Init surf UART registers\n");
@ -213,13 +213,12 @@ config MACH_OMAP3_PANDORA
depends on ARCH_OMAP3 depends on ARCH_OMAP3
default y default y
select OMAP_PACKAGE_CBB select OMAP_PACKAGE_CBB
select REGULATOR_FIXED_VOLTAGE select REGULATOR_FIXED_VOLTAGE if REGULATOR
config MACH_OMAP3_TOUCHBOOK config MACH_OMAP3_TOUCHBOOK
bool "OMAP3 Touch Book" bool "OMAP3 Touch Book"
depends on ARCH_OMAP3 depends on ARCH_OMAP3
default y default y
select BACKLIGHT_CLASS_DEVICE
config MACH_OMAP_3430SDP config MACH_OMAP_3430SDP
bool "OMAP 3430 SDP board" bool "OMAP 3430 SDP board"
@ -265,7 +264,7 @@ config MACH_OMAP_ZOOM2
select SERIAL_8250 select SERIAL_8250
select SERIAL_CORE_CONSOLE select SERIAL_CORE_CONSOLE
select SERIAL_8250_CONSOLE select SERIAL_8250_CONSOLE
select REGULATOR_FIXED_VOLTAGE select REGULATOR_FIXED_VOLTAGE if REGULATOR
config MACH_OMAP_ZOOM3 config MACH_OMAP_ZOOM3
bool "OMAP3630 Zoom3 board" bool "OMAP3630 Zoom3 board"
@ -275,7 +274,7 @@ config MACH_OMAP_ZOOM3
select SERIAL_8250 select SERIAL_8250
select SERIAL_CORE_CONSOLE select SERIAL_CORE_CONSOLE
select SERIAL_8250_CONSOLE select SERIAL_8250_CONSOLE
select REGULATOR_FIXED_VOLTAGE select REGULATOR_FIXED_VOLTAGE if REGULATOR
config MACH_CM_T35 config MACH_CM_T35
bool "CompuLab CM-T35/CM-T3730 modules" bool "CompuLab CM-T35/CM-T3730 modules"
@ -334,7 +333,7 @@ config MACH_OMAP_4430SDP
depends on ARCH_OMAP4 depends on ARCH_OMAP4
select OMAP_PACKAGE_CBL select OMAP_PACKAGE_CBL
select OMAP_PACKAGE_CBS select OMAP_PACKAGE_CBS
select REGULATOR_FIXED_VOLTAGE select REGULATOR_FIXED_VOLTAGE if REGULATOR
config MACH_OMAP4_PANDA config MACH_OMAP4_PANDA
bool "OMAP4 Panda Board" bool "OMAP4 Panda Board"
@ -342,7 +341,7 @@ config MACH_OMAP4_PANDA
depends on ARCH_OMAP4 depends on ARCH_OMAP4
select OMAP_PACKAGE_CBL select OMAP_PACKAGE_CBL
select OMAP_PACKAGE_CBS select OMAP_PACKAGE_CBS
select REGULATOR_FIXED_VOLTAGE select REGULATOR_FIXED_VOLTAGE if REGULATOR
config OMAP3_EMU config OMAP3_EMU
bool "OMAP3 debugging peripherals" bool "OMAP3 debugging peripherals"
@ -52,8 +52,9 @@
#define ETH_KS8851_QUART 138 #define ETH_KS8851_QUART 138
#define OMAP4_SFH7741_SENSOR_OUTPUT_GPIO 184 #define OMAP4_SFH7741_SENSOR_OUTPUT_GPIO 184
#define OMAP4_SFH7741_ENABLE_GPIO 188 #define OMAP4_SFH7741_ENABLE_GPIO 188
#define HDMI_GPIO_HPD 60 /* Hot plug pin for HDMI */ #define HDMI_GPIO_CT_CP_HPD 60 /* HPD mode enable/disable */
#define HDMI_GPIO_LS_OE 41 /* Level shifter for HDMI */ #define HDMI_GPIO_LS_OE 41 /* Level shifter for HDMI */
#define HDMI_GPIO_HPD 63 /* Hotplug detect */
#define DISPLAY_SEL_GPIO 59 /* LCD2/PicoDLP switch */ #define DISPLAY_SEL_GPIO 59 /* LCD2/PicoDLP switch */
#define DLP_POWER_ON_GPIO 40 #define DLP_POWER_ON_GPIO 40
@ -603,8 +604,9 @@ static void __init omap_sfh7741prox_init(void)
} }
static struct gpio sdp4430_hdmi_gpios[] = { static struct gpio sdp4430_hdmi_gpios[] = {
{ HDMI_GPIO_HPD, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_hpd" }, { HDMI_GPIO_CT_CP_HPD, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_ct_cp_hpd" },
{ HDMI_GPIO_LS_OE, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_ls_oe" }, { HDMI_GPIO_LS_OE, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_ls_oe" },
{ HDMI_GPIO_HPD, GPIOF_DIR_IN, "hdmi_gpio_hpd" },
}; };
static int sdp4430_panel_enable_hdmi(struct omap_dss_device *dssdev) static int sdp4430_panel_enable_hdmi(struct omap_dss_device *dssdev)
@ -621,8 +623,7 @@ static int sdp4430_panel_enable_hdmi(struct omap_dss_device *dssdev)
static void sdp4430_panel_disable_hdmi(struct omap_dss_device *dssdev) static void sdp4430_panel_disable_hdmi(struct omap_dss_device *dssdev)
{ {
gpio_free(HDMI_GPIO_LS_OE); gpio_free_array(sdp4430_hdmi_gpios, ARRAY_SIZE(sdp4430_hdmi_gpios));
gpio_free(HDMI_GPIO_HPD);
} }
static struct nokia_dsi_panel_data dsi1_panel = { static struct nokia_dsi_panel_data dsi1_panel = {
@ -738,6 +739,10 @@ static void sdp4430_lcd_init(void)
pr_err("%s: Could not get lcd2_reset_gpio\n", __func__); pr_err("%s: Could not get lcd2_reset_gpio\n", __func__);
} }
static struct omap_dss_hdmi_data sdp4430_hdmi_data = {
.hpd_gpio = HDMI_GPIO_HPD,
};
static struct omap_dss_device sdp4430_hdmi_device = { static struct omap_dss_device sdp4430_hdmi_device = {
.name = "hdmi", .name = "hdmi",
.driver_name = "hdmi_panel", .driver_name = "hdmi_panel",
@ -745,6 +750,7 @@ static struct omap_dss_device sdp4430_hdmi_device = {
.platform_enable = sdp4430_panel_enable_hdmi, .platform_enable = sdp4430_panel_enable_hdmi,
.platform_disable = sdp4430_panel_disable_hdmi, .platform_disable = sdp4430_panel_disable_hdmi,
.channel = OMAP_DSS_CHANNEL_DIGIT, .channel = OMAP_DSS_CHANNEL_DIGIT,
.data = &sdp4430_hdmi_data,
}; };
static struct picodlp_panel_data sdp4430_picodlp_pdata = { static struct picodlp_panel_data sdp4430_picodlp_pdata = {
@ -808,7 +814,7 @@ static struct omap_dss_board_info sdp4430_dss_data = {
.default_device = &sdp4430_lcd_device, .default_device = &sdp4430_lcd_device,
}; };
static void omap_4430sdp_display_init(void) static void __init omap_4430sdp_display_init(void)
{ {
int r; int r;
@ -829,6 +835,10 @@ static void omap_4430sdp_display_init(void)
omap_hdmi_init(OMAP_HDMI_SDA_SCL_EXTERNAL_PULLUP); omap_hdmi_init(OMAP_HDMI_SDA_SCL_EXTERNAL_PULLUP);
else else
omap_hdmi_init(0); omap_hdmi_init(0);
omap_mux_init_gpio(HDMI_GPIO_LS_OE, OMAP_PIN_OUTPUT);
omap_mux_init_gpio(HDMI_GPIO_CT_CP_HPD, OMAP_PIN_OUTPUT);
omap_mux_init_gpio(HDMI_GPIO_HPD, OMAP_PIN_INPUT_PULLDOWN);
} }
#ifdef CONFIG_OMAP_MUX #ifdef CONFIG_OMAP_MUX
@ -841,7 +851,7 @@ static struct omap_board_mux board_mux[] __initdata = {
#define board_mux NULL #define board_mux NULL
#endif #endif
static void omap4_sdp4430_wifi_mux_init(void) static void __init omap4_sdp4430_wifi_mux_init(void)
{ {
omap_mux_init_gpio(GPIO_WIFI_IRQ, OMAP_PIN_INPUT | omap_mux_init_gpio(GPIO_WIFI_IRQ, OMAP_PIN_INPUT |
OMAP_PIN_OFF_WAKEUPENABLE); OMAP_PIN_OFF_WAKEUPENABLE);
@ -868,12 +878,17 @@ static struct wl12xx_platform_data omap4_sdp4430_wlan_data __initdata = {
.board_tcxo_clock = WL12XX_TCXOCLOCK_26, .board_tcxo_clock = WL12XX_TCXOCLOCK_26,
}; };
static void omap4_sdp4430_wifi_init(void) static void __init omap4_sdp4430_wifi_init(void)
{ {
int ret;
omap4_sdp4430_wifi_mux_init(); omap4_sdp4430_wifi_mux_init();
if (wl12xx_set_platform_data(&omap4_sdp4430_wlan_data)) ret = wl12xx_set_platform_data(&omap4_sdp4430_wlan_data);
pr_err("Error setting wl12xx data\n"); if (ret)
platform_device_register(&omap_vwlan_device); pr_err("Error setting wl12xx data: %d\n", ret);
ret = platform_device_register(&omap_vwlan_device);
if (ret)
pr_err("Error registering wl12xx device: %d\n", ret);
} }
static void __init omap_4430sdp_init(void) static void __init omap_4430sdp_init(void)
@ -67,7 +67,7 @@ static void __init omap_generic_init(void)
{ {
struct device_node *node = of_find_matching_node(NULL, intc_match); struct device_node *node = of_find_matching_node(NULL, intc_match);
if (node) if (node)
irq_domain_add_simple(node, 0); irq_domain_add_legacy(node, 32, 0, 0, &irq_domain_simple_ops, NULL);
omap_sdrc_init(NULL, NULL); omap_sdrc_init(NULL, NULL);
@ -617,6 +617,21 @@ static struct gpio omap3_evm_ehci_gpios[] __initdata = {
{ OMAP3_EVM_EHCI_SELECT, GPIOF_OUT_INIT_LOW, "select EHCI port" }, { OMAP3_EVM_EHCI_SELECT, GPIOF_OUT_INIT_LOW, "select EHCI port" },
}; };
static void __init omap3_evm_wl12xx_init(void)
{
#ifdef CONFIG_WL12XX_PLATFORM_DATA
int ret;
/* WL12xx WLAN Init */
ret = wl12xx_set_platform_data(&omap3evm_wlan_data);
if (ret)
pr_err("error setting wl12xx data: %d\n", ret);
ret = platform_device_register(&omap3evm_wlan_regulator);
if (ret)
pr_err("error registering wl12xx device: %d\n", ret);
#endif
}
static void __init omap3_evm_init(void) static void __init omap3_evm_init(void)
{ {
omap3_evm_get_revision(); omap3_evm_get_revision();
@ -665,13 +680,7 @@ static void __init omap3_evm_init(void)
omap_ads7846_init(1, OMAP3_EVM_TS_GPIO, 310, NULL); omap_ads7846_init(1, OMAP3_EVM_TS_GPIO, 310, NULL);
omap3evm_init_smsc911x(); omap3evm_init_smsc911x();
omap3_evm_display_init(); omap3_evm_display_init();
omap3_evm_wl12xx_init();
#ifdef CONFIG_WL12XX_PLATFORM_DATA
/* WL12xx WLAN Init */
if (wl12xx_set_platform_data(&omap3evm_wlan_data))
pr_err("error setting wl12xx data\n");
platform_device_register(&omap3evm_wlan_regulator);
#endif
} }
MACHINE_START(OMAP3EVM, "OMAP3 EVM") MACHINE_START(OMAP3EVM, "OMAP3 EVM")
@ -51,8 +51,9 @@
#define GPIO_HUB_NRESET 62 #define GPIO_HUB_NRESET 62
#define GPIO_WIFI_PMENA 43 #define GPIO_WIFI_PMENA 43
#define GPIO_WIFI_IRQ 53 #define GPIO_WIFI_IRQ 53
#define HDMI_GPIO_HPD 60 /* Hot plug pin for HDMI */ #define HDMI_GPIO_CT_CP_HPD 60 /* HPD mode enable/disable */
#define HDMI_GPIO_LS_OE 41 /* Level shifter for HDMI */ #define HDMI_GPIO_LS_OE 41 /* Level shifter for HDMI */
#define HDMI_GPIO_HPD 63 /* Hotplug detect */
/* wl127x BT, FM, GPS connectivity chip */ /* wl127x BT, FM, GPS connectivity chip */
static int wl1271_gpios[] = {46, -1, -1}; static int wl1271_gpios[] = {46, -1, -1};
@ -413,8 +414,9 @@ int __init omap4_panda_dvi_init(void)
} }
static struct gpio panda_hdmi_gpios[] = { static struct gpio panda_hdmi_gpios[] = {
{ HDMI_GPIO_HPD, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_hpd" }, { HDMI_GPIO_CT_CP_HPD, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_ct_cp_hpd" },
{ HDMI_GPIO_LS_OE, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_ls_oe" }, { HDMI_GPIO_LS_OE, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_ls_oe" },
{ HDMI_GPIO_HPD, GPIOF_DIR_IN, "hdmi_gpio_hpd" },
}; };
static int omap4_panda_panel_enable_hdmi(struct omap_dss_device *dssdev) static int omap4_panda_panel_enable_hdmi(struct omap_dss_device *dssdev)
@ -431,10 +433,13 @@ static int omap4_panda_panel_enable_hdmi(struct omap_dss_device *dssdev)
static void omap4_panda_panel_disable_hdmi(struct omap_dss_device *dssdev) static void omap4_panda_panel_disable_hdmi(struct omap_dss_device *dssdev)
{ {
gpio_free(HDMI_GPIO_LS_OE); gpio_free_array(panda_hdmi_gpios, ARRAY_SIZE(panda_hdmi_gpios));
gpio_free(HDMI_GPIO_HPD);
} }
static struct omap_dss_hdmi_data omap4_panda_hdmi_data = {
.hpd_gpio = HDMI_GPIO_HPD,
};
static struct omap_dss_device omap4_panda_hdmi_device = { static struct omap_dss_device omap4_panda_hdmi_device = {
.name = "hdmi", .name = "hdmi",
.driver_name = "hdmi_panel", .driver_name = "hdmi_panel",
@ -442,6 +447,7 @@ static struct omap_dss_device omap4_panda_hdmi_device = {
.platform_enable = omap4_panda_panel_enable_hdmi, .platform_enable = omap4_panda_panel_enable_hdmi,
.platform_disable = omap4_panda_panel_disable_hdmi, .platform_disable = omap4_panda_panel_disable_hdmi,
.channel = OMAP_DSS_CHANNEL_DIGIT, .channel = OMAP_DSS_CHANNEL_DIGIT,
.data = &omap4_panda_hdmi_data,
}; };
static struct omap_dss_device *omap4_panda_dss_devices[] = { static struct omap_dss_device *omap4_panda_dss_devices[] = {
@ -473,18 +479,24 @@ void omap4_panda_display_init(void)
omap_hdmi_init(OMAP_HDMI_SDA_SCL_EXTERNAL_PULLUP); omap_hdmi_init(OMAP_HDMI_SDA_SCL_EXTERNAL_PULLUP);
else else
omap_hdmi_init(0); omap_hdmi_init(0);
omap_mux_init_gpio(HDMI_GPIO_LS_OE, OMAP_PIN_OUTPUT);
omap_mux_init_gpio(HDMI_GPIO_CT_CP_HPD, OMAP_PIN_OUTPUT);
omap_mux_init_gpio(HDMI_GPIO_HPD, OMAP_PIN_INPUT_PULLDOWN);
} }
static void __init omap4_panda_init(void) static void __init omap4_panda_init(void)
{ {
int package = OMAP_PACKAGE_CBS; int package = OMAP_PACKAGE_CBS;
int ret;
if (omap_rev() == OMAP4430_REV_ES1_0) if (omap_rev() == OMAP4430_REV_ES1_0)
package = OMAP_PACKAGE_CBL; package = OMAP_PACKAGE_CBL;
omap4_mux_init(board_mux, NULL, package); omap4_mux_init(board_mux, NULL, package);
if (wl12xx_set_platform_data(&omap_panda_wlan_data)) ret = wl12xx_set_platform_data(&omap_panda_wlan_data);
pr_err("error setting wl12xx data\n"); if (ret)
pr_err("error setting wl12xx data: %d\n", ret);
omap4_panda_i2c_init(); omap4_panda_i2c_init();
platform_add_devices(panda_devices, ARRAY_SIZE(panda_devices)); platform_add_devices(panda_devices, ARRAY_SIZE(panda_devices));
@ -296,8 +296,10 @@ static void enable_board_wakeup_source(void)
void __init zoom_peripherals_init(void) void __init zoom_peripherals_init(void)
{ {
if (wl12xx_set_platform_data(&omap_zoom_wlan_data)) int ret = wl12xx_set_platform_data(&omap_zoom_wlan_data);
pr_err("error setting wl12xx data\n");
if (ret)
pr_err("error setting wl12xx data: %d\n", ret);
omap_i2c_init(); omap_i2c_init();
platform_device_register(&omap_vwlan_device); platform_device_register(&omap_vwlan_device);
@ -405,6 +405,7 @@ static int omap_mcspi_init(struct omap_hwmod *oh, void *unused)
break; break;
default: default:
pr_err("Invalid McSPI Revision value\n"); pr_err("Invalid McSPI Revision value\n");
kfree(pdata);
return -EINVAL; return -EINVAL;
} }
@ -103,12 +103,8 @@ static void omap4_hdmi_mux_pads(enum omap_hdmi_flags flags)
u32 reg; u32 reg;
u16 control_i2c_1; u16 control_i2c_1;
/* PAD0_HDMI_HPD_PAD1_HDMI_CEC */
omap_mux_init_signal("hdmi_hpd",
OMAP_PIN_INPUT_PULLUP);
omap_mux_init_signal("hdmi_cec", omap_mux_init_signal("hdmi_cec",
OMAP_PIN_INPUT_PULLUP); OMAP_PIN_INPUT_PULLUP);
/* PAD0_HDMI_DDC_SCL_PAD1_HDMI_DDC_SDA */
omap_mux_init_signal("hdmi_ddc_scl", omap_mux_init_signal("hdmi_ddc_scl",
OMAP_PIN_INPUT_PULLUP); OMAP_PIN_INPUT_PULLUP);
omap_mux_init_signal("hdmi_ddc_sda", omap_mux_init_signal("hdmi_ddc_sda",
@ -528,7 +528,13 @@ int gpmc_cs_configure(int cs, int cmd, int wval)
case GPMC_CONFIG_DEV_SIZE: case GPMC_CONFIG_DEV_SIZE:
regval = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1); regval = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
/* clear 2 target bits */
regval &= ~GPMC_CONFIG1_DEVICESIZE(3);
/* set the proper value */
regval |= GPMC_CONFIG1_DEVICESIZE(wval); regval |= GPMC_CONFIG1_DEVICESIZE(wval);
gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, regval); gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, regval);
break; break;
@ -175,14 +175,15 @@ static void hsmmc2_select_input_clk_src(struct omap_mmc_platform_data *mmc)
{ {
u32 reg; u32 reg;
if (mmc->slots[0].internal_clock) { reg = omap_ctrl_readl(control_devconf1_offset);
reg = omap_ctrl_readl(control_devconf1_offset); if (mmc->slots[0].internal_clock)
reg |= OMAP2_MMCSDIO2ADPCLKISEL; reg |= OMAP2_MMCSDIO2ADPCLKISEL;
omap_ctrl_writel(reg, control_devconf1_offset); else
} reg &= ~OMAP2_MMCSDIO2ADPCLKISEL;
omap_ctrl_writel(reg, control_devconf1_offset);
} }
static void hsmmc23_before_set_reg(struct device *dev, int slot, static void hsmmc2_before_set_reg(struct device *dev, int slot,
int power_on, int vdd) int power_on, int vdd)
{ {
struct omap_mmc_platform_data *mmc = dev->platform_data; struct omap_mmc_platform_data *mmc = dev->platform_data;
@ -292,8 +293,8 @@ static inline void omap_hsmmc_mux(struct omap_mmc_platform_data *mmc_controller,
} }
} }
static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c, static int omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
struct omap_mmc_platform_data *mmc) struct omap_mmc_platform_data *mmc)
{ {
char *hc_name; char *hc_name;
@ -407,14 +408,13 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
c->caps &= ~MMC_CAP_8_BIT_DATA; c->caps &= ~MMC_CAP_8_BIT_DATA;
c->caps |= MMC_CAP_4_BIT_DATA; c->caps |= MMC_CAP_4_BIT_DATA;
} }
/* FALLTHROUGH */
case 3:
if (mmc->slots[0].features & HSMMC_HAS_PBIAS) { if (mmc->slots[0].features & HSMMC_HAS_PBIAS) {
/* off-chip level shifting, or none */ /* off-chip level shifting, or none */
mmc->slots[0].before_set_reg = hsmmc23_before_set_reg; mmc->slots[0].before_set_reg = hsmmc2_before_set_reg;
mmc->slots[0].after_set_reg = NULL; mmc->slots[0].after_set_reg = NULL;
} }
break; break;
case 3:
case 4: case 4:
case 5: case 5:
mmc->slots[0].before_set_reg = NULL; mmc->slots[0].before_set_reg = NULL;
@ -430,7 +430,7 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
#define MAX_OMAP_MMC_HWMOD_NAME_LEN 16 #define MAX_OMAP_MMC_HWMOD_NAME_LEN 16
void __init omap_init_hsmmc(struct omap2_hsmmc_info *hsmmcinfo, int ctrl_nr) void omap_init_hsmmc(struct omap2_hsmmc_info *hsmmcinfo, int ctrl_nr)
{ {
struct omap_hwmod *oh; struct omap_hwmod *oh;
struct platform_device *pdev; struct platform_device *pdev;
@ -487,7 +487,7 @@ done:
kfree(mmc_data); kfree(mmc_data);
} }
void __init omap2_hsmmc_init(struct omap2_hsmmc_info *controllers) void omap2_hsmmc_init(struct omap2_hsmmc_info *controllers)
{ {
u32 reg; u32 reg;
@ -388,7 +388,7 @@ static void __init omap_hwmod_init_postsetup(void)
omap_pm_if_early_init(); omap_pm_if_early_init();
} }
#ifdef CONFIG_ARCH_OMAP2 #ifdef CONFIG_SOC_OMAP2420
void __init omap2420_init_early(void) void __init omap2420_init_early(void)
{ {
omap2_set_globals_242x(); omap2_set_globals_242x();
@ -400,7 +400,9 @@ void __init omap2420_init_early(void)
omap_hwmod_init_postsetup(); omap_hwmod_init_postsetup();
omap2420_clk_init(); omap2420_clk_init();
} }
#endif
#ifdef CONFIG_SOC_OMAP2430
void __init omap2430_init_early(void) void __init omap2430_init_early(void)
{ {
omap2_set_globals_243x(); omap2_set_globals_243x();
@ -100,8 +100,8 @@ void omap_mux_write_array(struct omap_mux_partition *partition,
static char *omap_mux_options; static char *omap_mux_options;
static int __init _omap_mux_init_gpio(struct omap_mux_partition *partition, static int _omap_mux_init_gpio(struct omap_mux_partition *partition,
int gpio, int val) int gpio, int val)
{ {
struct omap_mux_entry *e; struct omap_mux_entry *e;
struct omap_mux *gpio_mux = NULL; struct omap_mux *gpio_mux = NULL;
@ -145,7 +145,7 @@ static int __init _omap_mux_init_gpio(struct omap_mux_partition *partition,
return 0; return 0;
} }
int __init omap_mux_init_gpio(int gpio, int val) int omap_mux_init_gpio(int gpio, int val)
{ {
struct omap_mux_partition *partition; struct omap_mux_partition *partition;
int ret; int ret;
@ -159,9 +159,9 @@ int __init omap_mux_init_gpio(int gpio, int val)
return -ENODEV; return -ENODEV;
} }
static int __init _omap_mux_get_by_name(struct omap_mux_partition *partition, static int _omap_mux_get_by_name(struct omap_mux_partition *partition,
const char *muxname, const char *muxname,
struct omap_mux **found_mux) struct omap_mux **found_mux)
{ {
struct omap_mux *mux = NULL; struct omap_mux *mux = NULL;
struct omap_mux_entry *e; struct omap_mux_entry *e;
@ -240,7 +240,7 @@ omap_mux_get_by_name(const char *muxname,
return -ENODEV; return -ENODEV;
} }
int __init omap_mux_init_signal(const char *muxname, int val) int omap_mux_init_signal(const char *muxname, int val)
{ {
struct omap_mux_partition *partition = NULL; struct omap_mux_partition *partition = NULL;
struct omap_mux *mux = NULL; struct omap_mux *mux = NULL;
@ -1094,8 +1094,8 @@ static void omap_mux_init_package(struct omap_mux *superset,
omap_mux_package_init_balls(package_balls, superset); omap_mux_package_init_balls(package_balls, superset);
} }
static void omap_mux_init_signals(struct omap_mux_partition *partition, static void __init omap_mux_init_signals(struct omap_mux_partition *partition,
struct omap_board_mux *board_mux) struct omap_board_mux *board_mux)
{ {
omap_mux_set_cmdline_signals(); omap_mux_set_cmdline_signals();
omap_mux_write_array(partition, board_mux); omap_mux_write_array(partition, board_mux);
@ -1109,8 +1109,8 @@ static void omap_mux_init_package(struct omap_mux *superset,
{ {
} }
static void omap_mux_init_signals(struct omap_mux_partition *partition, static void __init omap_mux_init_signals(struct omap_mux_partition *partition,
struct omap_board_mux *board_mux) struct omap_board_mux *board_mux)
{ {
} }
@ -18,6 +18,7 @@
#include <linux/linkage.h> #include <linux/linkage.h>
#include <linux/init.h> #include <linux/init.h>
__CPUINIT
/* /*
* OMAP4 specific entry point for secondary CPU to jump from ROM * OMAP4 specific entry point for secondary CPU to jump from ROM

@ -1517,8 +1517,8 @@ static int _enable(struct omap_hwmod *oh)
if (oh->_state != _HWMOD_STATE_INITIALIZED && if (oh->_state != _HWMOD_STATE_INITIALIZED &&
oh->_state != _HWMOD_STATE_IDLE && oh->_state != _HWMOD_STATE_IDLE &&
oh->_state != _HWMOD_STATE_DISABLED) { oh->_state != _HWMOD_STATE_DISABLED) {
WARN(1, "omap_hwmod: %s: enabled state can only be entered " WARN(1, "omap_hwmod: %s: enabled state can only be entered from initialized, idle, or disabled state\n",
"from initialized, idle, or disabled state\n", oh->name); oh->name);
return -EINVAL; return -EINVAL;
} }
@ -1600,8 +1600,8 @@ static int _idle(struct omap_hwmod *oh)
pr_debug("omap_hwmod: %s: idling\n", oh->name); pr_debug("omap_hwmod: %s: idling\n", oh->name);
if (oh->_state != _HWMOD_STATE_ENABLED) { if (oh->_state != _HWMOD_STATE_ENABLED) {
WARN(1, "omap_hwmod: %s: idle state can only be entered from " WARN(1, "omap_hwmod: %s: idle state can only be entered from enabled state\n",
"enabled state\n", oh->name); oh->name);
return -EINVAL; return -EINVAL;
} }
@ -1682,8 +1682,8 @@ static int _shutdown(struct omap_hwmod *oh)
if (oh->_state != _HWMOD_STATE_IDLE && if (oh->_state != _HWMOD_STATE_IDLE &&
oh->_state != _HWMOD_STATE_ENABLED) { oh->_state != _HWMOD_STATE_ENABLED) {
WARN(1, "omap_hwmod: %s: disabled state can only be entered " WARN(1, "omap_hwmod: %s: disabled state can only be entered from idle, or enabled state\n",
"from idle, or enabled state\n", oh->name); oh->name);
return -EINVAL; return -EINVAL;
} }
@ -2240,8 +2240,8 @@ void omap_hwmod_ocp_barrier(struct omap_hwmod *oh)
BUG_ON(!oh); BUG_ON(!oh);
if (!oh->class->sysc || !oh->class->sysc->sysc_flags) { if (!oh->class->sysc || !oh->class->sysc->sysc_flags) {
WARN(1, "omap_device: %s: OCP barrier impossible due to " WARN(1, "omap_device: %s: OCP barrier impossible due to device configuration\n",
"device configuration\n", oh->name); oh->name);
return; return;
} }
@ -55,27 +55,6 @@ struct omap_hwmod_class omap2_dss_hwmod_class = {
.reset = omap_dss_reset, .reset = omap_dss_reset,
}; };
/*
* 'dispc' class
* display controller
*/
static struct omap_hwmod_class_sysconfig omap2_dispc_sysc = {
.rev_offs = 0x0000,
.sysc_offs = 0x0010,
.syss_offs = 0x0014,
.sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE |
SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
.sysc_fields = &omap_hwmod_sysc_type1,
};
struct omap_hwmod_class omap2_dispc_hwmod_class = {
.name = "dispc",
.sysc = &omap2_dispc_sysc,
};
/* /*
* 'rfbi' class * 'rfbi' class
* remote frame buffer interface * remote frame buffer interface
@ -28,6 +28,28 @@ struct omap_hwmod_dma_info omap2xxx_dss_sdma_chs[] = {
{ .name = "dispc", .dma_req = 5 }, { .name = "dispc", .dma_req = 5 },
{ .dma_req = -1 } { .dma_req = -1 }
}; };
/*
* 'dispc' class
* display controller
*/
static struct omap_hwmod_class_sysconfig omap2_dispc_sysc = {
.rev_offs = 0x0000,
.sysc_offs = 0x0010,
.syss_offs = 0x0014,
.sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE |
SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
.sysc_fields = &omap_hwmod_sysc_type1,
};
struct omap_hwmod_class omap2_dispc_hwmod_class = {
.name = "dispc",
.sysc = &omap2_dispc_sysc,
};
/* OMAP2xxx Timer Common */ /* OMAP2xxx Timer Common */
static struct omap_hwmod_class_sysconfig omap2xxx_timer_sysc = { static struct omap_hwmod_class_sysconfig omap2xxx_timer_sysc = {
.rev_offs = 0x0000, .rev_offs = 0x0000,
@ -1480,6 +1480,28 @@ static struct omap_hwmod omap3xxx_dss_core_hwmod = {
.masters_cnt = ARRAY_SIZE(omap3xxx_dss_masters), .masters_cnt = ARRAY_SIZE(omap3xxx_dss_masters),
}; };
/*
* 'dispc' class
* display controller
*/
static struct omap_hwmod_class_sysconfig omap3_dispc_sysc = {
.rev_offs = 0x0000,
.sysc_offs = 0x0010,
.syss_offs = 0x0014,
.sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE |
SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
SYSC_HAS_ENAWAKEUP),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
.sysc_fields = &omap_hwmod_sysc_type1,
};
static struct omap_hwmod_class omap3_dispc_hwmod_class = {
.name = "dispc",
.sysc = &omap3_dispc_sysc,
};
/* l4_core -> dss_dispc */ /* l4_core -> dss_dispc */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_dispc = { static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_dispc = {
.master = &omap3xxx_l4_core_hwmod, .master = &omap3xxx_l4_core_hwmod,
@ -1503,7 +1525,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_dispc_slaves[] = {
static struct omap_hwmod omap3xxx_dss_dispc_hwmod = { static struct omap_hwmod omap3xxx_dss_dispc_hwmod = {
.name = "dss_dispc", .name = "dss_dispc",
.class = &omap2_dispc_hwmod_class, .class = &omap3_dispc_hwmod_class,
.mpu_irqs = omap2_dispc_irqs, .mpu_irqs = omap2_dispc_irqs,
.main_clk = "dss1_alwon_fck", .main_clk = "dss1_alwon_fck",
.prcm = { .prcm = {
@ -3523,12 +3545,6 @@ static __initdata struct omap_hwmod *omap3xxx_hwmods[] = {
&omap3xxx_uart2_hwmod, &omap3xxx_uart2_hwmod,
&omap3xxx_uart3_hwmod, &omap3xxx_uart3_hwmod,
/* dss class */
&omap3xxx_dss_dispc_hwmod,
&omap3xxx_dss_dsi1_hwmod,
&omap3xxx_dss_rfbi_hwmod,
&omap3xxx_dss_venc_hwmod,
/* i2c class */ /* i2c class */
&omap3xxx_i2c1_hwmod, &omap3xxx_i2c1_hwmod,
&omap3xxx_i2c2_hwmod, &omap3xxx_i2c2_hwmod,
@ -3635,6 +3651,15 @@ static __initdata struct omap_hwmod *am35xx_hwmods[] = {
NULL NULL
}; };
static __initdata struct omap_hwmod *omap3xxx_dss_hwmods[] = {
/* dss class */
&omap3xxx_dss_dispc_hwmod,
&omap3xxx_dss_dsi1_hwmod,
&omap3xxx_dss_rfbi_hwmod,
&omap3xxx_dss_venc_hwmod,
NULL
};
int __init omap3xxx_hwmod_init(void) int __init omap3xxx_hwmod_init(void)
{ {
int r; int r;
@ -3708,6 +3733,21 @@ int __init omap3xxx_hwmod_init(void)
if (h) if (h)
r = omap_hwmod_register(h); r = omap_hwmod_register(h);
if (r < 0)
return r;
/*
* DSS code presumes that dss_core hwmod is handled first,
* _before_ any other DSS related hwmods so register common
* DSS hwmods last to ensure that dss_core is already registered.
* Otherwise strange things may happen, e.g. if dispc is handled
* before dss_core and DSS was enabled by the bootloader, DISPC
* will be reset with its outputs enabled, which sometimes leads
* to an unrecoverable L3 error.
* XXX The long-term fix to this is to ensure modules are set up
* in dependency order in the hwmod core code.
*/
r = omap_hwmod_register(omap3xxx_dss_hwmods);
return r; return r;
} }
@ -1031,6 +1031,7 @@ static struct omap_hwmod_dma_info omap44xx_dmic_sdma_reqs[] = {
static struct omap_hwmod_addr_space omap44xx_dmic_addrs[] = { static struct omap_hwmod_addr_space omap44xx_dmic_addrs[] = {
{ {
.name = "mpu",
.pa_start = 0x4012e000, .pa_start = 0x4012e000,
.pa_end = 0x4012e07f, .pa_end = 0x4012e07f,
.flags = ADDR_TYPE_RT .flags = ADDR_TYPE_RT
@ -1049,6 +1050,7 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__dmic = {
static struct omap_hwmod_addr_space omap44xx_dmic_dma_addrs[] = { static struct omap_hwmod_addr_space omap44xx_dmic_dma_addrs[] = {
{ {
.name = "dma",
.pa_start = 0x4902e000, .pa_start = 0x4902e000,
.pa_end = 0x4902e07f, .pa_end = 0x4902e07f,
.flags = ADDR_TYPE_RT .flags = ADDR_TYPE_RT
@ -19,6 +19,7 @@
#include "common.h" #include "common.h"
#include <plat/cpu.h> #include <plat/cpu.h>
#include <plat/prcm.h> #include <plat/prcm.h>
#include <plat/irqs.h>
#include "vp.h" #include "vp.h"
@ -19,6 +19,7 @@
#include "common.h" #include "common.h"
#include <plat/cpu.h> #include <plat/cpu.h>
#include <plat/irqs.h>
#include <plat/prcm.h> #include <plat/prcm.h>
#include "vp.h" #include "vp.h"
@ -107,18 +107,18 @@ static void omap_uart_set_noidle(struct platform_device *pdev)
omap_hwmod_set_slave_idlemode(od->hwmods[0], HWMOD_IDLEMODE_NO); omap_hwmod_set_slave_idlemode(od->hwmods[0], HWMOD_IDLEMODE_NO);
} }
static void omap_uart_set_forceidle(struct platform_device *pdev) static void omap_uart_set_smartidle(struct platform_device *pdev)
{ {
struct omap_device *od = to_omap_device(pdev); struct omap_device *od = to_omap_device(pdev);
omap_hwmod_set_slave_idlemode(od->hwmods[0], HWMOD_IDLEMODE_FORCE); omap_hwmod_set_slave_idlemode(od->hwmods[0], HWMOD_IDLEMODE_SMART);
} }
#else #else
static void omap_uart_enable_wakeup(struct platform_device *pdev, bool enable) static void omap_uart_enable_wakeup(struct platform_device *pdev, bool enable)
{} {}
static void omap_uart_set_noidle(struct platform_device *pdev) {} static void omap_uart_set_noidle(struct platform_device *pdev) {}
static void omap_uart_set_forceidle(struct platform_device *pdev) {} static void omap_uart_set_smartidle(struct platform_device *pdev) {}
#endif /* CONFIG_PM */ #endif /* CONFIG_PM */
#ifdef CONFIG_OMAP_MUX #ifdef CONFIG_OMAP_MUX
@ -349,7 +349,7 @@ void __init omap_serial_init_port(struct omap_board_data *bdata,
omap_up.uartclk = OMAP24XX_BASE_BAUD * 16; omap_up.uartclk = OMAP24XX_BASE_BAUD * 16;
omap_up.flags = UPF_BOOT_AUTOCONF; omap_up.flags = UPF_BOOT_AUTOCONF;
omap_up.get_context_loss_count = omap_pm_get_dev_context_loss_count; omap_up.get_context_loss_count = omap_pm_get_dev_context_loss_count;
omap_up.set_forceidle = omap_uart_set_forceidle; omap_up.set_forceidle = omap_uart_set_smartidle;
omap_up.set_noidle = omap_uart_set_noidle; omap_up.set_noidle = omap_uart_set_noidle;
omap_up.enable_wakeup = omap_uart_enable_wakeup; omap_up.enable_wakeup = omap_uart_enable_wakeup;
omap_up.dma_rx_buf_size = info->dma_rx_buf_size; omap_up.dma_rx_buf_size = info->dma_rx_buf_size;
@ -897,7 +897,7 @@ static int __init omap_sr_probe(struct platform_device *pdev)
ret = sr_late_init(sr_info); ret = sr_late_init(sr_info);
if (ret) { if (ret) {
pr_warning("%s: Error in SR late init\n", __func__); pr_warning("%s: Error in SR late init\n", __func__);
return ret; goto err_iounmap;
} }
} }
@ -270,7 +270,7 @@ static struct clocksource clocksource_gpt = {
static u32 notrace dmtimer_read_sched_clock(void) static u32 notrace dmtimer_read_sched_clock(void)
{ {
if (clksrc.reserved) if (clksrc.reserved)
return __omap_dm_timer_read_counter(clksrc.io_base, 1); return __omap_dm_timer_read_counter(&clksrc, 1);
return 0; return 0;
} }
@ -247,7 +247,7 @@ static void __init omap4_vc_init_channel(struct voltagedomain *voltdm)
* omap_vc_i2c_init - initialize I2C interface to PMIC * omap_vc_i2c_init - initialize I2C interface to PMIC
* @voltdm: voltage domain containing VC data * @voltdm: voltage domain containing VC data
* *
* Use PMIC supplied seetings for I2C high-speed mode and * Use PMIC supplied settings for I2C high-speed mode and
* master code (if set) and program the VC I2C configuration * master code (if set) and program the VC I2C configuration
* register. * register.
* *
@ -265,8 +265,8 @@ static void __init omap_vc_i2c_init(struct voltagedomain *voltdm)
if (initialized) { if (initialized) {
if (voltdm->pmic->i2c_high_speed != i2c_high_speed) if (voltdm->pmic->i2c_high_speed != i2c_high_speed)
pr_warn("%s: I2C config for all channels must match.", pr_warn("%s: I2C config for vdd_%s does not match other channels (%u).",
__func__); __func__, voltdm->name, i2c_high_speed);
return; return;
} }
@ -292,9 +292,7 @@ void __init omap_vc_init_channel(struct voltagedomain *voltdm)
u32 val; u32 val;
if (!voltdm->pmic || !voltdm->pmic->uv_to_vsel) { if (!voltdm->pmic || !voltdm->pmic->uv_to_vsel) {
pr_err("%s: PMIC info requried to configure vc for" pr_err("%s: No PMIC info for vdd_%s\n", __func__, voltdm->name);
"vdd_%s not populated.Hence cannot initialize vc\n",
__func__, voltdm->name);
return; return;
} }
@ -41,6 +41,11 @@ void __init omap_vp_init(struct voltagedomain *voltdm)
u32 val, sys_clk_rate, timeout, waittime; u32 val, sys_clk_rate, timeout, waittime;
u32 vddmin, vddmax, vstepmin, vstepmax; u32 vddmin, vddmax, vstepmin, vstepmax;
if (!voltdm->pmic || !voltdm->pmic->uv_to_vsel) {
pr_err("%s: No PMIC info for vdd_%s\n", __func__, voltdm->name);
return;
}
if (!voltdm->read || !voltdm->write) { if (!voltdm->read || !voltdm->write) {
pr_err("%s: No read/write API for accessing vdd_%s regs\n", pr_err("%s: No read/write API for accessing vdd_%s regs\n",
__func__, voltdm->name); __func__, voltdm->name);
@ -68,7 +68,7 @@ void __init sirfsoc_of_irq_init(void)
if (!sirfsoc_intc_base) if (!sirfsoc_intc_base)
panic("unable to map intc cpu registers\n"); panic("unable to map intc cpu registers\n");
irq_domain_add_simple(np, 0); irq_domain_add_legacy(np, 32, 0, 0, &irq_domain_simple_ops, NULL);
of_node_put(np); of_node_put(np);
@ -662,6 +662,7 @@ static struct sh_dmae_pdata usb_dma0_platform_data = {
.dmaor_is_32bit = 1, .dmaor_is_32bit = 1,
.needs_tend_set = 1, .needs_tend_set = 1,
.no_dmars = 1, .no_dmars = 1,
.slave_only = 1,
}; };
static struct resource sh7372_usb_dmae0_resources[] = { static struct resource sh7372_usb_dmae0_resources[] = {
@ -723,6 +724,7 @@ static struct sh_dmae_pdata usb_dma1_platform_data = {
.dmaor_is_32bit = 1, .dmaor_is_32bit = 1,
.needs_tend_set = 1, .needs_tend_set = 1,
.no_dmars = 1, .no_dmars = 1,
.slave_only = 1,
}; };
static struct resource sh7372_usb_dmae1_resources[] = { static struct resource sh7372_usb_dmae1_resources[] = {
@ -98,8 +98,11 @@ static const struct of_device_id sic_of_match[] __initconst = {
void __init versatile_init_irq(void) void __init versatile_init_irq(void)
{ {
vic_init(VA_VIC_BASE, IRQ_VIC_START, ~0, 0); struct device_node *np;
irq_domain_generate_simple(vic_of_match, VERSATILE_VIC_BASE, IRQ_VIC_START);
np = of_find_matching_node_by_address(NULL, vic_of_match,
VERSATILE_VIC_BASE);
__vic_init(VA_VIC_BASE, IRQ_VIC_START, ~0, 0, np);
writel(~0, VA_SIC_BASE + SIC_IRQ_ENABLE_CLEAR); writel(~0, VA_SIC_BASE + SIC_IRQ_ENABLE_CLEAR);
@ -54,9 +54,15 @@ loop1:
and r1, r1, #7 @ mask of the bits for current cache only and r1, r1, #7 @ mask of the bits for current cache only
cmp r1, #2 @ see what cache we have at this level cmp r1, #2 @ see what cache we have at this level
blt skip @ skip if no cache, or just i-cache blt skip @ skip if no cache, or just i-cache
#ifdef CONFIG_PREEMPT
save_and_disable_irqs r9 @ make cssr&csidr read atomic
#endif
mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
isb @ isb to sych the new cssr&csidr isb @ isb to sych the new cssr&csidr
mrc p15, 1, r1, c0, c0, 0 @ read the new csidr mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
#ifdef CONFIG_PREEMPT
restore_irqs_notrace r9
#endif
and r2, r1, #7 @ extract the length of the cache lines and r2, r1, #7 @ extract the length of the cache lines
add r2, r2, #4 @ add 4 (line length offset) add r2, r2, #4 @ add 4 (line length offset)
ldr r4, =0x3ff ldr r4, =0x3ff
@ -225,8 +225,7 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype)) if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
continue; continue;
if (__phys_to_pfn(area->phys_addr) > pfn || if (__phys_to_pfn(area->phys_addr) > pfn ||
__pfn_to_phys(pfn) + offset + size-1 > __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1)
area->phys_addr + area->size-1)
continue; continue;
/* we can drop the lock here as we know *area is static */ /* we can drop the lock here as we know *area is static */
read_unlock(&vmlist_lock); read_unlock(&vmlist_lock);
@ -8,6 +8,7 @@ config AVR32
select HAVE_KPROBES select HAVE_KPROBES
select HAVE_GENERIC_HARDIRQS select HAVE_GENERIC_HARDIRQS
select GENERIC_IRQ_PROBE select GENERIC_IRQ_PROBE
select GENERIC_ATOMIC64
select HARDIRQS_SW_RESEND select HARDIRQS_SW_RESEND
select GENERIC_IRQ_SHOW select GENERIC_IRQ_SHOW
select ARCH_HAVE_NMI_SAFE_CMPXCHG select ARCH_HAVE_NMI_SAFE_CMPXCHG
@ -12,6 +12,7 @@ config TMS320C6X
select HAVE_GENERIC_HARDIRQS select HAVE_GENERIC_HARDIRQS
select HAVE_MEMBLOCK select HAVE_MEMBLOCK
select HAVE_SPARSE_IRQ select HAVE_SPARSE_IRQ
select IRQ_DOMAIN
select OF select OF
select OF_EARLY_FLATTREE select OF_EARLY_FLATTREE
@ -13,6 +13,7 @@
#ifndef _ASM_C6X_IRQ_H #ifndef _ASM_C6X_IRQ_H
#define _ASM_C6X_IRQ_H #define _ASM_C6X_IRQ_H
#include <linux/irqdomain.h>
#include <linux/threads.h> #include <linux/threads.h>
#include <linux/list.h> #include <linux/list.h>
#include <linux/radix-tree.h> #include <linux/radix-tree.h>
@ -41,253 +42,9 @@
/* This number is used when no interrupt has been assigned */ /* This number is used when no interrupt has been assigned */
#define NO_IRQ 0 #define NO_IRQ 0
/* This type is the placeholder for a hardware interrupt number. It has to
* be big enough to enclose whatever representation is used by a given
* platform.
*/
typedef unsigned long irq_hw_number_t;
/* Interrupt controller "host" data structure. This could be defined as a
* irq domain controller. That is, it handles the mapping between hardware
* and virtual interrupt numbers for a given interrupt domain. The host
* structure is generally created by the PIC code for a given PIC instance
* (though a host can cover more than one PIC if they have a flat number
* model). It's the host callbacks that are responsible for setting the
* irq_chip on a given irq_desc after it's been mapped.
*
* The host code and data structures are fairly agnostic to the fact that
* we use an open firmware device-tree. We do have references to struct
* device_node in two places: in irq_find_host() to find the host matching
* a given interrupt controller node, and of course as an argument to its
* counterpart host->ops->match() callback. However, those are treated as
* generic pointers by the core and the fact that it's actually a device-node
* pointer is purely a convention between callers and implementation. This
* code could thus be used on other architectures by replacing those two
* by some sort of arch-specific void * "token" used to identify interrupt
* controllers.
*/
struct irq_host;
struct radix_tree_root;
struct device_node;
/* Functions below are provided by the host and called whenever a new mapping
* is created or an old mapping is disposed. The host can then proceed to
* whatever internal data structures management is required. It also needs
* to setup the irq_desc when returning from map().
*/
struct irq_host_ops {
/* Match an interrupt controller device node to a host, returns
* 1 on a match
*/
int (*match)(struct irq_host *h, struct device_node *node);
/* Create or update a mapping between a virtual irq number and a hw
* irq number. This is called only once for a given mapping.
*/
int (*map)(struct irq_host *h, unsigned int virq, irq_hw_number_t hw);
/* Dispose of such a mapping */
void (*unmap)(struct irq_host *h, unsigned int virq);
/* Translate device-tree interrupt specifier from raw format coming
* from the firmware to a irq_hw_number_t (interrupt line number) and
* type (sense) that can be passed to set_irq_type(). In the absence
* of this callback, irq_create_of_mapping() and irq_of_parse_and_map()
* will return the hw number in the first cell and IRQ_TYPE_NONE for
* the type (which amount to keeping whatever default value the
* interrupt controller has for that line)
*/
int (*xlate)(struct irq_host *h, struct device_node *ctrler,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_type);
};
struct irq_host {
struct list_head link;
/* type of reverse mapping technique */
unsigned int revmap_type;
#define IRQ_HOST_MAP_PRIORITY 0 /* core priority irqs, get irqs 1..15 */
#define IRQ_HOST_MAP_NOMAP 1 /* no fast reverse mapping */
#define IRQ_HOST_MAP_LINEAR 2 /* linear map of interrupts */
#define IRQ_HOST_MAP_TREE 3 /* radix tree */
union {
struct {
unsigned int size;
unsigned int *revmap;
} linear;
struct radix_tree_root tree;
} revmap_data;
struct irq_host_ops *ops;
void *host_data;
irq_hw_number_t inval_irq;
/* Optional device node pointer */
struct device_node *of_node;
};
struct irq_data;
extern irq_hw_number_t irqd_to_hwirq(struct irq_data *d);
extern irq_hw_number_t virq_to_hw(unsigned int virq);
extern bool virq_is_host(unsigned int virq, struct irq_host *host);
/**
* irq_alloc_host - Allocate a new irq_host data structure
* @of_node: optional device-tree node of the interrupt controller
* @revmap_type: type of reverse mapping to use
* @revmap_arg: for IRQ_HOST_MAP_LINEAR linear only: size of the map
* @ops: map/unmap host callbacks
* @inval_irq: provide a hw number in that host space that is always invalid
*
* Allocates and initializes an irq_host structure. Note that in the case of
* IRQ_HOST_MAP_LEGACY, the map() callback will be called before this returns
* for all legacy interrupts except 0 (which is always the invalid irq for
* a legacy controller). For an IRQ_HOST_MAP_LINEAR, the map is allocated by
* this call as well. For an IRQ_HOST_MAP_TREE, the radix tree will be allocated
* later during boot automatically (the reverse mapping will use the slow path
* until that happens).
*/
extern struct irq_host *irq_alloc_host(struct device_node *of_node,
unsigned int revmap_type,
unsigned int revmap_arg,
struct irq_host_ops *ops,
irq_hw_number_t inval_irq);
/**
* irq_find_host - Locates a host for a given device node
* @node: device-tree node of the interrupt controller
*/
extern struct irq_host *irq_find_host(struct device_node *node);
/**
* irq_set_default_host - Set a "default" host
* @host: default host pointer
*
* For convenience, it's possible to set a "default" host that will be used
* whenever NULL is passed to irq_create_mapping(). It makes life easier for
* platforms that want to manipulate a few hard coded interrupt numbers that
* aren't properly represented in the device-tree.
*/
extern void irq_set_default_host(struct irq_host *host);
/**
* irq_set_virq_count - Set the maximum number of virt irqs
* @count: number of linux virtual irqs, capped with NR_IRQS
*
* This is mainly for use by platforms like iSeries who want to program
* the virtual irq number in the controller to avoid the reverse mapping
*/
extern void irq_set_virq_count(unsigned int count);
/**
* irq_create_mapping - Map a hardware interrupt into linux virq space
* @host: host owning this hardware interrupt or NULL for default host
* @hwirq: hardware irq number in that host space
*
* Only one mapping per hardware interrupt is permitted. Returns a linux
* virq number.
* If the sense/trigger is to be specified, set_irq_type() should be called
* on the number returned from that call.
*/
extern unsigned int irq_create_mapping(struct irq_host *host,
irq_hw_number_t hwirq);
/**
* irq_dispose_mapping - Unmap an interrupt
* @virq: linux virq number of the interrupt to unmap
*/
extern void irq_dispose_mapping(unsigned int virq);
/**
* irq_find_mapping - Find a linux virq from an hw irq number.
* @host: host owning this hardware interrupt
* @hwirq: hardware irq number in that host space
*
* This is a slow path, for use by generic code. It's expected that an
* irq controller implementation directly calls the appropriate low level
* mapping function.
*/
extern unsigned int irq_find_mapping(struct irq_host *host,
irq_hw_number_t hwirq);
/**
* irq_create_direct_mapping - Allocate a virq for direct mapping
* @host: host to allocate the virq for or NULL for default host
*
* This routine is used for irq controllers which can choose the hardware
* interrupt numbers they generate. In such a case it's simplest to use
* the linux virq as the hardware interrupt number.
*/
extern unsigned int irq_create_direct_mapping(struct irq_host *host);
/**
* irq_radix_revmap_insert - Insert a hw irq to linux virq number mapping.
* @host: host owning this hardware interrupt
* @virq: linux irq number
* @hwirq: hardware irq number in that host space
*
* This is for use by irq controllers that use a radix tree reverse
* mapping for fast lookup.
*/
extern void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
irq_hw_number_t hwirq);
/**
* irq_radix_revmap_lookup - Find a linux virq from a hw irq number.
* @host: host owning this hardware interrupt
* @hwirq: hardware irq number in that host space
*
* This is a fast path, for use by irq controller code that uses radix tree
* revmaps
*/
extern unsigned int irq_radix_revmap_lookup(struct irq_host *host,
irq_hw_number_t hwirq);
/**
* irq_linear_revmap - Find a linux virq from a hw irq number.
* @host: host owning this hardware interrupt
* @hwirq: hardware irq number in that host space
*
* This is a fast path, for use by irq controller code that uses linear
* revmaps. It does fallback to the slow path if the revmap doesn't exist
* yet and will create the revmap entry with appropriate locking
*/
extern unsigned int irq_linear_revmap(struct irq_host *host,
irq_hw_number_t hwirq);
/**
* irq_alloc_virt - Allocate virtual irq numbers
* @host: host owning these new virtual irqs
* @count: number of consecutive numbers to allocate
* @hint: pass a hint number, the allocator will try to use a 1:1 mapping
*
* This is a low level function that is used internally by irq_create_mapping()
* and that can be used by some irq controllers implementations for things
* like allocating ranges of numbers for MSIs. The revmaps are left untouched.
*/
extern unsigned int irq_alloc_virt(struct irq_host *host,
unsigned int count,
unsigned int hint);
/**
* irq_free_virt - Free virtual irq numbers
* @virq: virtual irq number of the first interrupt to free
* @count: number of interrupts to free
*
* This function is the opposite of irq_alloc_virt. It will not clear reverse
* maps, this should be done previously by unmap'ing the interrupt. In fact,
* all interrupts covered by the range being freed should have been unmapped
* prior to calling this.
*/
extern void irq_free_virt(unsigned int virq, unsigned int count);
extern void __init init_pic_c64xplus(void);
@ -73,10 +73,10 @@ asmlinkage void c6x_do_IRQ(unsigned int prio, struct pt_regs *regs)
set_irq_regs(old_regs);
}
-static struct irq_host *core_host;
-static int core_host_map(struct irq_host *h, unsigned int virq,
+static struct irq_domain *core_domain;
+static int core_domain_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
if (hw < 4 || hw >= NR_PRIORITY_IRQS)
return -EINVAL;
@ -86,8 +86,9 @@ static int core_host_map(struct irq_host *h, unsigned int virq,
return 0;
}
-static struct irq_host_ops core_host_ops = {
-.map = core_host_map,
+static const struct irq_domain_ops core_domain_ops = {
+.map = core_domain_map,
+.xlate = irq_domain_xlate_onecell,
};
void __init init_IRQ(void)
@ -100,10 +101,11 @@ void __init init_IRQ(void)
np = of_find_compatible_node(NULL, NULL, "ti,c64x+core-pic");
if (np != NULL) {
/* create the core host */
-core_host = irq_alloc_host(np, IRQ_HOST_MAP_PRIORITY, 0,
-&core_host_ops, 0);
-if (core_host)
-irq_set_default_host(core_host);
+core_domain = irq_domain_add_legacy(np, NR_PRIORITY_IRQS,
+0, 0, &core_domain_ops,
+NULL);
+if (core_domain)
+irq_set_default_host(core_domain);
of_node_put(np);
}
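This hunk is the template for most of the conversions in this merge: the open-coded irq_host goes away, an irq_domain_ops with a .map callback (plus the stock irq_domain_xlate_onecell translator) is registered instead, and the pre-existing 1:1 range is handed to irq_domain_add_legacy(). A minimal sketch of that pattern follows; it is not part of the patch, and all example_* names are invented.

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

static int example_map(struct irq_domain *d, unsigned int virq,
		       irq_hw_number_t hw)
{
	/* Bind a chip and flow handler to the freshly mapped virq here,
	 * e.g. irq_set_chip_and_handler(virq, &example_chip, handle_level_irq); */
	return 0;
}

static const struct irq_domain_ops example_ops = {
	.map	= example_map,
	.xlate	= irq_domain_xlate_onecell,
};

static struct irq_domain *example_domain;

static void __init example_pic_init(struct device_node *np)
{
	/* Pre-allocated Linux IRQs 0..15 are bound 1:1 to hwirqs 0..15;
	 * with this series the core invokes .map for each of them. */
	example_domain = irq_domain_add_legacy(np, 16, 0, 0,
					       &example_ops, NULL);
	if (example_domain)
		irq_set_default_host(example_domain);
}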
@ -128,601 +130,15 @@ int arch_show_interrupts(struct seq_file *p, int prec)
return 0;
}
/*
* IRQ controller and virtual interrupts
*/
/* The main irq map itself is an array of NR_IRQ entries containing the
* associate host and irq number. An entry with a host of NULL is free.
* An entry can be allocated if it's free, the allocator always then sets
* hwirq first to the host's invalid irq number and then fills ops.
*/
struct irq_map_entry {
irq_hw_number_t hwirq;
struct irq_host *host;
};
static LIST_HEAD(irq_hosts);
static DEFINE_RAW_SPINLOCK(irq_big_lock);
static DEFINE_MUTEX(revmap_trees_mutex);
static struct irq_map_entry irq_map[NR_IRQS];
static unsigned int irq_virq_count = NR_IRQS;
static struct irq_host *irq_default_host;
irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
{
-return irq_map[d->irq].hwirq;
+return d->hwirq;
}
EXPORT_SYMBOL_GPL(irqd_to_hwirq);
irq_hw_number_t virq_to_hw(unsigned int virq)
{
-return irq_map[virq].hwirq;
+struct irq_data *irq_data = irq_get_irq_data(virq);
+return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);
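After the conversion the hwirq number is carried in struct irq_data itself rather than in a private irq_map[] table, so the helpers above collapse to a field access. A hypothetical driver-side helper (example_hwirq_of() is an invented name) shows the same idea:

#include <linux/irq.h>

/* Invented helper: recover the controller-local number for a Linux IRQ. */
static irq_hw_number_t example_hwirq_of(unsigned int virq)
{
	struct irq_data *d = irq_get_irq_data(virq);

	return d ? d->hwirq : 0;	/* 0 here meaning "no descriptor" */
}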
bool virq_is_host(unsigned int virq, struct irq_host *host)
{
return irq_map[virq].host == host;
}
EXPORT_SYMBOL_GPL(virq_is_host);
static int default_irq_host_match(struct irq_host *h, struct device_node *np)
{
return h->of_node != NULL && h->of_node == np;
}
struct irq_host *irq_alloc_host(struct device_node *of_node,
unsigned int revmap_type,
unsigned int revmap_arg,
struct irq_host_ops *ops,
irq_hw_number_t inval_irq)
{
struct irq_host *host;
unsigned int size = sizeof(struct irq_host);
unsigned int i;
unsigned int *rmap;
unsigned long flags;
/* Allocate structure and revmap table if using linear mapping */
if (revmap_type == IRQ_HOST_MAP_LINEAR)
size += revmap_arg * sizeof(unsigned int);
host = kzalloc(size, GFP_KERNEL);
if (host == NULL)
return NULL;
/* Fill structure */
host->revmap_type = revmap_type;
host->inval_irq = inval_irq;
host->ops = ops;
host->of_node = of_node_get(of_node);
if (host->ops->match == NULL)
host->ops->match = default_irq_host_match;
raw_spin_lock_irqsave(&irq_big_lock, flags);
/* Check for the priority controller. */
if (revmap_type == IRQ_HOST_MAP_PRIORITY) {
if (irq_map[0].host != NULL) {
raw_spin_unlock_irqrestore(&irq_big_lock, flags);
of_node_put(host->of_node);
kfree(host);
return NULL;
}
irq_map[0].host = host;
}
list_add(&host->link, &irq_hosts);
raw_spin_unlock_irqrestore(&irq_big_lock, flags);
/* Additional setups per revmap type */
switch (revmap_type) {
case IRQ_HOST_MAP_PRIORITY:
/* 0 is always the invalid number for priority */
host->inval_irq = 0;
/* setup us as the host for all priority interrupts */
for (i = 1; i < NR_PRIORITY_IRQS; i++) {
irq_map[i].hwirq = i;
smp_wmb();
irq_map[i].host = host;
smp_wmb();
ops->map(host, i, i);
}
break;
case IRQ_HOST_MAP_LINEAR:
rmap = (unsigned int *)(host + 1);
for (i = 0; i < revmap_arg; i++)
rmap[i] = NO_IRQ;
host->revmap_data.linear.size = revmap_arg;
smp_wmb();
host->revmap_data.linear.revmap = rmap;
break;
case IRQ_HOST_MAP_TREE:
INIT_RADIX_TREE(&host->revmap_data.tree, GFP_KERNEL);
break;
default:
break;
}
pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host);
return host;
}
struct irq_host *irq_find_host(struct device_node *node)
{
struct irq_host *h, *found = NULL;
unsigned long flags;
/* We might want to match the legacy controller last since
* it might potentially be set to match all interrupts in
* the absence of a device node. This isn't a problem so far
* yet though...
*/
raw_spin_lock_irqsave(&irq_big_lock, flags);
list_for_each_entry(h, &irq_hosts, link)
if (h->ops->match(h, node)) {
found = h;
break;
}
raw_spin_unlock_irqrestore(&irq_big_lock, flags);
return found;
}
EXPORT_SYMBOL_GPL(irq_find_host);
void irq_set_default_host(struct irq_host *host)
{
pr_debug("irq: Default host set to @0x%p\n", host);
irq_default_host = host;
}
void irq_set_virq_count(unsigned int count)
{
pr_debug("irq: Trying to set virq count to %d\n", count);
BUG_ON(count < NR_PRIORITY_IRQS);
if (count < NR_IRQS)
irq_virq_count = count;
}
static int irq_setup_virq(struct irq_host *host, unsigned int virq,
irq_hw_number_t hwirq)
{
int res;
res = irq_alloc_desc_at(virq, 0);
if (res != virq) {
pr_debug("irq: -> allocating desc failed\n");
goto error;
}
/* map it */
smp_wmb();
irq_map[virq].hwirq = hwirq;
smp_mb();
if (host->ops->map(host, virq, hwirq)) {
pr_debug("irq: -> mapping failed, freeing\n");
goto errdesc;
}
irq_clear_status_flags(virq, IRQ_NOREQUEST);
return 0;
errdesc:
irq_free_descs(virq, 1);
error:
irq_free_virt(virq, 1);
return -1;
}
unsigned int irq_create_direct_mapping(struct irq_host *host)
{
unsigned int virq;
if (host == NULL)
host = irq_default_host;
BUG_ON(host == NULL);
WARN_ON(host->revmap_type != IRQ_HOST_MAP_NOMAP);
virq = irq_alloc_virt(host, 1, 0);
if (virq == NO_IRQ) {
pr_debug("irq: create_direct virq allocation failed\n");
return NO_IRQ;
}
pr_debug("irq: create_direct obtained virq %d\n", virq);
if (irq_setup_virq(host, virq, virq))
return NO_IRQ;
return virq;
}
unsigned int irq_create_mapping(struct irq_host *host,
irq_hw_number_t hwirq)
{
unsigned int virq, hint;
pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq);
/* Look for default host if necessary */
if (host == NULL)
host = irq_default_host;
if (host == NULL) {
printk(KERN_WARNING "irq_create_mapping called for"
" NULL host, hwirq=%lx\n", hwirq);
WARN_ON(1);
return NO_IRQ;
}
pr_debug("irq: -> using host @%p\n", host);
/* Check if mapping already exists */
virq = irq_find_mapping(host, hwirq);
if (virq != NO_IRQ) {
pr_debug("irq: -> existing mapping on virq %d\n", virq);
return virq;
}
/* Allocate a virtual interrupt number */
hint = hwirq % irq_virq_count;
virq = irq_alloc_virt(host, 1, hint);
if (virq == NO_IRQ) {
pr_debug("irq: -> virq allocation failed\n");
return NO_IRQ;
}
if (irq_setup_virq(host, virq, hwirq))
return NO_IRQ;
pr_debug("irq: irq %lu on host %s mapped to virtual irq %u\n",
hwirq, host->of_node ? host->of_node->full_name : "null", virq);
return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);
unsigned int irq_create_of_mapping(struct device_node *controller,
const u32 *intspec, unsigned int intsize)
{
struct irq_host *host;
irq_hw_number_t hwirq;
unsigned int type = IRQ_TYPE_NONE;
unsigned int virq;
if (controller == NULL)
host = irq_default_host;
else
host = irq_find_host(controller);
if (host == NULL) {
printk(KERN_WARNING "irq: no irq host found for %s !\n",
controller->full_name);
return NO_IRQ;
}
/* If host has no translation, then we assume interrupt line */
if (host->ops->xlate == NULL)
hwirq = intspec[0];
else {
if (host->ops->xlate(host, controller, intspec, intsize,
&hwirq, &type))
return NO_IRQ;
}
/* Create mapping */
virq = irq_create_mapping(host, hwirq);
if (virq == NO_IRQ)
return virq;
/* Set type if specified and different than the current one */
if (type != IRQ_TYPE_NONE &&
type != (irqd_get_trigger_type(irq_get_irq_data(virq))))
irq_set_irq_type(virq, type);
return virq;
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);
void irq_dispose_mapping(unsigned int virq)
{
struct irq_host *host;
irq_hw_number_t hwirq;
if (virq == NO_IRQ)
return;
/* Never unmap priority interrupts */
if (virq < NR_PRIORITY_IRQS)
return;
host = irq_map[virq].host;
if (WARN_ON(host == NULL))
return;
irq_set_status_flags(virq, IRQ_NOREQUEST);
/* remove chip and handler */
irq_set_chip_and_handler(virq, NULL, NULL);
/* Make sure it's completed */
synchronize_irq(virq);
/* Tell the PIC about it */
if (host->ops->unmap)
host->ops->unmap(host, virq);
smp_mb();
/* Clear reverse map */
hwirq = irq_map[virq].hwirq;
switch (host->revmap_type) {
case IRQ_HOST_MAP_LINEAR:
if (hwirq < host->revmap_data.linear.size)
host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
break;
case IRQ_HOST_MAP_TREE:
mutex_lock(&revmap_trees_mutex);
radix_tree_delete(&host->revmap_data.tree, hwirq);
mutex_unlock(&revmap_trees_mutex);
break;
}
/* Destroy map */
smp_mb();
irq_map[virq].hwirq = host->inval_irq;
irq_free_descs(virq, 1);
/* Free it */
irq_free_virt(virq, 1);
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);
unsigned int irq_find_mapping(struct irq_host *host,
irq_hw_number_t hwirq)
{
unsigned int i;
unsigned int hint = hwirq % irq_virq_count;
/* Look for default host if necessary */
if (host == NULL)
host = irq_default_host;
if (host == NULL)
return NO_IRQ;
/* Slow path does a linear search of the map */
i = hint;
do {
if (irq_map[i].host == host &&
irq_map[i].hwirq == hwirq)
return i;
i++;
if (i >= irq_virq_count)
i = 4;
} while (i != hint);
return NO_IRQ;
}
EXPORT_SYMBOL_GPL(irq_find_mapping);
unsigned int irq_radix_revmap_lookup(struct irq_host *host,
irq_hw_number_t hwirq)
{
struct irq_map_entry *ptr;
unsigned int virq;
if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_TREE))
return irq_find_mapping(host, hwirq);
/*
* The ptr returned references the static global irq_map.
* but freeing an irq can delete nodes along the path to
* do the lookup via call_rcu.
*/
rcu_read_lock();
ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq);
rcu_read_unlock();
/*
* If found in radix tree, then fine.
* Else fallback to linear lookup - this should not happen in practice
* as it means that we failed to insert the node in the radix tree.
*/
if (ptr)
virq = ptr - irq_map;
else
virq = irq_find_mapping(host, hwirq);
return virq;
}
void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
irq_hw_number_t hwirq)
{
if (WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE))
return;
if (virq != NO_IRQ) {
mutex_lock(&revmap_trees_mutex);
radix_tree_insert(&host->revmap_data.tree, hwirq,
&irq_map[virq]);
mutex_unlock(&revmap_trees_mutex);
}
}
unsigned int irq_linear_revmap(struct irq_host *host,
irq_hw_number_t hwirq)
{
unsigned int *revmap;
if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_LINEAR))
return irq_find_mapping(host, hwirq);
/* Check revmap bounds */
if (unlikely(hwirq >= host->revmap_data.linear.size))
return irq_find_mapping(host, hwirq);
/* Check if revmap was allocated */
revmap = host->revmap_data.linear.revmap;
if (unlikely(revmap == NULL))
return irq_find_mapping(host, hwirq);
/* Fill up revmap with slow path if no mapping found */
if (unlikely(revmap[hwirq] == NO_IRQ))
revmap[hwirq] = irq_find_mapping(host, hwirq);
return revmap[hwirq];
}
unsigned int irq_alloc_virt(struct irq_host *host,
unsigned int count,
unsigned int hint)
{
unsigned long flags;
unsigned int i, j, found = NO_IRQ;
if (count == 0 || count > (irq_virq_count - NR_PRIORITY_IRQS))
return NO_IRQ;
raw_spin_lock_irqsave(&irq_big_lock, flags);
/* Use hint for 1 interrupt if any */
if (count == 1 && hint >= NR_PRIORITY_IRQS &&
hint < irq_virq_count && irq_map[hint].host == NULL) {
found = hint;
goto hint_found;
}
/* Look for count consecutive numbers in the allocatable
* (non-legacy) space
*/
for (i = NR_PRIORITY_IRQS, j = 0; i < irq_virq_count; i++) {
if (irq_map[i].host != NULL)
j = 0;
else
j++;
if (j == count) {
found = i - count + 1;
break;
}
}
if (found == NO_IRQ) {
raw_spin_unlock_irqrestore(&irq_big_lock, flags);
return NO_IRQ;
}
hint_found:
for (i = found; i < (found + count); i++) {
irq_map[i].hwirq = host->inval_irq;
smp_wmb();
irq_map[i].host = host;
}
raw_spin_unlock_irqrestore(&irq_big_lock, flags);
return found;
}
void irq_free_virt(unsigned int virq, unsigned int count)
{
unsigned long flags;
unsigned int i;
WARN_ON(virq < NR_PRIORITY_IRQS);
WARN_ON(count == 0 || (virq + count) > irq_virq_count);
if (virq < NR_PRIORITY_IRQS) {
if (virq + count < NR_PRIORITY_IRQS)
return;
count -= NR_PRIORITY_IRQS - virq;
virq = NR_PRIORITY_IRQS;
}
if (count > irq_virq_count || virq > irq_virq_count - count) {
if (virq > irq_virq_count)
return;
count = irq_virq_count - virq;
}
raw_spin_lock_irqsave(&irq_big_lock, flags);
for (i = virq; i < (virq + count); i++) {
struct irq_host *host;
host = irq_map[i].host;
irq_map[i].hwirq = host->inval_irq;
smp_wmb();
irq_map[i].host = NULL;
}
raw_spin_unlock_irqrestore(&irq_big_lock, flags);
}
#ifdef CONFIG_VIRQ_DEBUG
static int virq_debug_show(struct seq_file *m, void *private)
{
unsigned long flags;
struct irq_desc *desc;
const char *p;
static const char none[] = "none";
void *data;
int i;
seq_printf(m, "%-5s %-7s %-15s %-18s %s\n", "virq", "hwirq",
"chip name", "chip data", "host name");
for (i = 1; i < nr_irqs; i++) {
desc = irq_to_desc(i);
if (!desc)
continue;
raw_spin_lock_irqsave(&desc->lock, flags);
if (desc->action && desc->action->handler) {
struct irq_chip *chip;
seq_printf(m, "%5d ", i);
seq_printf(m, "0x%05lx ", irq_map[i].hwirq);
chip = irq_desc_get_chip(desc);
if (chip && chip->name)
p = chip->name;
else
p = none;
seq_printf(m, "%-15s ", p);
data = irq_desc_get_chip_data(desc);
seq_printf(m, "0x%16p ", data);
if (irq_map[i].host && irq_map[i].host->of_node)
p = irq_map[i].host->of_node->full_name;
else
p = none;
seq_printf(m, "%s\n", p);
}
raw_spin_unlock_irqrestore(&desc->lock, flags);
}
return 0;
}
static int virq_debug_open(struct inode *inode, struct file *file)
{
return single_open(file, virq_debug_show, inode->i_private);
}
static const struct file_operations virq_debug_fops = {
.open = virq_debug_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int __init irq_debugfs_init(void)
{
if (debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root,
NULL, &virq_debug_fops) == NULL)
return -ENOMEM;
return 0;
}
device_initcall(irq_debugfs_init);
#endif /* CONFIG_VIRQ_DEBUG */
@ -48,7 +48,7 @@ struct megamod_regs {
};
struct megamod_pic {
-struct irq_host *irqhost;
+struct irq_domain *irqhost;
struct megamod_regs __iomem *regs;
raw_spinlock_t lock;
@ -116,7 +116,7 @@ static void megamod_irq_cascade(unsigned int irq, struct irq_desc *desc)
}
}
-static int megamod_map(struct irq_host *h, unsigned int virq,
+static int megamod_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
struct megamod_pic *pic = h->host_data;
@ -136,21 +136,9 @@ static int megamod_map(struct irq_host *h, unsigned int virq,
return 0;
}
-static int megamod_xlate(struct irq_host *h, struct device_node *ct,
-const u32 *intspec, unsigned int intsize,
-irq_hw_number_t *out_hwirq, unsigned int *out_type)
-{
-/* megamod intspecs must have 1 cell */
-BUG_ON(intsize != 1);
-*out_hwirq = intspec[0];
-*out_type = IRQ_TYPE_NONE;
-return 0;
-}
-static struct irq_host_ops megamod_host_ops = {
-.map = megamod_map,
-.xlate = megamod_xlate,
-};
+static const struct irq_domain_ops megamod_domain_ops = {
+.map = megamod_map,
+.xlate = irq_domain_xlate_onecell,
+};
static void __init set_megamod_mux(struct megamod_pic *pic, int src, int output)
@ -223,9 +211,8 @@ static struct megamod_pic * __init init_megamod_pic(struct device_node *np)
return NULL;
}
-pic->irqhost = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR,
-NR_COMBINERS * 32, &megamod_host_ops,
-IRQ_UNMAPPED);
+pic->irqhost = irq_domain_add_linear(np, NR_COMBINERS * 32,
+&megamod_domain_ops, pic);
if (!pic->irqhost) {
pr_err("%s: Could not alloc host.\n", np->full_name);
goto error_free;
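For a linear domain like megamod's, mappings are created on demand rather than all up front; irq_create_mapping() allocates the descriptor and invokes the domain's .map callback the first time a hwirq is seen. A hedged caller-side sketch, with domain setup assumed done elsewhere and the example_* name invented:

#include <linux/irqdomain.h>

/* Sketch: resolve a combiner line to a Linux IRQ, mapping it on first use. */
static unsigned int example_map_combiner_line(struct irq_domain *domain,
					      irq_hw_number_t hwirq)
{
	/* Returns the existing mapping if one was already created. */
	return irq_create_mapping(domain, hwirq);
}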
@ -14,6 +14,7 @@ config MICROBLAZE
select TRACING_SUPPORT
select OF
select OF_EARLY_FLATTREE
select IRQ_DOMAIN
select HAVE_GENERIC_HARDIRQS
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
@ -1,17 +1 @@
/*
* Copyright (C) 2006 Atmark Techno, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#ifndef _ASM_MICROBLAZE_HARDIRQ_H
#define _ASM_MICROBLAZE_HARDIRQ_H
/* should be defined in each interrupt controller driver */
extern unsigned int get_irq(struct pt_regs *regs);
#include <asm-generic/hardirq.h>
#endif /* _ASM_MICROBLAZE_HARDIRQ_H */
@ -9,49 +9,13 @@
#ifndef _ASM_MICROBLAZE_IRQ_H
#define _ASM_MICROBLAZE_IRQ_H
#define NR_IRQS (32 + 1)
/*
* Linux IRQ# is currently offset by one to map to the hardware
* irq number. So hardware IRQ0 maps to Linux irq 1.
*/
#define NO_IRQ_OFFSET 1
#define IRQ_OFFSET NO_IRQ_OFFSET
#define NR_IRQS (32 + IRQ_OFFSET)
#include <asm-generic/irq.h>
/* This type is the placeholder for a hardware interrupt number. It has to
* be big enough to enclose whatever representation is used by a given
* platform.
*/
typedef unsigned long irq_hw_number_t;
extern unsigned int nr_irq;
struct pt_regs;
extern void do_IRQ(struct pt_regs *regs);
-/** FIXME - not implement
- * irq_dispose_mapping - Unmap an interrupt
+/* should be defined in each interrupt controller driver */
+extern unsigned int get_irq(void);
* @virq: linux virq number of the interrupt to unmap
*/
static inline void irq_dispose_mapping(unsigned int virq)
{
return;
}
struct irq_host;
/**
* irq_create_mapping - Map a hardware interrupt into linux virq space
* @host: host owning this hardware interrupt or NULL for default host
* @hwirq: hardware irq number in that host space
*
* Only one mapping per hardware interrupt is permitted. Returns a linux
* virq number.
* If the sense/trigger is to be specified, set_irq_type() should be called
* on the number returned from that call.
*/
extern unsigned int irq_create_mapping(struct irq_host *host,
irq_hw_number_t hwirq);
#endif /* _ASM_MICROBLAZE_IRQ_H */
@ -9,6 +9,7 @@
*/
#include <linux/init.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <asm/page.h>
#include <linux/io.h>
@ -25,8 +26,6 @@ static unsigned int intc_baseaddr;
#define INTC_BASE intc_baseaddr
#endif
unsigned int nr_irq;
/* No one else should require these constants, so define them locally here. */
#define ISR 0x00 /* Interrupt Status Register */
#define IPR 0x04 /* Interrupt Pending Register */
@ -84,24 +83,45 @@ static struct irq_chip intc_dev = {
.irq_mask_ack = intc_mask_ack,
};
-unsigned int get_irq(struct pt_regs *regs)
-{
-int irq;
-/*
- * NOTE: This function is the one that needs to be improved in
- * order to handle multiple interrupt controllers. It currently
- * is hardcoded to check for interrupts only on the first INTC.
- */
-irq = in_be32(INTC_BASE + IVR) + NO_IRQ_OFFSET;
-pr_debug("get_irq: %d\n", irq);
+static struct irq_domain *root_domain;
+unsigned int get_irq(void)
+{
+unsigned int hwirq, irq = -1;
+hwirq = in_be32(INTC_BASE + IVR);
+if (hwirq != -1U)
+irq = irq_find_mapping(root_domain, hwirq);
+pr_debug("get_irq: hwirq=%d, irq=%d\n", hwirq, irq);
return irq;
}
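get_irq() above uses irq_find_mapping(), the generic hwirq-to-virq lookup that replaces each arch's private reverse map. For linear domains the new core also keeps a cached fast path, irq_linear_revmap(). A sketch of a dispatch routine using it, with invented example_* names and the domain assumed registered at init time:

#include <linux/irq.h>
#include <linux/irqdomain.h>

static struct irq_domain *example_domain;	/* registered at init time */

/* Sketch: translate a hardware cause-register value and handle it. */
static void example_dispatch(irq_hw_number_t hwirq)
{
	unsigned int virq = irq_linear_revmap(example_domain, hwirq);

	if (virq)
		generic_handle_irq(virq);
}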
int xintc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
{
u32 intr_mask = (u32)d->host_data;
if (intr_mask & (1 << hw)) {
irq_set_chip_and_handler_name(irq, &intc_dev,
handle_edge_irq, "edge");
irq_clear_status_flags(irq, IRQ_LEVEL);
} else {
irq_set_chip_and_handler_name(irq, &intc_dev,
handle_level_irq, "level");
irq_set_status_flags(irq, IRQ_LEVEL);
}
return 0;
}
static const struct irq_domain_ops xintc_irq_domain_ops = {
.xlate = irq_domain_xlate_onetwocell,
.map = xintc_map,
};
void __init init_IRQ(void)
{
-u32 i, intr_mask;
+u32 nr_irq, intr_mask;
struct device_node *intc = NULL;
#ifdef CONFIG_SELFMOD_INTC
unsigned int intc_baseaddr = 0;
@ -146,16 +166,9 @@ void __init init_IRQ(void)
/* Turn on the Master Enable. */
out_be32(intc_baseaddr + MER, MER_HIE | MER_ME);
-for (i = IRQ_OFFSET; i < (nr_irq + IRQ_OFFSET); ++i) {
-if (intr_mask & (0x00000001 << (i - IRQ_OFFSET))) {
-irq_set_chip_and_handler_name(i, &intc_dev,
-handle_edge_irq, "edge");
-irq_clear_status_flags(i, IRQ_LEVEL);
-} else {
-irq_set_chip_and_handler_name(i, &intc_dev,
-handle_level_irq, "level");
-irq_set_status_flags(i, IRQ_LEVEL);
-}
-irq_get_irq_data(i)->hwirq = i - IRQ_OFFSET;
-}
+/* Yeah, okay, casting the intr_mask to a void* is butt-ugly, but I'm
+ * lazy and Michal can clean it up to something nicer when he tests
+ * and commits this patch. ~~gcl */
+root_domain = irq_domain_add_linear(intc, nr_irq, &xintc_irq_domain_ops,
+(void *)intr_mask);
}
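Note how the edge/level decision moved out of init_IRQ() and into xintc_map(), which now runs once per mapping; the intr_mask that the callback needs rides along in the domain's host_data slot. A sketch of the same host_data pattern, with invented names and a struct in place of the cast-to-void* shortcut used above:

#include <linux/irq.h>
#include <linux/irqdomain.h>

struct example_intc {
	u32 edge_mask;			/* which lines are edge triggered */
};

static int example_map(struct irq_domain *d, unsigned int virq,
		       irq_hw_number_t hw)
{
	struct example_intc *intc = d->host_data;	/* set at add_linear time */

	if (intc->edge_mask & (1 << hw))
		irq_set_handler(virq, handle_edge_irq);
	else
		irq_set_handler(virq, handle_level_irq);
	return 0;
}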
@ -31,14 +31,13 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
trace_hardirqs_off();
irq_enter();
-irq = get_irq(regs);
+irq = get_irq();
next_irq:
BUG_ON(!irq);
-/* Subtract 1 because of get_irq */
-generic_handle_irq(irq + IRQ_OFFSET - NO_IRQ_OFFSET);
+generic_handle_irq(irq);
-irq = get_irq(regs);
-if (irq) {
+irq = get_irq();
+if (irq != -1U) {
pr_debug("next irq: %d\n", irq);
++concurrent_irq;
goto next_irq;
@ -48,18 +47,3 @@ next_irq:
set_irq_regs(old_regs);
trace_hardirqs_on();
}
/* MS: There is no any advance mapping mechanism. We are using simple 32bit
intc without any cascades or any connection that's why mapping is 1:1 */
unsigned int irq_create_mapping(struct irq_host *host, irq_hw_number_t hwirq)
{
return hwirq + IRQ_OFFSET;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);
unsigned int irq_create_of_mapping(struct device_node *controller,
const u32 *intspec, unsigned int intsize)
{
return intspec[0] + IRQ_OFFSET;
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);
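These microblaze-private stubs could be deleted because the generic irqdomain code now supplies real irq_create_mapping()/irq_create_of_mapping() implementations, which is what irq_of_parse_and_map() ends up calling. A sketch of a consumer that should now work unchanged on any converted architecture; the example_* names are invented:

#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_irq.h>

static irqreturn_t example_isr(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_probe(struct device_node *np)
{
	/* Walks the DT interrupt specifier through the controller's
	 * irq_domain and returns a ready-to-use Linux IRQ number. */
	unsigned int irq = irq_of_parse_and_map(np, 0);

	if (!irq)
		return -EINVAL;
	return request_irq(irq, example_isr, 0, "example", NULL);
}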
@ -26,7 +26,6 @@
#include <linux/cache.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/cpu.h>
#include <asm/cacheflush.h>
#include <asm/entry.h>
#include <asm/cpuinfo.h>
@ -52,8 +51,6 @@ void __init setup_arch(char **cmdline_p)
unflatten_device_tree();
/* NOTE I think that this function is not necessary to call */
/* irq_early_init(); */
setup_cpuinfo();
microblaze_cache_init();
@ -227,23 +224,5 @@ static int __init setup_bus_notifier(void)
return 0;
}
arch_initcall(setup_bus_notifier);
static DEFINE_PER_CPU(struct cpu, cpu_devices);
static int __init topology_init(void)
{
int i, ret;
for_each_present_cpu(i) {
struct cpu *c = &per_cpu(cpu_devices, i);
ret = register_cpu(c, i);
if (ret)
printk(KERN_WARNING "topology_init: register_cpu %d "
"failed (%d)\n", i, ret);
}
return 0;
}
subsys_initcall(topology_init);
@ -2327,6 +2327,7 @@ config USE_OF
bool "Flattened Device Tree support" bool "Flattened Device Tree support"
select OF select OF
select OF_EARLY_FLATTREE select OF_EARLY_FLATTREE
select IRQ_DOMAIN
help help
Include support for flattened device tree machine descriptions. Include support for flattened device tree machine descriptions.
@ -2356,6 +2357,7 @@ config PCI
depends on HW_HAS_PCI
select PCI_DOMAINS
select GENERIC_PCI_IOMAP
select NO_GENERIC_PCI_IOPORT_MAP
help
Find out whether you have a PCI motherboard. PCI is the name of a
bus system, i.e. the way the CPU talks to the other stuff inside
@ -11,15 +11,12 @@
#include <linux/linkage.h>
#include <linux/smp.h>
#include <linux/irqdomain.h>
#include <asm/mipsmtregs.h>
#include <irq.h>
static inline void irq_dispose_mapping(unsigned int virq)
{
}
#ifdef CONFIG_I8259
static inline int irq_canonicalize(int irq)
{
@ -60,20 +60,6 @@ void __init early_init_dt_setup_initrd_arch(unsigned long start,
}
#endif
/*
* irq_create_of_mapping - Hook to resolve OF irq specifier into a Linux irq#
*
* Currently the mapping mechanism is trivial; simple flat hwirq numbers are
* mapped 1:1 onto Linux irq numbers. Cascaded irq controllers are not
* supported.
*/
unsigned int irq_create_of_mapping(struct device_node *controller,
const u32 *intspec, unsigned int intsize)
{
return intspec[0];
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);
void __init early_init_devtree(void *params)
{
/* Setup flat device-tree pointer */
@ -10,8 +10,8 @@
#include <linux/module.h>
#include <asm/io.h>
-static void __iomem *ioport_map_pci(struct pci_dev *dev,
+void __iomem *__pci_ioport_map(struct pci_dev *dev,
unsigned long port, unsigned int nr)
{
struct pci_controller *ctrl = dev->bus->sysdata;
unsigned long base = ctrl->io_map_base;
@ -24,6 +24,7 @@
#include <linux/types.h>
#include <asm/irq.h>
#include <linux/irqdomain.h>
#include <linux/atomic.h>
#include <linux/of_irq.h>
#include <linux/of_fdt.h>
@ -63,15 +64,6 @@ extern const void *of_get_mac_address(struct device_node *np);
struct pci_dev;
extern int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq);
/* This routine is here to provide compatibility with how powerpc
* handles IRQ mapping for OF device nodes. We precompute and permanently
* register them in the platform_device objects, whereas powerpc computes them
* on request.
*/
static inline void irq_dispose_mapping(unsigned int virq)
{
}
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_OPENRISC_PROM_H */
@ -135,6 +135,7 @@ config PPC
select HAVE_GENERIC_HARDIRQS
select HAVE_SPARSE_IRQ
select IRQ_PER_CPU
select IRQ_DOMAIN
select GENERIC_IRQ_SHOW
select GENERIC_IRQ_SHOW_LEVEL
select IRQ_FORCED_THREADING
@ -25,7 +25,7 @@
struct ehv_pic {
/* The remapper for this EHV_PIC */
-struct irq_host *irqhost;
+struct irq_domain *irqhost;
/* The "linux" controller struct */
struct irq_chip hc_irq;
@ -6,7 +6,7 @@
extern void i8259_init(struct device_node *node, unsigned long intack_addr);
extern unsigned int i8259_irq(void);
-extern struct irq_host *i8259_get_host(void);
+extern struct irq_domain *i8259_get_host(void);
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_I8259_H */
@ -9,6 +9,7 @@
* 2 of the License, or (at your option) any later version.
*/
#include <linux/irqdomain.h>
#include <linux/threads.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
@ -35,258 +36,12 @@ extern atomic_t ppc_n_lost_interrupts;
/* Total number of virq in the platform */
#define NR_IRQS CONFIG_NR_IRQS
/* Number of irqs reserved for the legacy controller */
#define NUM_ISA_INTERRUPTS 16
/* Same thing, used by the generic IRQ code */
#define NR_IRQS_LEGACY NUM_ISA_INTERRUPTS
/* This type is the placeholder for a hardware interrupt number. It has to
* be big enough to enclose whatever representation is used by a given
* platform.
*/
typedef unsigned long irq_hw_number_t;
/* Interrupt controller "host" data structure. This could be defined as a
* irq domain controller. That is, it handles the mapping between hardware
* and virtual interrupt numbers for a given interrupt domain. The host
* structure is generally created by the PIC code for a given PIC instance
* (though a host can cover more than one PIC if they have a flat number
* model). It's the host callbacks that are responsible for setting the
* irq_chip on a given irq_desc after it's been mapped.
*
* The host code and data structures are fairly agnostic to the fact that
* we use an open firmware device-tree. We do have references to struct
* device_node in two places: in irq_find_host() to find the host matching
* a given interrupt controller node, and of course as an argument to its
* counterpart host->ops->match() callback. However, those are treated as
* generic pointers by the core and the fact that it's actually a device-node
* pointer is purely a convention between callers and implementation. This
* code could thus be used on other architectures by replacing those two
* by some sort of arch-specific void * "token" used to identify interrupt
* controllers.
*/
struct irq_host;
struct radix_tree_root;
/* Functions below are provided by the host and called whenever a new mapping
* is created or an old mapping is disposed. The host can then proceed to
* whatever internal data structures management is required. It also needs
* to setup the irq_desc when returning from map().
*/
struct irq_host_ops {
/* Match an interrupt controller device node to a host, returns
* 1 on a match
*/
int (*match)(struct irq_host *h, struct device_node *node);
/* Create or update a mapping between a virtual irq number and a hw
* irq number. This is called only once for a given mapping.
*/
int (*map)(struct irq_host *h, unsigned int virq, irq_hw_number_t hw);
/* Dispose of such a mapping */
void (*unmap)(struct irq_host *h, unsigned int virq);
/* Translate device-tree interrupt specifier from raw format coming
* from the firmware to a irq_hw_number_t (interrupt line number) and
* type (sense) that can be passed to set_irq_type(). In the absence
* of this callback, irq_create_of_mapping() and irq_of_parse_and_map()
* will return the hw number in the first cell and IRQ_TYPE_NONE for
* the type (which amount to keeping whatever default value the
* interrupt controller has for that line)
*/
int (*xlate)(struct irq_host *h, struct device_node *ctrler,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_type);
};
struct irq_host {
struct list_head link;
/* type of reverse mapping technique */
unsigned int revmap_type;
#define IRQ_HOST_MAP_LEGACY 0 /* legacy 8259, gets irqs 1..15 */
#define IRQ_HOST_MAP_NOMAP 1 /* no fast reverse mapping */
#define IRQ_HOST_MAP_LINEAR 2 /* linear map of interrupts */
#define IRQ_HOST_MAP_TREE 3 /* radix tree */
union {
struct {
unsigned int size;
unsigned int *revmap;
} linear;
struct radix_tree_root tree;
} revmap_data;
struct irq_host_ops *ops;
void *host_data;
irq_hw_number_t inval_irq;
/* Optional device node pointer */
struct device_node *of_node;
};
struct irq_data;
extern irq_hw_number_t irqd_to_hwirq(struct irq_data *d);
extern irq_hw_number_t virq_to_hw(unsigned int virq);
extern bool virq_is_host(unsigned int virq, struct irq_host *host);
/**
* irq_alloc_host - Allocate a new irq_host data structure
* @of_node: optional device-tree node of the interrupt controller
* @revmap_type: type of reverse mapping to use
* @revmap_arg: for IRQ_HOST_MAP_LINEAR linear only: size of the map
* @ops: map/unmap host callbacks
* @inval_irq: provide a hw number in that host space that is always invalid
*
* Allocates and initializes an irq_host structure. Note that in the case of
* IRQ_HOST_MAP_LEGACY, the map() callback will be called before this returns
* for all legacy interrupts except 0 (which is always the invalid irq for
* a legacy controller). For an IRQ_HOST_MAP_LINEAR, the map is allocated by
* this call as well. For an IRQ_HOST_MAP_TREE, the radix tree will be allocated
* later during boot automatically (the reverse mapping will use the slow path
* until that happens).
*/
extern struct irq_host *irq_alloc_host(struct device_node *of_node,
unsigned int revmap_type,
unsigned int revmap_arg,
struct irq_host_ops *ops,
irq_hw_number_t inval_irq);
/**
* irq_find_host - Locates a host for a given device node
* @node: device-tree node of the interrupt controller
*/
extern struct irq_host *irq_find_host(struct device_node *node);
/**
* irq_set_default_host - Set a "default" host
* @host: default host pointer
*
* For convenience, it's possible to set a "default" host that will be used
* whenever NULL is passed to irq_create_mapping(). It makes life easier for
* platforms that want to manipulate a few hard coded interrupt numbers that
* aren't properly represented in the device-tree.
*/
extern void irq_set_default_host(struct irq_host *host);
/**
* irq_set_virq_count - Set the maximum number of virt irqs
* @count: number of linux virtual irqs, capped with NR_IRQS
*
* This is mainly for use by platforms like iSeries who want to program
* the virtual irq number in the controller to avoid the reverse mapping
*/
extern void irq_set_virq_count(unsigned int count);
/**
* irq_create_mapping - Map a hardware interrupt into linux virq space
* @host: host owning this hardware interrupt or NULL for default host
* @hwirq: hardware irq number in that host space
*
* Only one mapping per hardware interrupt is permitted. Returns a linux
* virq number.
* If the sense/trigger is to be specified, set_irq_type() should be called
* on the number returned from that call.
*/
extern unsigned int irq_create_mapping(struct irq_host *host,
irq_hw_number_t hwirq);
/**
* irq_dispose_mapping - Unmap an interrupt
* @virq: linux virq number of the interrupt to unmap
*/
extern void irq_dispose_mapping(unsigned int virq);
/**
* irq_find_mapping - Find a linux virq from an hw irq number.
* @host: host owning this hardware interrupt
* @hwirq: hardware irq number in that host space
*
* This is a slow path, for use by generic code. It's expected that an
* irq controller implementation directly calls the appropriate low level
* mapping function.
*/
extern unsigned int irq_find_mapping(struct irq_host *host,
irq_hw_number_t hwirq);
/**
* irq_create_direct_mapping - Allocate a virq for direct mapping
* @host: host to allocate the virq for or NULL for default host
*
* This routine is used for irq controllers which can choose the hardware
* interrupt numbers they generate. In such a case it's simplest to use
* the linux virq as the hardware interrupt number.
*/
extern unsigned int irq_create_direct_mapping(struct irq_host *host);
/**
* irq_radix_revmap_insert - Insert a hw irq to linux virq number mapping.
* @host: host owning this hardware interrupt
* @virq: linux irq number
* @hwirq: hardware irq number in that host space
*
* This is for use by irq controllers that use a radix tree reverse
* mapping for fast lookup.
*/
extern void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
irq_hw_number_t hwirq);
/**
* irq_radix_revmap_lookup - Find a linux virq from a hw irq number.
* @host: host owning this hardware interrupt
* @hwirq: hardware irq number in that host space
*
* This is a fast path, for use by irq controller code that uses radix tree
* revmaps
*/
extern unsigned int irq_radix_revmap_lookup(struct irq_host *host,
irq_hw_number_t hwirq);
/**
* irq_linear_revmap - Find a linux virq from a hw irq number.
* @host: host owning this hardware interrupt
* @hwirq: hardware irq number in that host space
*
* This is a fast path, for use by irq controller code that uses linear
* revmaps. It does fallback to the slow path if the revmap doesn't exist
* yet and will create the revmap entry with appropriate locking
*/
extern unsigned int irq_linear_revmap(struct irq_host *host,
irq_hw_number_t hwirq);
/**
* irq_alloc_virt - Allocate virtual irq numbers
* @host: host owning these new virtual irqs
* @count: number of consecutive numbers to allocate
* @hint: pass a hint number, the allocator will try to use a 1:1 mapping
*
* This is a low level function that is used internally by irq_create_mapping()
* and that can be used by some irq controllers implementations for things
* like allocating ranges of numbers for MSIs. The revmaps are left untouched.
*/
extern unsigned int irq_alloc_virt(struct irq_host *host,
unsigned int count,
unsigned int hint);
/**
* irq_free_virt - Free virtual irq numbers
* @virq: virtual irq number of the first interrupt to free
* @count: number of interrupts to free
*
* This function is the opposite of irq_alloc_virt. It will not clear reverse
* maps, this should be done previously by unmap'ing the interrupt. In fact,
* all interrupts covered by the range being freed should have been unmapped
* prior to calling this.
*/
extern void irq_free_virt(unsigned int virq, unsigned int count);
/**
* irq_early_init - Init irq remapping subsystem
@ -255,7 +255,7 @@ struct mpic
struct device_node *node;
/* The remapper for this MPIC */
-struct irq_host *irqhost;
+struct irq_domain *irqhost;
/* The "linux" controller struct */
struct irq_chip hc_irq;
@ -86,7 +86,7 @@ struct ics {
extern unsigned int xics_default_server;
extern unsigned int xics_default_distrib_server;
extern unsigned int xics_interrupt_server_size;
-extern struct irq_host *xics_host;
+extern struct irq_domain *xics_host;
struct xics_cppr {
unsigned char stack[MAX_NUM_PRIORITIES];
@ -486,409 +486,19 @@ void do_softirq(void)
local_irq_restore(flags);
}
/*
* IRQ controller and virtual interrupts
*/
/* The main irq map itself is an array of NR_IRQ entries containing the
* associate host and irq number. An entry with a host of NULL is free.
* An entry can be allocated if it's free, the allocator always then sets
* hwirq first to the host's invalid irq number and then fills ops.
*/
struct irq_map_entry {
irq_hw_number_t hwirq;
struct irq_host *host;
};
static LIST_HEAD(irq_hosts);
static DEFINE_RAW_SPINLOCK(irq_big_lock);
static DEFINE_MUTEX(revmap_trees_mutex);
static struct irq_map_entry irq_map[NR_IRQS];
static unsigned int irq_virq_count = NR_IRQS;
static struct irq_host *irq_default_host;
irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
{
-return irq_map[d->irq].hwirq;
+return d->hwirq;
}
EXPORT_SYMBOL_GPL(irqd_to_hwirq);
irq_hw_number_t virq_to_hw(unsigned int virq)
{
-return irq_map[virq].hwirq;
+struct irq_data *irq_data = irq_get_irq_data(virq);
+return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);
bool virq_is_host(unsigned int virq, struct irq_host *host)
{
return irq_map[virq].host == host;
}
EXPORT_SYMBOL_GPL(virq_is_host);
static int default_irq_host_match(struct irq_host *h, struct device_node *np)
{
return h->of_node != NULL && h->of_node == np;
}
struct irq_host *irq_alloc_host(struct device_node *of_node,
unsigned int revmap_type,
unsigned int revmap_arg,
struct irq_host_ops *ops,
irq_hw_number_t inval_irq)
{
struct irq_host *host;
unsigned int size = sizeof(struct irq_host);
unsigned int i;
unsigned int *rmap;
unsigned long flags;
/* Allocate structure and revmap table if using linear mapping */
if (revmap_type == IRQ_HOST_MAP_LINEAR)
size += revmap_arg * sizeof(unsigned int);
host = kzalloc(size, GFP_KERNEL);
if (host == NULL)
return NULL;
/* Fill structure */
host->revmap_type = revmap_type;
host->inval_irq = inval_irq;
host->ops = ops;
host->of_node = of_node_get(of_node);
if (host->ops->match == NULL)
host->ops->match = default_irq_host_match;
raw_spin_lock_irqsave(&irq_big_lock, flags);
/* If it's a legacy controller, check for duplicates and
* mark it as allocated (we use irq 0 host pointer for that
*/
if (revmap_type == IRQ_HOST_MAP_LEGACY) {
if (irq_map[0].host != NULL) {
raw_spin_unlock_irqrestore(&irq_big_lock, flags);
of_node_put(host->of_node);
kfree(host);
return NULL;
}
irq_map[0].host = host;
}
list_add(&host->link, &irq_hosts);
raw_spin_unlock_irqrestore(&irq_big_lock, flags);
/* Additional setups per revmap type */
switch(revmap_type) {
case IRQ_HOST_MAP_LEGACY:
/* 0 is always the invalid number for legacy */
host->inval_irq = 0;
/* setup us as the host for all legacy interrupts */
for (i = 1; i < NUM_ISA_INTERRUPTS; i++) {
irq_map[i].hwirq = i;
smp_wmb();
irq_map[i].host = host;
smp_wmb();
/* Legacy flags are left to default at this point,
* one can then use irq_create_mapping() to
* explicitly change them
*/
ops->map(host, i, i);
/* Clear norequest flags */
irq_clear_status_flags(i, IRQ_NOREQUEST);
}
break;
case IRQ_HOST_MAP_LINEAR:
rmap = (unsigned int *)(host + 1);
for (i = 0; i < revmap_arg; i++)
rmap[i] = NO_IRQ;
host->revmap_data.linear.size = revmap_arg;
smp_wmb();
host->revmap_data.linear.revmap = rmap;
break;
case IRQ_HOST_MAP_TREE:
INIT_RADIX_TREE(&host->revmap_data.tree, GFP_KERNEL);
break;
default:
break;
}
pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host);
return host;
}
struct irq_host *irq_find_host(struct device_node *node)
{
struct irq_host *h, *found = NULL;
unsigned long flags;
/* We might want to match the legacy controller last since
* it might potentially be set to match all interrupts in
* the absence of a device node. This isn't a problem so far
* yet though...
*/
raw_spin_lock_irqsave(&irq_big_lock, flags);
list_for_each_entry(h, &irq_hosts, link)
if (h->ops->match(h, node)) {
found = h;
break;
}
raw_spin_unlock_irqrestore(&irq_big_lock, flags);
return found;
}
EXPORT_SYMBOL_GPL(irq_find_host);
void irq_set_default_host(struct irq_host *host)
{
pr_debug("irq: Default host set to @0x%p\n", host);
irq_default_host = host;
}
void irq_set_virq_count(unsigned int count)
{
pr_debug("irq: Trying to set virq count to %d\n", count);
BUG_ON(count < NUM_ISA_INTERRUPTS);
if (count < NR_IRQS)
irq_virq_count = count;
}
static int irq_setup_virq(struct irq_host *host, unsigned int virq,
irq_hw_number_t hwirq)
{
int res;
res = irq_alloc_desc_at(virq, 0);
if (res != virq) {
pr_debug("irq: -> allocating desc failed\n");
goto error;
}
/* map it */
smp_wmb();
irq_map[virq].hwirq = hwirq;
smp_mb();
if (host->ops->map(host, virq, hwirq)) {
pr_debug("irq: -> mapping failed, freeing\n");
goto errdesc;
}
irq_clear_status_flags(virq, IRQ_NOREQUEST);
return 0;
errdesc:
irq_free_descs(virq, 1);
error:
irq_free_virt(virq, 1);
return -1;
}
unsigned int irq_create_direct_mapping(struct irq_host *host)
{
unsigned int virq;
if (host == NULL)
host = irq_default_host;
BUG_ON(host == NULL);
WARN_ON(host->revmap_type != IRQ_HOST_MAP_NOMAP);
virq = irq_alloc_virt(host, 1, 0);
if (virq == NO_IRQ) {
pr_debug("irq: create_direct virq allocation failed\n");
return NO_IRQ;
}
pr_debug("irq: create_direct obtained virq %d\n", virq);
if (irq_setup_virq(host, virq, virq))
return NO_IRQ;
return virq;
}
unsigned int irq_create_mapping(struct irq_host *host,
irq_hw_number_t hwirq)
{
unsigned int virq, hint;
pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq);
/* Look for default host if necessary */
if (host == NULL)
host = irq_default_host;
if (host == NULL) {
printk(KERN_WARNING "irq_create_mapping called for"
" NULL host, hwirq=%lx\n", hwirq);
WARN_ON(1);
return NO_IRQ;
}
pr_debug("irq: -> using host @%p\n", host);
/* Check if mapping already exists */
virq = irq_find_mapping(host, hwirq);
if (virq != NO_IRQ) {
pr_debug("irq: -> existing mapping on virq %d\n", virq);
return virq;
}
/* Get a virtual interrupt number */
if (host->revmap_type == IRQ_HOST_MAP_LEGACY) {
/* Handle legacy */
virq = (unsigned int)hwirq;
if (virq == 0 || virq >= NUM_ISA_INTERRUPTS)
return NO_IRQ;
return virq;
} else {
/* Allocate a virtual interrupt number */
hint = hwirq % irq_virq_count;
virq = irq_alloc_virt(host, 1, hint);
if (virq == NO_IRQ) {
pr_debug("irq: -> virq allocation failed\n");
return NO_IRQ;
}
}
if (irq_setup_virq(host, virq, hwirq))
return NO_IRQ;
pr_debug("irq: irq %lu on host %s mapped to virtual irq %u\n",
hwirq, host->of_node ? host->of_node->full_name : "null", virq);
return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);
unsigned int irq_create_of_mapping(struct device_node *controller,
const u32 *intspec, unsigned int intsize)
{
struct irq_host *host;
irq_hw_number_t hwirq;
unsigned int type = IRQ_TYPE_NONE;
unsigned int virq;
if (controller == NULL)
host = irq_default_host;
else
host = irq_find_host(controller);
if (host == NULL) {
printk(KERN_WARNING "irq: no irq host found for %s !\n",
controller->full_name);
return NO_IRQ;
}
/* If host has no translation, then we assume interrupt line */
if (host->ops->xlate == NULL)
hwirq = intspec[0];
else {
if (host->ops->xlate(host, controller, intspec, intsize,
&hwirq, &type))
return NO_IRQ;
}
/* Create mapping */
virq = irq_create_mapping(host, hwirq);
if (virq == NO_IRQ)
return virq;
/* Set type if specified and different than the current one */
if (type != IRQ_TYPE_NONE &&
type != (irqd_get_trigger_type(irq_get_irq_data(virq))))
irq_set_irq_type(virq, type);
return virq;
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);
void irq_dispose_mapping(unsigned int virq)
{
struct irq_host *host;
irq_hw_number_t hwirq;
if (virq == NO_IRQ)
return;
host = irq_map[virq].host;
if (WARN_ON(host == NULL))
return;
/* Never unmap legacy interrupts */
if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
return;
irq_set_status_flags(virq, IRQ_NOREQUEST);
/* remove chip and handler */
irq_set_chip_and_handler(virq, NULL, NULL);
/* Make sure it's completed */
synchronize_irq(virq);
/* Tell the PIC about it */
if (host->ops->unmap)
host->ops->unmap(host, virq);
smp_mb();
/* Clear reverse map */
hwirq = irq_map[virq].hwirq;
switch(host->revmap_type) {
case IRQ_HOST_MAP_LINEAR:
if (hwirq < host->revmap_data.linear.size)
host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
break;
case IRQ_HOST_MAP_TREE:
mutex_lock(&revmap_trees_mutex);
radix_tree_delete(&host->revmap_data.tree, hwirq);
mutex_unlock(&revmap_trees_mutex);
break;
}
/* Destroy map */
smp_mb();
irq_map[virq].hwirq = host->inval_irq;
irq_free_descs(virq, 1);
/* Free it */
irq_free_virt(virq, 1);
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);
unsigned int irq_find_mapping(struct irq_host *host,
irq_hw_number_t hwirq)
{
unsigned int i;
unsigned int hint = hwirq % irq_virq_count;
/* Look for default host if necessary */
if (host == NULL)
host = irq_default_host;
if (host == NULL)
return NO_IRQ;
/* legacy -> bail early */
if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
return hwirq;
/* Slow path does a linear search of the map */
if (hint < NUM_ISA_INTERRUPTS)
hint = NUM_ISA_INTERRUPTS;
i = hint;
do {
if (irq_map[i].host == host &&
irq_map[i].hwirq == hwirq)
return i;
i++;
if (i >= irq_virq_count)
i = NUM_ISA_INTERRUPTS;
} while(i != hint);
return NO_IRQ;
}
EXPORT_SYMBOL_GPL(irq_find_mapping);
#ifdef CONFIG_SMP
int irq_choose_cpu(const struct cpumask *mask)
{
@ -925,232 +535,11 @@ int irq_choose_cpu(const struct cpumask *mask)
}
#endif
unsigned int irq_radix_revmap_lookup(struct irq_host *host,
irq_hw_number_t hwirq)
{
struct irq_map_entry *ptr;
unsigned int virq;
if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_TREE))
return irq_find_mapping(host, hwirq);
/*
* The ptr returned references the static global irq_map.
* but freeing an irq can delete nodes along the path to
* do the lookup via call_rcu.
*/
rcu_read_lock();
ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq);
rcu_read_unlock();
/*
* If found in radix tree, then fine.
* Else fallback to linear lookup - this should not happen in practice
* as it means that we failed to insert the node in the radix tree.
*/
if (ptr)
virq = ptr - irq_map;
else
virq = irq_find_mapping(host, hwirq);
return virq;
}
void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
irq_hw_number_t hwirq)
{
if (WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE))
return;
if (virq != NO_IRQ) {
mutex_lock(&revmap_trees_mutex);
radix_tree_insert(&host->revmap_data.tree, hwirq,
&irq_map[virq]);
mutex_unlock(&revmap_trees_mutex);
}
}
unsigned int irq_linear_revmap(struct irq_host *host,
irq_hw_number_t hwirq)
{
unsigned int *revmap;
if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_LINEAR))
return irq_find_mapping(host, hwirq);
/* Check revmap bounds */
if (unlikely(hwirq >= host->revmap_data.linear.size))
return irq_find_mapping(host, hwirq);
/* Check if revmap was allocated */
revmap = host->revmap_data.linear.revmap;
if (unlikely(revmap == NULL))
return irq_find_mapping(host, hwirq);
/* Fill up revmap with slow path if no mapping found */
if (unlikely(revmap[hwirq] == NO_IRQ))
revmap[hwirq] = irq_find_mapping(host, hwirq);
return revmap[hwirq];
}
unsigned int irq_alloc_virt(struct irq_host *host,
unsigned int count,
unsigned int hint)
{
unsigned long flags;
unsigned int i, j, found = NO_IRQ;
if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS))
return NO_IRQ;
raw_spin_lock_irqsave(&irq_big_lock, flags);
/* Use hint for 1 interrupt if any */
if (count == 1 && hint >= NUM_ISA_INTERRUPTS &&
hint < irq_virq_count && irq_map[hint].host == NULL) {
found = hint;
goto hint_found;
}
/* Look for count consecutive numbers in the allocatable
* (non-legacy) space
*/
for (i = NUM_ISA_INTERRUPTS, j = 0; i < irq_virq_count; i++) {
if (irq_map[i].host != NULL)
j = 0;
else
j++;
if (j == count) {
found = i - count + 1;
break;
}
}
if (found == NO_IRQ) {
raw_spin_unlock_irqrestore(&irq_big_lock, flags);
return NO_IRQ;
}
hint_found:
for (i = found; i < (found + count); i++) {
irq_map[i].hwirq = host->inval_irq;
smp_wmb();
irq_map[i].host = host;
}
raw_spin_unlock_irqrestore(&irq_big_lock, flags);
return found;
}
void irq_free_virt(unsigned int virq, unsigned int count)
{
unsigned long flags;
unsigned int i;
WARN_ON(virq < NUM_ISA_INTERRUPTS);
WARN_ON(count == 0 || (virq + count) > irq_virq_count);
if (virq < NUM_ISA_INTERRUPTS) {
if (virq + count < NUM_ISA_INTERRUPTS)
return;
count -= NUM_ISA_INTERRUPTS - virq;
virq = NUM_ISA_INTERRUPTS;
}
if (count > irq_virq_count || virq > irq_virq_count - count) {
if (virq > irq_virq_count)
return;
count = irq_virq_count - virq;
}
raw_spin_lock_irqsave(&irq_big_lock, flags);
for (i = virq; i < (virq + count); i++) {
struct irq_host *host;
host = irq_map[i].host;
irq_map[i].hwirq = host->inval_irq;
smp_wmb();
irq_map[i].host = NULL;
}
raw_spin_unlock_irqrestore(&irq_big_lock, flags);
}
int arch_early_irq_init(void)
{
return 0;
}
#ifdef CONFIG_VIRQ_DEBUG
static int virq_debug_show(struct seq_file *m, void *private)
{
unsigned long flags;
struct irq_desc *desc;
const char *p;
static const char none[] = "none";
void *data;
int i;
seq_printf(m, "%-5s %-7s %-15s %-18s %s\n", "virq", "hwirq",
"chip name", "chip data", "host name");
for (i = 1; i < nr_irqs; i++) {
desc = irq_to_desc(i);
if (!desc)
continue;
raw_spin_lock_irqsave(&desc->lock, flags);
if (desc->action && desc->action->handler) {
struct irq_chip *chip;
seq_printf(m, "%5d ", i);
seq_printf(m, "0x%05lx ", irq_map[i].hwirq);
chip = irq_desc_get_chip(desc);
if (chip && chip->name)
p = chip->name;
else
p = none;
seq_printf(m, "%-15s ", p);
data = irq_desc_get_chip_data(desc);
seq_printf(m, "0x%16p ", data);
if (irq_map[i].host && irq_map[i].host->of_node)
p = irq_map[i].host->of_node->full_name;
else
p = none;
seq_printf(m, "%s\n", p);
}
raw_spin_unlock_irqrestore(&desc->lock, flags);
}
return 0;
}
static int virq_debug_open(struct inode *inode, struct file *file)
{
return single_open(file, virq_debug_show, inode->i_private);
}
static const struct file_operations virq_debug_fops = {
.open = virq_debug_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int __init irq_debugfs_init(void)
{
if (debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root,
NULL, &virq_debug_fops) == NULL)
return -ENOMEM;
return 0;
}
__initcall(irq_debugfs_init);
#endif /* CONFIG_VIRQ_DEBUG */
#ifdef CONFIG_PPC64
static int __init setup_noirqdistrib(char *str)
{
View File
@@ -21,7 +21,7 @@
#include <asm/prom.h>
static struct device_node *cpld_pic_node;
-static struct irq_host *cpld_pic_host;
+static struct irq_domain *cpld_pic_host;
/*
* Bits to ignore in the misc_status register
@@ -123,13 +123,13 @@ cpld_pic_cascade(unsigned int irq, struct irq_desc *desc)
}
static int
-cpld_pic_host_match(struct irq_host *h, struct device_node *node)
+cpld_pic_host_match(struct irq_domain *h, struct device_node *node)
{
return cpld_pic_node == node;
}
static int
-cpld_pic_host_map(struct irq_host *h, unsigned int virq,
+cpld_pic_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
irq_set_status_flags(virq, IRQ_LEVEL);
@@ -137,8 +137,7 @@ cpld_pic_host_map(struct irq_host *h, unsigned int virq,
return 0;
}
-static struct
-irq_host_ops cpld_pic_host_ops = {
+static const struct irq_domain_ops cpld_pic_host_ops = {
.match = cpld_pic_host_match,
.map = cpld_pic_host_map,
};
@@ -191,8 +190,7 @@ mpc5121_ads_cpld_pic_init(void)
cpld_pic_node = of_node_get(np);
-cpld_pic_host =
-irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, 16, &cpld_pic_host_ops, 16);
+cpld_pic_host = irq_domain_add_linear(np, 16, &cpld_pic_host_ops, NULL);
if (!cpld_pic_host) {
printk(KERN_ERR "CPLD PIC: failed to allocate irq host!\n");
goto end;
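The same mechanical conversion repeats in the files below; a condensed before/after sketch with illustrative names:

	/* old: allocate, then attach private data by hand */
	host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, 16, &ops, 16);
	host->host_data = priv;

	/* new: private data is passed at creation time */
	domain = irq_domain_add_linear(np, 16, &ops, priv);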
View File
@@ -45,7 +45,7 @@ static struct of_device_id mpc5200_gpio_ids[] __initdata = {
struct media5200_irq {
void __iomem *regs;
spinlock_t lock;
-struct irq_host *irqhost;
+struct irq_domain *irqhost;
};
struct media5200_irq media5200_irq;
@@ -112,7 +112,7 @@ void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc)
raw_spin_unlock(&desc->lock);
}
-static int media5200_irq_map(struct irq_host *h, unsigned int virq,
+static int media5200_irq_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
pr_debug("%s: h=%p, virq=%i, hwirq=%i\n", __func__, h, virq, (int)hw);
@@ -122,7 +122,7 @@ static int media5200_irq_map(struct irq_host *h, unsigned int virq,
return 0;
}
-static int media5200_irq_xlate(struct irq_host *h, struct device_node *ct,
+static int media5200_irq_xlate(struct irq_domain *h, struct device_node *ct,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq,
unsigned int *out_flags)
@@ -136,7 +136,7 @@ static int media5200_irq_xlate(struct irq_host *h, struct device_node *ct,
return 0;
}
-static struct irq_host_ops media5200_irq_ops = {
+static const struct irq_domain_ops media5200_irq_ops = {
.map = media5200_irq_map,
.xlate = media5200_irq_xlate,
};
@@ -173,15 +173,12 @@ static void __init media5200_init_irq(void)
spin_lock_init(&media5200_irq.lock);
-media5200_irq.irqhost = irq_alloc_host(fpga_np, IRQ_HOST_MAP_LINEAR,
-MEDIA5200_NUM_IRQS,
-&media5200_irq_ops, -1);
+media5200_irq.irqhost = irq_domain_add_linear(fpga_np,
+MEDIA5200_NUM_IRQS, &media5200_irq_ops, &media5200_irq);
if (!media5200_irq.irqhost)
goto out;
pr_debug("%s: allocated irqhost\n", __func__);
-media5200_irq.irqhost->host_data = &media5200_irq;
irq_set_handler_data(cascade_virq, &media5200_irq);
irq_set_chained_handler(cascade_virq, media5200_irq_cascade);
View File
@@ -81,7 +81,7 @@ MODULE_LICENSE("GPL");
* @regs: virtual address of GPT registers
* @lock: spinlock to coordinate between different functions.
* @gc: gpio_chip instance structure; used when GPIO is enabled
-* @irqhost: Pointer to irq_host instance; used when IRQ mode is supported
+* @irqhost: Pointer to irq_domain instance; used when IRQ mode is supported
* @wdt_mode: only relevant for gpt0: bit 0 (MPC52xx_GPT_CAN_WDT) indicates
* if the gpt may be used as wdt, bit 1 (MPC52xx_GPT_IS_WDT) indicates
* if the timer is actively used as wdt which blocks gpt functions
@@ -91,7 +91,7 @@ struct mpc52xx_gpt_priv {
struct device *dev;
struct mpc52xx_gpt __iomem *regs;
spinlock_t lock;
-struct irq_host *irqhost;
+struct irq_domain *irqhost;
u32 ipb_freq;
u8 wdt_mode;
@@ -204,7 +204,7 @@ void mpc52xx_gpt_irq_cascade(unsigned int virq, struct irq_desc *desc)
}
}
-static int mpc52xx_gpt_irq_map(struct irq_host *h, unsigned int virq,
+static int mpc52xx_gpt_irq_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
struct mpc52xx_gpt_priv *gpt = h->host_data;
@@ -216,7 +216,7 @@ static int mpc52xx_gpt_irq_map(struct irq_host *h, unsigned int virq,
return 0;
}
-static int mpc52xx_gpt_irq_xlate(struct irq_host *h, struct device_node *ct,
+static int mpc52xx_gpt_irq_xlate(struct irq_domain *h, struct device_node *ct,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq,
unsigned int *out_flags)
@@ -236,7 +236,7 @@ static int mpc52xx_gpt_irq_xlate(struct irq_host *h, struct device_node *ct,
return 0;
}
-static struct irq_host_ops mpc52xx_gpt_irq_ops = {
+static const struct irq_domain_ops mpc52xx_gpt_irq_ops = {
.map = mpc52xx_gpt_irq_map,
.xlate = mpc52xx_gpt_irq_xlate,
};
@@ -252,14 +252,12 @@ mpc52xx_gpt_irq_setup(struct mpc52xx_gpt_priv *gpt, struct device_node *node)
if (!cascade_virq)
return;
-gpt->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR, 1,
-&mpc52xx_gpt_irq_ops, -1);
+gpt->irqhost = irq_domain_add_linear(node, 1, &mpc52xx_gpt_irq_ops, gpt);
if (!gpt->irqhost) {
-dev_err(gpt->dev, "irq_alloc_host() failed\n");
+dev_err(gpt->dev, "irq_domain_add_linear() failed\n");
return;
}
-gpt->irqhost->host_data = gpt;
irq_set_handler_data(cascade_virq, gpt);
irq_set_chained_handler(cascade_virq, mpc52xx_gpt_irq_cascade);
View File
@@ -132,7 +132,7 @@ static struct of_device_id mpc52xx_sdma_ids[] __initdata = {
static struct mpc52xx_intr __iomem *intr;
static struct mpc52xx_sdma __iomem *sdma;
-static struct irq_host *mpc52xx_irqhost = NULL;
+static struct irq_domain *mpc52xx_irqhost = NULL;
static unsigned char mpc52xx_map_senses[4] = {
IRQ_TYPE_LEVEL_HIGH,
@@ -301,7 +301,7 @@ static int mpc52xx_is_extirq(int l1, int l2)
/**
* mpc52xx_irqhost_xlate - translate virq# from device tree interrupts property
*/
-static int mpc52xx_irqhost_xlate(struct irq_host *h, struct device_node *ct,
+static int mpc52xx_irqhost_xlate(struct irq_domain *h, struct device_node *ct,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq,
unsigned int *out_flags)
@@ -335,7 +335,7 @@ static int mpc52xx_irqhost_xlate(struct irq_host *h, struct device_node *ct,
/**
* mpc52xx_irqhost_map - Hook to map from virq to an irq_chip structure
*/
-static int mpc52xx_irqhost_map(struct irq_host *h, unsigned int virq,
+static int mpc52xx_irqhost_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t irq)
{
int l1irq;
@@ -384,7 +384,7 @@ static int mpc52xx_irqhost_map(struct irq_host *h, unsigned int virq,
return 0;
}
-static struct irq_host_ops mpc52xx_irqhost_ops = {
+static const struct irq_domain_ops mpc52xx_irqhost_ops = {
.xlate = mpc52xx_irqhost_xlate,
.map = mpc52xx_irqhost_map,
};
@@ -444,9 +444,9 @@ void __init mpc52xx_init_irq(void)
* As last step, add an irq host to translate the real
* hw irq information provided by the ofw to linux virq
*/
-mpc52xx_irqhost = irq_alloc_host(picnode, IRQ_HOST_MAP_LINEAR,
+mpc52xx_irqhost = irq_domain_add_linear(picnode,
MPC52xx_IRQ_HIGHTESTHWIRQ,
-&mpc52xx_irqhost_ops, -1);
+&mpc52xx_irqhost_ops, NULL);
if (!mpc52xx_irqhost)
panic(__FILE__ ": Cannot allocate the IRQ host\n");
View File
@@ -29,7 +29,7 @@ static DEFINE_RAW_SPINLOCK(pci_pic_lock);
struct pq2ads_pci_pic {
struct device_node *node;
-struct irq_host *host;
+struct irq_domain *host;
struct {
u32 stat;
@@ -103,7 +103,7 @@ static void pq2ads_pci_irq_demux(unsigned int irq, struct irq_desc *desc)
}
}
-static int pci_pic_host_map(struct irq_host *h, unsigned int virq,
+static int pci_pic_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
irq_set_status_flags(virq, IRQ_LEVEL);
@@ -112,14 +112,14 @@ static int pci_pic_host_map(struct irq_host *h, unsigned int virq,
return 0;
}
-static struct irq_host_ops pci_pic_host_ops = {
+static const struct irq_domain_ops pci_pic_host_ops = {
.map = pci_pic_host_map,
};
int __init pq2ads_pci_init_irq(void)
{
struct pq2ads_pci_pic *priv;
-struct irq_host *host;
+struct irq_domain *host;
struct device_node *np;
int ret = -ENODEV;
int irq;
@@ -156,17 +156,13 @@ int __init pq2ads_pci_init_irq(void)
out_be32(&priv->regs->mask, ~0);
mb();
-host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, NUM_IRQS,
-&pci_pic_host_ops, NUM_IRQS);
+host = irq_domain_add_linear(np, NUM_IRQS, &pci_pic_host_ops, priv);
if (!host) {
ret = -ENOMEM;
goto out_unmap_regs;
}
-host->host_data = priv;
priv->host = host;
-host->host_data = priv;
irq_set_handler_data(irq, priv);
irq_set_chained_handler(irq, pq2ads_pci_irq_demux);
View File
@@ -51,7 +51,7 @@ static struct socrates_fpga_irq_info fpga_irqs[SOCRATES_FPGA_NUM_IRQS] = {
static DEFINE_RAW_SPINLOCK(socrates_fpga_pic_lock);
static void __iomem *socrates_fpga_pic_iobase;
-static struct irq_host *socrates_fpga_pic_irq_host;
+static struct irq_domain *socrates_fpga_pic_irq_host;
static unsigned int socrates_fpga_irqs[3];
static inline uint32_t socrates_fpga_pic_read(int reg)
@@ -227,7 +227,7 @@ static struct irq_chip socrates_fpga_pic_chip = {
.irq_set_type = socrates_fpga_pic_set_type,
};
-static int socrates_fpga_pic_host_map(struct irq_host *h, unsigned int virq,
+static int socrates_fpga_pic_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hwirq)
{
/* All interrupts are LEVEL sensitive */
@@ -238,7 +238,7 @@ static int socrates_fpga_pic_host_map(struct irq_host *h, unsigned int virq,
return 0;
}
-static int socrates_fpga_pic_host_xlate(struct irq_host *h,
+static int socrates_fpga_pic_host_xlate(struct irq_domain *h,
struct device_node *ct, const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
@@ -269,7 +269,7 @@ static int socrates_fpga_pic_host_xlate(struct irq_host *h,
return 0;
}
-static struct irq_host_ops socrates_fpga_pic_host_ops = {
+static const struct irq_domain_ops socrates_fpga_pic_host_ops = {
.map = socrates_fpga_pic_host_map,
.xlate = socrates_fpga_pic_host_xlate,
};
@@ -279,10 +279,9 @@ void socrates_fpga_pic_init(struct device_node *pic)
unsigned long flags;
int i;
-/* Setup an irq_host structure */
-socrates_fpga_pic_irq_host = irq_alloc_host(pic, IRQ_HOST_MAP_LINEAR,
-SOCRATES_FPGA_NUM_IRQS, &socrates_fpga_pic_host_ops,
-SOCRATES_FPGA_NUM_IRQS);
+/* Setup an irq_domain structure */
+socrates_fpga_pic_irq_host = irq_domain_add_linear(pic,
+SOCRATES_FPGA_NUM_IRQS, &socrates_fpga_pic_host_ops, NULL);
if (socrates_fpga_pic_irq_host == NULL) {
pr_err("FPGA PIC: Unable to allocate host\n");
return;
View File
@@ -50,7 +50,7 @@
static DEFINE_RAW_SPINLOCK(gef_pic_lock);
static void __iomem *gef_pic_irq_reg_base;
-static struct irq_host *gef_pic_irq_host;
+static struct irq_domain *gef_pic_irq_host;
static int gef_pic_cascade_irq;
/*
@@ -153,7 +153,7 @@ static struct irq_chip gef_pic_chip = {
/* When an interrupt is being configured, this call allows some flexibilty
* in deciding which irq_chip structure is used
*/
-static int gef_pic_host_map(struct irq_host *h, unsigned int virq,
+static int gef_pic_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hwirq)
{
/* All interrupts are LEVEL sensitive */
@@ -163,7 +163,7 @@ static int gef_pic_host_map(struct irq_host *h, unsigned int virq,
return 0;
}
-static int gef_pic_host_xlate(struct irq_host *h, struct device_node *ct,
+static int gef_pic_host_xlate(struct irq_domain *h, struct device_node *ct,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
@@ -177,7 +177,7 @@ static int gef_pic_host_xlate(struct irq_host *h, struct device_node *ct,
return 0;
}
-static struct irq_host_ops gef_pic_host_ops = {
+static const struct irq_domain_ops gef_pic_host_ops = {
.map = gef_pic_host_map,
.xlate = gef_pic_host_xlate,
};
@@ -211,10 +211,9 @@ void __init gef_pic_init(struct device_node *np)
return;
}
-/* Setup an irq_host structure */
-gef_pic_irq_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR,
-GEF_PIC_NUM_IRQS,
-&gef_pic_host_ops, NO_IRQ);
+/* Setup an irq_domain structure */
+gef_pic_irq_host = irq_domain_add_linear(np, GEF_PIC_NUM_IRQS,
+&gef_pic_host_ops, NULL);
if (gef_pic_irq_host == NULL)
return;
View File
@@ -67,7 +67,7 @@
struct axon_msic {
-struct irq_host *irq_host;
+struct irq_domain *irq_domain;
__le32 *fifo_virt;
dma_addr_t fifo_phys;
dcr_host_t dcr_host;
@@ -152,7 +152,7 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
static struct axon_msic *find_msi_translator(struct pci_dev *dev)
{
-struct irq_host *irq_host;
+struct irq_domain *irq_domain;
struct device_node *dn, *tmp;
const phandle *ph;
struct axon_msic *msic = NULL;
@@ -184,14 +184,14 @@ static struct axon_msic *find_msi_translator(struct pci_dev *dev)
goto out_error;
}
-irq_host = irq_find_host(dn);
-if (!irq_host) {
-dev_dbg(&dev->dev, "axon_msi: no irq_host found for node %s\n",
+irq_domain = irq_find_host(dn);
+if (!irq_domain) {
+dev_dbg(&dev->dev, "axon_msi: no irq_domain found for node %s\n",
dn->full_name);
goto out_error;
}
-msic = irq_host->host_data;
+msic = irq_domain->host_data;
out_error:
of_node_put(dn);
@@ -280,7 +280,7 @@ static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
BUILD_BUG_ON(NR_IRQS > 65536);
list_for_each_entry(entry, &dev->msi_list, list) {
-virq = irq_create_direct_mapping(msic->irq_host);
+virq = irq_create_direct_mapping(msic->irq_domain);
if (virq == NO_IRQ) {
dev_warn(&dev->dev,
"axon_msi: virq allocation failed!\n");
@@ -318,7 +318,7 @@ static struct irq_chip msic_irq_chip = {
.name = "AXON-MSI",
};
-static int msic_host_map(struct irq_host *h, unsigned int virq,
+static int msic_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
irq_set_chip_data(virq, h->host_data);
@@ -327,7 +327,7 @@ static int msic_host_map(struct irq_host *h, unsigned int virq,
return 0;
}
-static struct irq_host_ops msic_host_ops = {
+static const struct irq_domain_ops msic_host_ops = {
.map = msic_host_map,
};
@@ -337,7 +337,7 @@ static void axon_msi_shutdown(struct platform_device *device)
u32 tmp;
pr_devel("axon_msi: disabling %s\n",
-msic->irq_host->of_node->full_name);
+msic->irq_domain->of_node->full_name);
tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG);
tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE;
msic_dcr_write(msic, MSIC_CTRL_REG, tmp);
@@ -392,16 +392,13 @@ static int axon_msi_probe(struct platform_device *device)
}
memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);
-msic->irq_host = irq_alloc_host(dn, IRQ_HOST_MAP_NOMAP,
-NR_IRQS, &msic_host_ops, 0);
-if (!msic->irq_host) {
-printk(KERN_ERR "axon_msi: couldn't allocate irq_host for %s\n",
+msic->irq_domain = irq_domain_add_nomap(dn, &msic_host_ops, msic);
+if (!msic->irq_domain) {
+printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %s\n",
dn->full_name);
goto out_free_fifo;
}
-msic->irq_host->host_data = msic;
irq_set_handler_data(virq, msic);
irq_set_chained_handler(virq, axon_msi_cascade);
pr_devel("axon_msi: irq 0x%x setup for axon_msi\n", virq);
View File
@@ -34,7 +34,7 @@ static DEFINE_RAW_SPINLOCK(beatic_irq_mask_lock);
static uint64_t beatic_irq_mask_enable[(MAX_IRQS+255)/64];
static uint64_t beatic_irq_mask_ack[(MAX_IRQS+255)/64];
-static struct irq_host *beatic_host;
+static struct irq_domain *beatic_host;
/*
* In this implementation, "virq" == "IRQ plug number",
@@ -122,7 +122,7 @@ static struct irq_chip beatic_pic = {
*
* Note that the number (virq) is already assigned at upper layer.
*/
-static void beatic_pic_host_unmap(struct irq_host *h, unsigned int virq)
+static void beatic_pic_host_unmap(struct irq_domain *h, unsigned int virq)
{
beat_destruct_irq_plug(virq);
}
@@ -133,7 +133,7 @@ static void beatic_pic_host_unmap(struct irq_host *h, unsigned int virq)
*
* Note that the number (virq) is already assigned at upper layer.
*/
-static int beatic_pic_host_map(struct irq_host *h, unsigned int virq,
+static int beatic_pic_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
int64_t err;
@@ -154,7 +154,7 @@ static int beatic_pic_host_map(struct irq_host *h, unsigned int virq,
* Called from irq_create_of_mapping() only.
* Note: We have only 1 entry to translate.
*/
-static int beatic_pic_host_xlate(struct irq_host *h, struct device_node *ct,
+static int beatic_pic_host_xlate(struct irq_domain *h, struct device_node *ct,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq,
unsigned int *out_flags)
@@ -166,13 +166,13 @@ static int beatic_pic_host_xlate(struct irq_host *h, struct device_node *ct,
return 0;
}
-static int beatic_pic_host_match(struct irq_host *h, struct device_node *np)
+static int beatic_pic_host_match(struct irq_domain *h, struct device_node *np)
{
/* Match all */
return 1;
}
-static struct irq_host_ops beatic_pic_host_ops = {
+static const struct irq_domain_ops beatic_pic_host_ops = {
.map = beatic_pic_host_map,
.unmap = beatic_pic_host_unmap,
.xlate = beatic_pic_host_xlate,
@@ -239,9 +239,7 @@ void __init beatic_init_IRQ(void)
ppc_md.get_irq = beatic_get_irq;
/* Allocate an irq host */
-beatic_host = irq_alloc_host(NULL, IRQ_HOST_MAP_NOMAP, 0,
-&beatic_pic_host_ops,
-0);
+beatic_host = irq_domain_add_nomap(NULL, &beatic_pic_host_ops, NULL);
BUG_ON(beatic_host == NULL);
irq_set_default_host(beatic_host);
}
View File
@@ -56,7 +56,7 @@ struct iic {
static DEFINE_PER_CPU(struct iic, cpu_iic);
#define IIC_NODE_COUNT 2
-static struct irq_host *iic_host;
+static struct irq_domain *iic_host;
/* Convert between "pending" bits and hw irq number */
static irq_hw_number_t iic_pending_to_hwnum(struct cbe_iic_pending_bits bits)
@@ -186,7 +186,7 @@ void iic_message_pass(int cpu, int msg)
out_be64(&per_cpu(cpu_iic, cpu).regs->generate, (0xf - msg) << 4);
}
-struct irq_host *iic_get_irq_host(int node)
+struct irq_domain *iic_get_irq_host(int node)
{
return iic_host;
}
@@ -222,13 +222,13 @@ void iic_request_IPIs(void)
#endif /* CONFIG_SMP */
-static int iic_host_match(struct irq_host *h, struct device_node *node)
+static int iic_host_match(struct irq_domain *h, struct device_node *node)
{
return of_device_is_compatible(node,
"IBM,CBEA-Internal-Interrupt-Controller");
}
-static int iic_host_map(struct irq_host *h, unsigned int virq,
+static int iic_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
switch (hw & IIC_IRQ_TYPE_MASK) {
@@ -245,7 +245,7 @@ static int iic_host_map(struct irq_host *h, unsigned int virq,
return 0;
}
-static int iic_host_xlate(struct irq_host *h, struct device_node *ct,
+static int iic_host_xlate(struct irq_domain *h, struct device_node *ct,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_flags)
@@ -285,7 +285,7 @@ static int iic_host_xlate(struct irq_host *h, struct device_node *ct,
return 0;
}
-static struct irq_host_ops iic_host_ops = {
+static const struct irq_domain_ops iic_host_ops = {
.match = iic_host_match,
.map = iic_host_map,
.xlate = iic_host_xlate,
@@ -378,8 +378,8 @@ static int __init setup_iic(void)
void __init iic_init_IRQ(void)
{
/* Setup an irq host data structure */
-iic_host = irq_alloc_host(NULL, IRQ_HOST_MAP_LINEAR, IIC_SOURCE_COUNT,
-&iic_host_ops, IIC_IRQ_INVALID);
+iic_host = irq_domain_add_linear(NULL, IIC_SOURCE_COUNT, &iic_host_ops,
+NULL);
BUG_ON(iic_host == NULL);
irq_set_default_host(iic_host);
View File
@@ -62,7 +62,7 @@ enum {
#define SPIDER_IRQ_INVALID 63
struct spider_pic {
-struct irq_host *host;
+struct irq_domain *host;
void __iomem *regs;
unsigned int node_id;
};
@@ -168,7 +168,7 @@ static struct irq_chip spider_pic = {
.irq_set_type = spider_set_irq_type,
};
-static int spider_host_map(struct irq_host *h, unsigned int virq,
+static int spider_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
irq_set_chip_data(virq, h->host_data);
@@ -180,7 +180,7 @@ static int spider_host_map(struct irq_host *h, unsigned int virq,
return 0;
}
-static int spider_host_xlate(struct irq_host *h, struct device_node *ct,
+static int spider_host_xlate(struct irq_domain *h, struct device_node *ct,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_flags)
@@ -194,7 +194,7 @@ static int spider_host_xlate(struct irq_host *h, struct device_node *ct,
return 0;
}
-static struct irq_host_ops spider_host_ops = {
+static const struct irq_domain_ops spider_host_ops = {
.map = spider_host_map,
.xlate = spider_host_xlate,
};
@@ -299,12 +299,10 @@ static void __init spider_init_one(struct device_node *of_node, int chip,
panic("spider_pic: can't map registers !");
/* Allocate a host */
-pic->host = irq_alloc_host(of_node, IRQ_HOST_MAP_LINEAR,
-SPIDER_SRC_COUNT, &spider_host_ops,
-SPIDER_IRQ_INVALID);
+pic->host = irq_domain_add_linear(of_node, SPIDER_SRC_COUNT,
+&spider_host_ops, pic);
if (pic->host == NULL)
panic("spider_pic: can't allocate irq host !");
-pic->host->host_data = pic;
/* Go through all sources and disable them */
for (i = 0; i < SPIDER_SRC_COUNT; i++) {
View File
@@ -96,9 +96,9 @@ static struct irq_chip flipper_pic = {
*
*/
-static struct irq_host *flipper_irq_host;
+static struct irq_domain *flipper_irq_host;
-static int flipper_pic_map(struct irq_host *h, unsigned int virq,
+static int flipper_pic_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hwirq)
{
irq_set_chip_data(virq, h->host_data);
@@ -107,13 +107,13 @@ static int flipper_pic_map(struct irq_host *h, unsigned int virq,
return 0;
}
-static int flipper_pic_match(struct irq_host *h, struct device_node *np)
+static int flipper_pic_match(struct irq_domain *h, struct device_node *np)
{
return 1;
}
-static struct irq_host_ops flipper_irq_host_ops = {
+static const struct irq_domain_ops flipper_irq_domain_ops = {
.map = flipper_pic_map,
.match = flipper_pic_match,
};
@@ -130,10 +130,10 @@ static void __flipper_quiesce(void __iomem *io_base)
out_be32(io_base + FLIPPER_ICR, 0xffffffff);
}
-struct irq_host * __init flipper_pic_init(struct device_node *np)
+struct irq_domain * __init flipper_pic_init(struct device_node *np)
{
struct device_node *pi;
-struct irq_host *irq_host = NULL;
+struct irq_domain *irq_domain = NULL;
struct resource res;
void __iomem *io_base;
int retval;
@@ -159,17 +159,15 @@ struct irq_host * __init flipper_pic_init(struct device_node *np)
__flipper_quiesce(io_base);
-irq_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, FLIPPER_NR_IRQS,
-&flipper_irq_host_ops, -1);
-if (!irq_host) {
-pr_err("failed to allocate irq_host\n");
+irq_domain = irq_domain_add_linear(np, FLIPPER_NR_IRQS,
+&flipper_irq_domain_ops, io_base);
+if (!irq_domain) {
+pr_err("failed to allocate irq_domain\n");
return NULL;
}
-irq_host->host_data = io_base;
out:
-return irq_host;
+return irq_domain;
}
unsigned int flipper_pic_get_irq(void)
View File
@@ -89,9 +89,9 @@ static struct irq_chip hlwd_pic = {
*
*/
-static struct irq_host *hlwd_irq_host;
+static struct irq_domain *hlwd_irq_host;
-static int hlwd_pic_map(struct irq_host *h, unsigned int virq,
+static int hlwd_pic_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hwirq)
{
irq_set_chip_data(virq, h->host_data);
@@ -100,11 +100,11 @@ static int hlwd_pic_map(struct irq_host *h, unsigned int virq,
return 0;
}
-static struct irq_host_ops hlwd_irq_host_ops = {
+static const struct irq_domain_ops hlwd_irq_domain_ops = {
.map = hlwd_pic_map,
};
-static unsigned int __hlwd_pic_get_irq(struct irq_host *h)
+static unsigned int __hlwd_pic_get_irq(struct irq_domain *h)
{
void __iomem *io_base = h->host_data;
int irq;
@@ -123,14 +123,14 @@ static void hlwd_pic_irq_cascade(unsigned int cascade_virq,
struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
-struct irq_host *irq_host = irq_get_handler_data(cascade_virq);
+struct irq_domain *irq_domain = irq_get_handler_data(cascade_virq);
unsigned int virq;
raw_spin_lock(&desc->lock);
chip->irq_mask(&desc->irq_data); /* IRQ_LEVEL */
raw_spin_unlock(&desc->lock);
-virq = __hlwd_pic_get_irq(irq_host);
+virq = __hlwd_pic_get_irq(irq_domain);
if (virq != NO_IRQ)
generic_handle_irq(virq);
else
@@ -155,9 +155,9 @@ static void __hlwd_quiesce(void __iomem *io_base)
out_be32(io_base + HW_BROADWAY_ICR, 0xffffffff);
}
-struct irq_host *hlwd_pic_init(struct device_node *np)
+struct irq_domain *hlwd_pic_init(struct device_node *np)
{
-struct irq_host *irq_host;
+struct irq_domain *irq_domain;
struct resource res;
void __iomem *io_base;
int retval;
@@ -177,15 +177,14 @@ struct irq_host *hlwd_pic_init(struct device_node *np)
__hlwd_quiesce(io_base);
-irq_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, HLWD_NR_IRQS,
-&hlwd_irq_host_ops, -1);
-if (!irq_host) {
-pr_err("failed to allocate irq_host\n");
+irq_domain = irq_domain_add_linear(np, HLWD_NR_IRQS,
+&hlwd_irq_domain_ops, io_base);
+if (!irq_domain) {
+pr_err("failed to allocate irq_domain\n");
return NULL;
}
-irq_host->host_data = io_base;
-return irq_host;
+return irq_domain;
}
unsigned int hlwd_pic_get_irq(void)
@@ -200,7 +199,7 @@ unsigned int hlwd_pic_get_irq(void)
void hlwd_pic_probe(void)
{
-struct irq_host *host;
+struct irq_domain *host;
struct device_node *np;
const u32 *interrupts;
int cascade_virq;
View File
@@ -342,7 +342,7 @@ unsigned int iSeries_get_irq(void)
#ifdef CONFIG_PCI
-static int iseries_irq_host_map(struct irq_host *h, unsigned int virq,
+static int iseries_irq_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
irq_set_chip_and_handler(virq, &iseries_pic, handle_fasteoi_irq);
@@ -350,13 +350,13 @@ static int iseries_irq_host_map(struct irq_host *h, unsigned int virq,
return 0;
}
-static int iseries_irq_host_match(struct irq_host *h, struct device_node *np)
+static int iseries_irq_host_match(struct irq_domain *h, struct device_node *np)
{
/* Match all */
return 1;
}
-static struct irq_host_ops iseries_irq_host_ops = {
+static const struct irq_domain_ops iseries_irq_domain_ops = {
.map = iseries_irq_host_map,
.match = iseries_irq_host_match,
};
@@ -368,7 +368,7 @@ static struct irq_host_ops iseries_irq_host_ops = {
void __init iSeries_init_IRQ(void)
{
/* Register PCI event handler and open an event path */
-struct irq_host *host;
+struct irq_domain *host;
int ret;
/*
@@ -380,8 +380,7 @@ void __init iSeries_init_IRQ(void)
/* Create irq host. No need for a revmap since HV will give us
* back our virtual irq number
*/
-host = irq_alloc_host(NULL, IRQ_HOST_MAP_NOMAP, 0,
-&iseries_irq_host_ops, 0);
+host = irq_domain_add_nomap(NULL, &iseries_irq_domain_ops, NULL);
BUG_ON(host == NULL);
irq_set_default_host(host);
Some files were not shown because too many files have changed in this diff.