Merge branch 'ib/4.10-sparse-keymap-managed' into next
This brings in a version of the sparse keymap code that uses managed memory.
This commit is contained in: commit 7b179e251b

CREDITS | 5

@@ -2478,12 +2478,11 @@ S: D-90453 Nuernberg
 S: Germany
 
 N: Arnaldo Carvalho de Melo
-E: acme@ghostprotocols.net
+E: acme@kernel.org
 E: arnaldo.melo@gmail.com
 E: acme@redhat.com
-W: http://oops.ghostprotocols.net:81/blog/
 P: 1024D/9224DF01 D5DF E3BB E3C8 BCBB F8AD 841A B6AB 4681 9224 DF01
-D: IPX, LLC, DCCP, cyc2x, wl3501_cs, net/ hacks
+D: tools/, IPX, LLC, DCCP, cyc2x, wl3501_cs, net/ hacks
 S: Brazil
 
 N: Karsten Merker
@@ -15,6 +15,9 @@ Properties:
 Second cell specifies the irq distribution mode to cores
      0=Round Robin; 1=cpu0, 2=cpu1, 4=cpu2, 8=cpu3
 
+The second cell in interrupts property is deprecated and may be ignored by
+the kernel.
+
 intc accessed via the special ARC AUX register interface, hence "reg" property
 is not specified.
 
@@ -7,7 +7,7 @@ have dual GMAC each represented by a child node..
 * Ethernet controller node
 
 Required properties:
-- compatible: Should be "mediatek,mt7623-eth"
+- compatible: Should be "mediatek,mt2701-eth"
 - reg: Address and length of the register set for the device
 - interrupts: Should contain the three frame engines interrupts in numeric
   order. These are fe_int0, fe_int1 and fe_int2.
@@ -19,8 +19,9 @@ Optional Properties:
 specifications. If neither of these are specified, the default is to
 assume clause 22.
 
-If the phy's identifier is known then the list may contain an entry
-of the form: "ethernet-phy-idAAAA.BBBB" where
+If the PHY reports an incorrect ID (or none at all) then the
+"compatible" list may contain an entry with the correct PHY ID in the
+form: "ethernet-phy-idAAAA.BBBB" where
    AAAA - The value of the 16 bit Phy Identifier 1 register as
           4 hex digits. This is the chip vendor OUI bits 3:18
    BBBB - The value of the 16 bit Phy Identifier 2 register as
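The "ethernet-phy-idAAAA.BBBB" form above is just the two 16-bit PHY Identifier registers printed as four hex digits each. A minimal C sketch of that mapping; the register values below are made-up placeholders, not taken from this patch:

    /* Hypothetical illustration of the AAAA.BBBB naming described above. */
    #include <stdio.h>
    #include <stdint.h>

    static void print_phy_compatible(uint16_t id1, uint16_t id2)
    {
        /* AAAA = PHY Identifier 1 register, BBBB = PHY Identifier 2 register */
        printf("compatible = \"ethernet-phy-id%04x.%04x\";\n", id1, id2);
    }

    int main(void)
    {
        print_phy_compatible(0x0141, 0x0e90); /* example values only */
        return 0;
    }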
@@ -212,10 +212,11 @@ asynchronous manner and the value may not be very precise. To see a precise
 snapshot of a moment, you can see /proc/<pid>/smaps file and scan page table.
 It's slow but very precise.
 
-Table 1-2: Contents of the status files (as of 4.1)
+Table 1-2: Contents of the status files (as of 4.8)
 ..............................................................................
  Field        Content
  Name         filename of the executable
+ Umask        file mode creation mask
  State        state (R is running, S is sleeping, D is sleeping
               in an uninterruptible wait, Z is zombie,
               T is traced or stopped)

@@ -226,7 +227,6 @@ Table 1-2: Contents of the status files (as of 4.1)
 TracerPid     PID of process tracing this process (0 if not)
 Uid           Real, effective, saved set, and file system UIDs
 Gid           Real, effective, saved set, and file system GIDs
-Umask         file mode creation mask
 FDSize        number of file descriptor slots currently allocated
 Groups        supplementary group list
 NStgid        descendant namespace thread group ID hierarchy

@@ -236,6 +236,7 @@ Table 1-2: Contents of the status files (as of 4.1)
 VmPeak        peak virtual memory size
 VmSize        total program size
 VmLck         locked memory size
+VmPin         pinned memory size
 VmHWM         peak resident set size ("high water mark")
 VmRSS         size of memory portions. It contains the three
               following parts (VmRSS = RssAnon + RssFile + RssShmem)
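The fields documented above, including the Umask and VmPin rows added here, can be read straight out of /proc/<pid>/status. A minimal C sketch (not part of the patch) that prints a few of them for the current process:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char line[256];
        FILE *f = fopen("/proc/self/status", "r");

        if (!f)
            return 1;
        while (fgets(line, sizeof(line), f)) {
            /* Print only the lines discussed in the table above. */
            if (!strncmp(line, "Umask:", 6) ||
                !strncmp(line, "VmPin:", 6) ||
                !strncmp(line, "VmRSS:", 6))
                fputs(line, stdout);
        }
        fclose(f);
        return 0;
    }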
@@ -33,11 +33,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 Closes the cec device. Resources associated with the file descriptor are
 freed. The device configuration remain unchanged.
 

@@ -39,11 +39,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 The :c:func:`ioctl()` function manipulates cec device parameters. The
 argument ``fd`` must be an open file descriptor.
 

@@ -46,11 +46,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 To open a cec device applications call :c:func:`open()` with the
 desired device name. The function has no side effects; the device
 configuration remain unchanged.

@@ -39,11 +39,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 With the :c:func:`poll()` function applications can wait for CEC
 events.
 

@@ -3,11 +3,6 @@
 Introduction
 ============
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 HDMI connectors provide a single pin for use by the Consumer Electronics
 Control protocol. This protocol allows different devices connected by an
 HDMI cable to communicate. The protocol for CEC version 1.4 is defined
|
@ -31,3 +26,15 @@ control just the CEC pin.
|
||||||
Drivers that support CEC will create a CEC device node (/dev/cecX) to
|
Drivers that support CEC will create a CEC device node (/dev/cecX) to
|
||||||
give userspace access to the CEC adapter. The
|
give userspace access to the CEC adapter. The
|
||||||
:ref:`CEC_ADAP_G_CAPS` ioctl will tell userspace what it is allowed to do.
|
:ref:`CEC_ADAP_G_CAPS` ioctl will tell userspace what it is allowed to do.
|
||||||
|
|
||||||
|
In order to check the support and test it, it is suggested to download
|
||||||
|
the `v4l-utils <https://git.linuxtv.org/v4l-utils.git/>`_ package. It
|
||||||
|
provides three tools to handle CEC:
|
||||||
|
|
||||||
|
- cec-ctl: the Swiss army knife of CEC. Allows you to configure, transmit
|
||||||
|
and monitor CEC messages.
|
||||||
|
|
||||||
|
- cec-compliance: does a CEC compliance test of a remote CEC device to
|
||||||
|
determine how compliant the CEC implementation is.
|
||||||
|
|
||||||
|
- cec-follower: emulates a CEC follower.
|
||||||
|
|
|
@ -29,11 +29,6 @@ Arguments
|
||||||
Description
|
Description
|
||||||
===========
|
===========
|
||||||
|
|
||||||
.. note::
|
|
||||||
|
|
||||||
This documents the proposed CEC API. This API is not yet finalized
|
|
||||||
and is currently only available as a staging kernel module.
|
|
||||||
|
|
||||||
All cec devices must support :ref:`ioctl CEC_ADAP_G_CAPS <CEC_ADAP_G_CAPS>`. To query
|
All cec devices must support :ref:`ioctl CEC_ADAP_G_CAPS <CEC_ADAP_G_CAPS>`. To query
|
||||||
device information, applications call the ioctl with a pointer to a
|
device information, applications call the ioctl with a pointer to a
|
||||||
struct :c:type:`cec_caps`. The driver fills the structure and
|
struct :c:type:`cec_caps`. The driver fills the structure and
|
||||||
|
|
|
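As a rough illustration of the CEC_ADAP_G_CAPS description above, a minimal userspace sketch; it assumes a kernel that installs the linux/cec.h uapi header and exposes /dev/cec0, and it is not code from this series:

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/cec.h>

    int main(void)
    {
        struct cec_caps caps;
        int fd = open("/dev/cec0", O_RDWR);

        if (fd < 0)
            return 1;
        /* Query the adapter capabilities, as every cec device must support. */
        if (ioctl(fd, CEC_ADAP_G_CAPS, &caps) == 0)
            printf("driver %s, adapter %s, capabilities 0x%x\n",
                   caps.driver, caps.name, caps.capabilities);
        close(fd);
        return 0;
    }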
@@ -35,11 +35,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 To query the current CEC logical addresses, applications call
 :ref:`ioctl CEC_ADAP_G_LOG_ADDRS <CEC_ADAP_G_LOG_ADDRS>` with a pointer to a
 struct :c:type:`cec_log_addrs` where the driver stores the logical addresses.

@@ -35,11 +35,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 To query the current physical address applications call
 :ref:`ioctl CEC_ADAP_G_PHYS_ADDR <CEC_ADAP_G_PHYS_ADDR>` with a pointer to a __u16 where the
 driver stores the physical address.

@@ -30,11 +30,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 CEC devices can send asynchronous events. These can be retrieved by
 calling :c:func:`CEC_DQEVENT`. If the file descriptor is in
 non-blocking mode and no event is pending, then it will return -1 and

@@ -31,11 +31,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 By default any filehandle can use :ref:`CEC_TRANSMIT`, but in order to prevent
 applications from stepping on each others toes it must be possible to
 obtain exclusive access to the CEC adapter. This ioctl sets the

@@ -34,11 +34,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 To receive a CEC message the application has to fill in the
 ``timeout`` field of struct :c:type:`cec_msg` and pass it to
 :ref:`ioctl CEC_RECEIVE <CEC_RECEIVE>`.
@@ -211,7 +211,13 @@ Colorspace sRGB (V4L2_COLORSPACE_SRGB)
 The :ref:`srgb` standard defines the colorspace used by most webcams
 and computer graphics. The default transfer function is
 ``V4L2_XFER_FUNC_SRGB``. The default Y'CbCr encoding is
-``V4L2_YCBCR_ENC_601``. The default Y'CbCr quantization is full range.
+``V4L2_YCBCR_ENC_601``. The default Y'CbCr quantization is limited range.
+
+Note that the :ref:`sycc` standard specifies full range quantization,
+however all current capture hardware supported by the kernel convert
+R'G'B' to limited range Y'CbCr. So choosing full range as the default
+would break how applications interpret the quantization range.
 
 The chromaticities of the primary colors and the white reference are:
 

@@ -276,7 +282,7 @@ the following ``V4L2_YCBCR_ENC_601`` encoding as defined by :ref:`sycc`:
 
 Y' is clamped to the range [0…1] and Cb and Cr are clamped to the range
 [-0.5…0.5]. This transform is identical to one defined in SMPTE
-170M/BT.601. The Y'CbCr quantization is full range.
+170M/BT.601. The Y'CbCr quantization is limited range.
 
 
 .. _col-adobergb:

@@ -288,10 +294,15 @@ The :ref:`adobergb` standard defines the colorspace used by computer
 graphics that use the AdobeRGB colorspace. This is also known as the
 :ref:`oprgb` standard. The default transfer function is
 ``V4L2_XFER_FUNC_ADOBERGB``. The default Y'CbCr encoding is
-``V4L2_YCBCR_ENC_601``. The default Y'CbCr quantization is full
-range. The chromaticities of the primary colors and the white reference
-are:
+``V4L2_YCBCR_ENC_601``. The default Y'CbCr quantization is limited
+range.
 
+Note that the :ref:`oprgb` standard specifies full range quantization,
+however all current capture hardware supported by the kernel convert
+R'G'B' to limited range Y'CbCr. So choosing full range as the default
+would break how applications interpret the quantization range.
+
+The chromaticities of the primary colors and the white reference are:
 
 
 .. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|

@@ -344,7 +355,7 @@ the following ``V4L2_YCBCR_ENC_601`` encoding:
 
 Y' is clamped to the range [0…1] and Cb and Cr are clamped to the range
 [-0.5…0.5]. This transform is identical to one defined in SMPTE
-170M/BT.601. The Y'CbCr quantization is full range.
+170M/BT.601. The Y'CbCr quantization is limited range.
 
 
 .. _col-bt2020:
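For reference, "limited range" in the hunks above means the usual BT.601 8-bit levels (Y' in 16..235, Cb/Cr in 16..240) rather than the full 0..255 range. An illustrative C sketch of that quantization, given as general background rather than text from the patch:

    /* Limited-range quantization as used by V4L2_QUANTIZATION_LIM_RANGE.
     * Full range would map the same inputs onto 0..255 instead. */
    static unsigned char quant_y_limited(double y)   /* y in [0..1] */
    {
        return (unsigned char)(219.0 * y + 16.0 + 0.5);   /* 16..235 */
    }

    static unsigned char quant_c_limited(double c)   /* c in [-0.5..0.5] */
    {
        return (unsigned char)(224.0 * c + 128.0 + 0.5);  /* 16..240 */
    }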
@@ -35,9 +35,7 @@ only one way to cause the system to go into the Suspend-To-RAM state (write
 The default suspend mode (ie. the one to be used without writing anything into
 /sys/power/mem_sleep) is either "deep" (if Suspend-To-RAM is supported) or
 "s2idle", but it can be overridden by the value of the "mem_sleep_default"
-parameter in the kernel command line. On some ACPI-based systems, depending on
-the information in the FADT, the default may be "s2idle" even if Suspend-To-RAM
-is supported.
+parameter in the kernel command line.
 
 The properties of all of the sleep states are described below.
 
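A minimal C sketch of the interface described above, selecting the "deep" mode by writing to /sys/power/mem_sleep; it assumes root privileges and a kernel that exposes this file, and is not part of the patch:

    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/sys/power/mem_sleep", "w");

        if (!f)
            return 1;
        fputs("deep\n", f);   /* pick Suspend-To-RAM as the "mem" mode */
        fclose(f);
        return 0;
    }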
MAINTAINERS | 77

@@ -877,8 +877,8 @@ S: Odd fixes
 F: drivers/hwmon/applesmc.c
 
 APPLETALK NETWORK LAYER
-M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
-S: Maintained
+L: netdev@vger.kernel.org
+S: Odd fixes
 F: drivers/net/appletalk/
 F: net/appletalk/
 

@@ -1091,7 +1091,7 @@ F: arch/arm/boot/dts/aspeed-*
 F: drivers/*/*aspeed*
 
 ARM/ATMEL AT91RM9200, AT91SAM9 AND SAMA5 SOC SUPPORT
-M: Nicolas Ferre <nicolas.ferre@atmel.com>
+M: Nicolas Ferre <nicolas.ferre@microchip.com>
 M: Alexandre Belloni <alexandre.belloni@free-electrons.com>
 M: Jean-Christophe Plagniol-Villard <plagnioj@jcrosoft.com>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)

@@ -1773,7 +1773,7 @@ F: drivers/soc/renesas/
 F: include/linux/soc/renesas/
 
 ARM/SOCFPGA ARCHITECTURE
-M: Dinh Nguyen <dinguyen@opensource.altera.com>
+M: Dinh Nguyen <dinguyen@kernel.org>
 S: Maintained
 F: arch/arm/mach-socfpga/
 F: arch/arm/boot/dts/socfpga*

@@ -1783,7 +1783,7 @@ W: http://www.rocketboards.org
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/dinguyen/linux.git
 
 ARM/SOCFPGA CLOCK FRAMEWORK SUPPORT
-M: Dinh Nguyen <dinguyen@opensource.altera.com>
+M: Dinh Nguyen <dinguyen@kernel.org>
 S: Maintained
 F: drivers/clk/socfpga/
 

@@ -2175,56 +2175,56 @@ F: include/linux/atm*
 F: include/uapi/linux/atm*
 
 ATMEL AT91 / AT32 MCI DRIVER
-M: Ludovic Desroches <ludovic.desroches@atmel.com>
+M: Ludovic Desroches <ludovic.desroches@microchip.com>
 S: Maintained
 F: drivers/mmc/host/atmel-mci.c
 
 ATMEL AT91 SAMA5D2-Compatible Shutdown Controller
-M: Nicolas Ferre <nicolas.ferre@atmel.com>
+M: Nicolas Ferre <nicolas.ferre@microchip.com>
 S: Supported
 F: drivers/power/reset/at91-sama5d2_shdwc.c
 
 ATMEL SAMA5D2 ADC DRIVER
-M: Ludovic Desroches <ludovic.desroches@atmel.com>
+M: Ludovic Desroches <ludovic.desroches@microchip.com>
 L: linux-iio@vger.kernel.org
 S: Supported
 F: drivers/iio/adc/at91-sama5d2_adc.c
 
 ATMEL Audio ALSA driver
-M: Nicolas Ferre <nicolas.ferre@atmel.com>
+M: Nicolas Ferre <nicolas.ferre@microchip.com>
 L: alsa-devel@alsa-project.org (moderated for non-subscribers)
 S: Supported
 F: sound/soc/atmel
 
 ATMEL XDMA DRIVER
-M: Ludovic Desroches <ludovic.desroches@atmel.com>
+M: Ludovic Desroches <ludovic.desroches@microchip.com>
 L: linux-arm-kernel@lists.infradead.org
 L: dmaengine@vger.kernel.org
 S: Supported
 F: drivers/dma/at_xdmac.c
 
 ATMEL I2C DRIVER
-M: Ludovic Desroches <ludovic.desroches@atmel.com>
+M: Ludovic Desroches <ludovic.desroches@microchip.com>
 L: linux-i2c@vger.kernel.org
 S: Supported
 F: drivers/i2c/busses/i2c-at91.c
 
 ATMEL ISI DRIVER
-M: Ludovic Desroches <ludovic.desroches@atmel.com>
+M: Ludovic Desroches <ludovic.desroches@microchip.com>
 L: linux-media@vger.kernel.org
 S: Supported
 F: drivers/media/platform/soc_camera/atmel-isi.c
 F: include/media/atmel-isi.h
 
 ATMEL LCDFB DRIVER
-M: Nicolas Ferre <nicolas.ferre@atmel.com>
+M: Nicolas Ferre <nicolas.ferre@microchip.com>
 L: linux-fbdev@vger.kernel.org
 S: Maintained
 F: drivers/video/fbdev/atmel_lcdfb.c
 F: include/video/atmel_lcdc.h
 
 ATMEL MACB ETHERNET DRIVER
-M: Nicolas Ferre <nicolas.ferre@atmel.com>
+M: Nicolas Ferre <nicolas.ferre@microchip.com>
 S: Supported
 F: drivers/net/ethernet/cadence/
 

@@ -2236,32 +2236,32 @@ S: Supported
 F: drivers/mtd/nand/atmel_nand*
 
 ATMEL SDMMC DRIVER
-M: Ludovic Desroches <ludovic.desroches@atmel.com>
+M: Ludovic Desroches <ludovic.desroches@microchip.com>
 L: linux-mmc@vger.kernel.org
 S: Supported
 F: drivers/mmc/host/sdhci-of-at91.c
 
 ATMEL SPI DRIVER
-M: Nicolas Ferre <nicolas.ferre@atmel.com>
+M: Nicolas Ferre <nicolas.ferre@microchip.com>
 S: Supported
 F: drivers/spi/spi-atmel.*
 
 ATMEL SSC DRIVER
-M: Nicolas Ferre <nicolas.ferre@atmel.com>
+M: Nicolas Ferre <nicolas.ferre@microchip.com>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S: Supported
 F: drivers/misc/atmel-ssc.c
 F: include/linux/atmel-ssc.h
 
 ATMEL Timer Counter (TC) AND CLOCKSOURCE DRIVERS
-M: Nicolas Ferre <nicolas.ferre@atmel.com>
+M: Nicolas Ferre <nicolas.ferre@microchip.com>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S: Supported
 F: drivers/misc/atmel_tclib.c
 F: drivers/clocksource/tcb_clksrc.c
 
 ATMEL USBA UDC DRIVER
-M: Nicolas Ferre <nicolas.ferre@atmel.com>
+M: Nicolas Ferre <nicolas.ferre@microchip.com>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S: Supported
 F: drivers/usb/gadget/udc/atmel_usba_udc.*
@@ -3567,7 +3567,7 @@ F: drivers/infiniband/hw/cxgb3/
 F: include/uapi/rdma/cxgb3-abi.h
 
 CXGB4 ETHERNET DRIVER (CXGB4)
-M: Hariprasad S <hariprasad@chelsio.com>
+M: Ganesh Goudar <ganeshgr@chelsio.com>
 L: netdev@vger.kernel.org
 W: http://www.chelsio.com
 S: Supported

@@ -4100,12 +4100,18 @@ F: drivers/gpu/drm/bridge/
 
 DRM DRIVER FOR BOCHS VIRTUAL GPU
 M: Gerd Hoffmann <kraxel@redhat.com>
-S: Odd Fixes
+L: virtualization@lists.linux-foundation.org
+T: git git://git.kraxel.org/linux drm-qemu
+S: Maintained
 F: drivers/gpu/drm/bochs/
 
 DRM DRIVER FOR QEMU'S CIRRUS DEVICE
 M: Dave Airlie <airlied@redhat.com>
-S: Odd Fixes
+M: Gerd Hoffmann <kraxel@redhat.com>
+L: virtualization@lists.linux-foundation.org
+T: git git://git.kraxel.org/linux drm-qemu
+S: Obsolete
+W: https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/
 F: drivers/gpu/drm/cirrus/
 
 RADEON and AMDGPU DRM DRIVERS

@@ -4147,7 +4153,7 @@ F: Documentation/gpu/i915.rst
 INTEL GVT-g DRIVERS (Intel GPU Virtualization)
 M: Zhenyu Wang <zhenyuw@linux.intel.com>
 M: Zhi Wang <zhi.a.wang@intel.com>
-L: igvt-g-dev@lists.01.org
+L: intel-gvt-dev@lists.freedesktop.org
 L: intel-gfx@lists.freedesktop.org
 W: https://01.org/igvt-g
 T: git https://github.com/01org/gvt-linux.git

@@ -4298,7 +4304,10 @@ F: Documentation/devicetree/bindings/display/renesas,du.txt
 
 DRM DRIVER FOR QXL VIRTUAL GPU
 M: Dave Airlie <airlied@redhat.com>
-S: Odd Fixes
+M: Gerd Hoffmann <kraxel@redhat.com>
+L: virtualization@lists.linux-foundation.org
+T: git git://git.kraxel.org/linux drm-qemu
+S: Maintained
 F: drivers/gpu/drm/qxl/
 F: include/uapi/drm/qxl_drm.h
 

@@ -6718,9 +6727,8 @@ S: Odd Fixes
 F: drivers/tty/ipwireless/
 
 IPX NETWORK LAYER
-M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
 L: netdev@vger.kernel.org
-S: Maintained
+S: Odd fixes
 F: include/net/ipx.h
 F: include/uapi/linux/ipx.h
 F: net/ipx/

@@ -7492,8 +7500,8 @@ S: Maintained
 F: drivers/misc/lkdtm*
 
 LLC (802.2)
-M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
-S: Maintained
+L: netdev@vger.kernel.org
+S: Odd fixes
 F: include/linux/llc.h
 F: include/uapi/linux/llc.h
 F: include/net/llc*

@@ -9727,7 +9735,7 @@ S: Maintained
 F: drivers/pinctrl/pinctrl-at91.*
 
 PIN CONTROLLER - ATMEL AT91 PIO4
-M: Ludovic Desroches <ludovic.desroches@atmel.com>
+M: Ludovic Desroches <ludovic.desroches@microchip.com>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L: linux-gpio@vger.kernel.org
 S: Supported

@@ -10186,7 +10194,6 @@ F: drivers/media/tuners/qt1010*
 QUALCOMM ATHEROS ATH9K WIRELESS DRIVER
 M: QCA ath9k Development <ath9k-devel@qca.qualcomm.com>
 L: linux-wireless@vger.kernel.org
-L: ath9k-devel@lists.ath9k.org
 W: http://wireless.kernel.org/en/users/Drivers/ath9k
 S: Supported
 F: drivers/net/wireless/ath/ath9k/

@@ -13057,7 +13064,7 @@ F: drivers/input/serio/userio.c
 F: include/uapi/linux/userio.h
 
 VIRTIO CONSOLE DRIVER
-M: Amit Shah <amit.shah@redhat.com>
+M: Amit Shah <amit@kernel.org>
 L: virtualization@lists.linux-foundation.org
 S: Maintained
 F: drivers/char/virtio_console.c

@@ -13092,6 +13099,7 @@ M: David Airlie <airlied@linux.ie>
 M: Gerd Hoffmann <kraxel@redhat.com>
 L: dri-devel@lists.freedesktop.org
 L: virtualization@lists.linux-foundation.org
+T: git git://git.kraxel.org/linux drm-qemu
 S: Maintained
 F: drivers/gpu/drm/virtio/
 F: include/uapi/linux/virtio_gpu.h

@@ -13364,10 +13372,8 @@ S: Maintained
 F: drivers/input/misc/wistron_btns.c
 
 WL3501 WIRELESS PCMCIA CARD DRIVER
-M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
 L: linux-wireless@vger.kernel.org
-W: http://oops.ghostprotocols.net:81/blog
-S: Maintained
+S: Odd fixes
 F: drivers/net/wireless/wl3501*
 
 WOLFSON MICROELECTRONICS DRIVERS

@@ -13443,6 +13449,7 @@ F: arch/x86/
 
 X86 PLATFORM DRIVERS
 M: Darren Hart <dvhart@infradead.org>
+M: Andy Shevchenko <andy@infradead.org>
 L: platform-driver-x86@vger.kernel.org
 T: git git://git.infradead.org/users/dvhart/linux-platform-drivers-x86.git
 S: Maintained

@@ -13614,6 +13621,7 @@ F: drivers/net/hamradio/z8530.h
 
 ZBUD COMPRESSED PAGE ALLOCATOR
 M: Seth Jennings <sjenning@redhat.com>
+M: Dan Streetman <ddstreet@ieee.org>
 L: linux-mm@kvack.org
 S: Maintained
 F: mm/zbud.c

@@ -13669,6 +13677,7 @@ F: Documentation/vm/zsmalloc.txt
 
 ZSWAP COMPRESSED SWAP CACHING
 M: Seth Jennings <sjenning@redhat.com>
+M: Dan Streetman <ddstreet@ieee.org>
 L: linux-mm@kvack.org
 S: Maintained
 F: mm/zswap.c
Makefile | 6

@@ -1,8 +1,8 @@
 VERSION = 4
 PATCHLEVEL = 10
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
-NAME = Anniversary Edition
+EXTRAVERSION =
+NAME = Fearless Coyote
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"

@@ -797,7 +797,7 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=incompatible-pointer-types)
 KBUILD_ARFLAGS := $(call ar-option,D)
 
 # check for 'asm goto'
-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC)), y)
+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
 	KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
 	KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
 endif
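The Makefile hunk above only changes how the 'asm goto' probe is invoked; for context, the construct being probed looks roughly like the following sketch, which is an assumption about the kind of test program such a probe compiles, not a copy of scripts/gcc-goto.sh:

    /* Compiles only when the compiler supports 'asm goto'; the empty
     * template never actually jumps, the probe only cares that it builds. */
    static int have_asm_goto(void)
    {
        asm goto("" : : : : supported);
        return 0;
    supported:
        return 1;
    }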
@@ -26,7 +26,9 @@ static inline void __delay(unsigned long loops)
     "    lp  1f \n"
     "    nop    \n"
     "1:         \n"
-    : : "r"(loops));
+    :
+    : "r"(loops)
+    : "lp_count");
 }
 
 extern void __bad_udelay(void);
@@ -71,14 +71,14 @@ ENTRY(stext)
     GET_CPU_ID r5
     cmp r5, 0
     mov.nz r0, r5
-#ifdef CONFIG_ARC_SMP_HALT_ON_RESET
-    ; Non-Master can proceed as system would be booted sufficiently
-    jnz first_lines_of_secondary
-#else
+    bz .Lmaster_proceed
+
     ; Non-Masters wait for Master to boot enough and bring them up
-    jnz arc_platform_smp_wait_to_boot
-#endif
-    ; Master falls thru
+    ; when they resume, tail-call to entry point
+    mov blink, @first_lines_of_secondary
+    j arc_platform_smp_wait_to_boot
+
+.Lmaster_proceed:
 #endif
 
     ; Clear BSS before updating any globals
@@ -93,11 +93,10 @@ static void mcip_probe_n_setup(void)
     READ_BCR(ARC_REG_MCIP_BCR, mp);
 
     sprintf(smp_cpuinfo_buf,
-        "Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s%s\n",
+        "Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s\n",
         mp.ver, mp.num_cores,
         IS_AVAIL1(mp.ipi, "IPI "),
         IS_AVAIL1(mp.idu, "IDU "),
-        IS_AVAIL1(mp.llm, "LLM "),
         IS_AVAIL1(mp.dbg, "DEBUG "),
         IS_AVAIL1(mp.gfrc, "GFRC"));
 

@@ -175,7 +174,6 @@ static void idu_irq_unmask(struct irq_data *data)
     raw_spin_unlock_irqrestore(&mcip_lock, flags);
 }
 
-#ifdef CONFIG_SMP
 static int
 idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
              bool force)

@@ -205,12 +203,27 @@ idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
 
     return IRQ_SET_MASK_OK;
 }
-#endif
+
+static void idu_irq_enable(struct irq_data *data)
+{
+    /*
+     * By default send all common interrupts to all available online CPUs.
+     * The affinity of common interrupts in IDU must be set manually since
+     * in some cases the kernel will not call irq_set_affinity() by itself:
+     *   1. When the kernel is not configured with support of SMP.
+     *   2. When the kernel is configured with support of SMP but upper
+     *      interrupt controllers does not support setting of the affinity
+     *      and cannot propagate it to IDU.
+     */
+    idu_irq_set_affinity(data, cpu_online_mask, false);
+    idu_irq_unmask(data);
+}
 
 static struct irq_chip idu_irq_chip = {
     .name = "MCIP IDU Intc",
     .irq_mask = idu_irq_mask,
     .irq_unmask = idu_irq_unmask,
+    .irq_enable = idu_irq_enable,
 #ifdef CONFIG_SMP
     .irq_set_affinity = idu_irq_set_affinity,
 #endif
@@ -243,36 +256,14 @@ static int idu_irq_xlate(struct irq_domain *d, struct device_node *n,
              const u32 *intspec, unsigned int intsize,
              irq_hw_number_t *out_hwirq, unsigned int *out_type)
 {
-    irq_hw_number_t hwirq = *out_hwirq = intspec[0];
-    int distri = intspec[1];
-    unsigned long flags;
-
+    /*
+     * Ignore value of interrupt distribution mode for common interrupts in
+     * IDU which resides in intspec[1] since setting an affinity using value
+     * from Device Tree is deprecated in ARC.
+     */
+    *out_hwirq = intspec[0];
     *out_type = IRQ_TYPE_NONE;
 
-    /* XXX: validate distribution scheme again online cpu mask */
-    if (distri == 0) {
-        /* 0 - Round Robin to all cpus, otherwise 1 bit per core */
-        raw_spin_lock_irqsave(&mcip_lock, flags);
-        idu_set_dest(hwirq, BIT(num_online_cpus()) - 1);
-        idu_set_mode(hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR);
-        raw_spin_unlock_irqrestore(&mcip_lock, flags);
-    } else {
-        /*
-         * DEST based distribution for Level Triggered intr can only
-         * have 1 CPU, so generalize it to always contain 1 cpu
-         */
-        int cpu = ffs(distri);
-
-        if (cpu != fls(distri))
-            pr_warn("IDU irq %lx distri mode set to cpu %x\n",
-                hwirq, cpu);
-
-        raw_spin_lock_irqsave(&mcip_lock, flags);
-        idu_set_dest(hwirq, cpu);
-        idu_set_mode(hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_DEST);
-        raw_spin_unlock_irqrestore(&mcip_lock, flags);
-    }
-
     return 0;
 }
 
@@ -90,22 +90,37 @@ void __init smp_cpus_done(unsigned int max_cpus)
  */
 static volatile int wake_flag;
 
+#ifdef CONFIG_ISA_ARCOMPACT
+
+#define __boot_read(f) f
+#define __boot_write(f, v) f = v
+
+#else
+
+#define __boot_read(f) arc_read_uncached_32(&f)
+#define __boot_write(f, v) arc_write_uncached_32(&f, v)
+
+#endif
+
 static void arc_default_smp_cpu_kick(int cpu, unsigned long pc)
 {
     BUG_ON(cpu == 0);
-    wake_flag = cpu;
+
+    __boot_write(wake_flag, cpu);
 }
 
 void arc_platform_smp_wait_to_boot(int cpu)
 {
-    while (wake_flag != cpu)
+    /* for halt-on-reset, we've waited already */
+    if (IS_ENABLED(CONFIG_ARC_SMP_HALT_ON_RESET))
+        return;
+
+    while (__boot_read(wake_flag) != cpu)
         ;
 
-    wake_flag = 0;
-    __asm__ __volatile__("j @first_lines_of_secondary \n");
+    __boot_write(wake_flag, 0);
 }
 
 
 const char *arc_platform_smp_cpuinfo(void)
 {
     return plat_smp_ops.info ? : "";
@@ -241,8 +241,9 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
     if (state.fault)
         goto fault;
 
+    /* clear any remanants of delay slot */
     if (delay_mode(regs)) {
-        regs->ret = regs->bta;
+        regs->ret = regs->bta & ~1U;
         regs->status32 &= ~STATUS_DE_MASK;
     } else {
         regs->ret += state.instr_len;
@@ -617,7 +617,7 @@ dtb-$(CONFIG_ARCH_ORION5X) += \
     orion5x-lacie-ethernet-disk-mini-v2.dtb \
     orion5x-linkstation-lsgl.dtb \
     orion5x-linkstation-lswtgl.dtb \
-    orion5x-lschl.dtb \
+    orion5x-linkstation-lschl.dtb \
     orion5x-lswsgl.dtb \
     orion5x-maxtor-shared-storage-2.dtb \
    orion5x-netgear-wnr854t.dtb \
@@ -18,6 +18,14 @@
 / {
     #address-cells = <1>;
     #size-cells = <1>;
+    /*
+     * The decompressor and also some bootloaders rely on a
+     * pre-existing /chosen node to be available to insert the
+     * command line and merge other ATAGS info.
+     * Also for U-Boot there must be a pre-existing /memory node.
+     */
+    chosen {};
+    memory { device_type = "memory"; reg = <0 0>; };
 
     aliases {
         gpio0 = &gpio1;

@@ -16,6 +16,14 @@
     #size-cells = <1>;
 
     interrupt-parent = <&icoll>;
+    /*
+     * The decompressor and also some bootloaders rely on a
+     * pre-existing /chosen node to be available to insert the
+     * command line and merge other ATAGS info.
+     * Also for U-Boot there must be a pre-existing /memory node.
+     */
+    chosen {};
+    memory { device_type = "memory"; reg = <0 0>; };
 
     aliases {
         gpio0 = &gpio0;

@@ -14,6 +14,14 @@
 / {
     #address-cells = <1>;
     #size-cells = <1>;
+    /*
+     * The decompressor and also some bootloaders rely on a
+     * pre-existing /chosen node to be available to insert the
+     * command line and merge other ATAGS info.
+     * Also for U-Boot there must be a pre-existing /memory node.
+     */
+    chosen {};
+    memory { device_type = "memory"; reg = <0 0>; };
 
     aliases {
         ethernet0 = &fec;

@@ -19,6 +19,14 @@
 / {
     #address-cells = <1>;
     #size-cells = <1>;
+    /*
+     * The decompressor and also some bootloaders rely on a
+     * pre-existing /chosen node to be available to insert the
+     * command line and merge other ATAGS info.
+     * Also for U-Boot there must be a pre-existing /memory node.
+     */
+    chosen {};
+    memory { device_type = "memory"; reg = <0 0>; };
 
     aliases {
         ethernet0 = &fec;

@@ -17,6 +17,14 @@
     #size-cells = <1>;
 
     interrupt-parent = <&icoll>;
+    /*
+     * The decompressor and also some bootloaders rely on a
+     * pre-existing /chosen node to be available to insert the
+     * command line and merge other ATAGS info.
+     * Also for U-Boot there must be a pre-existing /memory node.
+     */
+    chosen {};
+    memory { device_type = "memory"; reg = <0 0>; };
 
     aliases {
         ethernet0 = &mac0;

@@ -12,6 +12,14 @@
 / {
     #address-cells = <1>;
     #size-cells = <1>;
+    /*
+     * The decompressor and also some bootloaders rely on a
+     * pre-existing /chosen node to be available to insert the
+     * command line and merge other ATAGS info.
+     * Also for U-Boot there must be a pre-existing /memory node.
+     */
+    chosen {};
+    memory { device_type = "memory"; reg = <0 0>; };
 
     aliases {
         serial0 = &uart1;

@@ -13,6 +13,14 @@
 / {
     #address-cells = <1>;
     #size-cells = <1>;
+    /*
+     * The decompressor and also some bootloaders rely on a
+     * pre-existing /chosen node to be available to insert the
+     * command line and merge other ATAGS info.
+     * Also for U-Boot there must be a pre-existing /memory node.
+     */
+    chosen {};
+    memory { device_type = "memory"; reg = <0 0>; };
 
     aliases {
         ethernet0 = &fec;

@@ -17,6 +17,14 @@
 / {
     #address-cells = <1>;
     #size-cells = <1>;
+    /*
+     * The decompressor and also some bootloaders rely on a
+     * pre-existing /chosen node to be available to insert the
+     * command line and merge other ATAGS info.
+     * Also for U-Boot there must be a pre-existing /memory node.
+     */
+    chosen {};
+    memory { device_type = "memory"; reg = <0 0>; };
 
     aliases {
         ethernet0 = &fec;

@@ -19,6 +19,14 @@
 / {
     #address-cells = <1>;
     #size-cells = <1>;
+    /*
+     * The decompressor and also some bootloaders rely on a
+     * pre-existing /chosen node to be available to insert the
+     * command line and merge other ATAGS info.
+     * Also for U-Boot there must be a pre-existing /memory node.
+     */
+    chosen {};
+    memory { device_type = "memory"; reg = <0 0>; };
 
     aliases {
         ethernet0 = &fec;

@@ -19,6 +19,14 @@
 / {
     #address-cells = <1>;
     #size-cells = <1>;
+    /*
+     * The decompressor and also some bootloaders rely on a
+     * pre-existing /chosen node to be available to insert the
+     * command line and merge other ATAGS info.
+     * Also for U-Boot there must be a pre-existing /memory node.
+     */
+    chosen {};
+    memory { device_type = "memory"; reg = <0 0>; };
 
     aliases {
         ethernet0 = &fec;

@@ -137,7 +137,7 @@
 &gpio4 {
     gpio-ranges = <&iomuxc 5 136 1>, <&iomuxc 6 145 1>, <&iomuxc 7 150 1>,
                   <&iomuxc 8 146 1>, <&iomuxc 9 151 1>, <&iomuxc 10 147 1>,
-                  <&iomuxc 11 151 1>, <&iomuxc 12 148 1>, <&iomuxc 13 153 1>,
+                  <&iomuxc 11 152 1>, <&iomuxc 12 148 1>, <&iomuxc 13 153 1>,
                   <&iomuxc 14 149 1>, <&iomuxc 15 154 1>, <&iomuxc 16 39 7>,
                   <&iomuxc 23 56 1>, <&iomuxc 24 61 7>, <&iomuxc 31 46 1>;
 };

@@ -16,6 +16,14 @@
 / {
     #address-cells = <1>;
     #size-cells = <1>;
+    /*
+     * The decompressor and also some bootloaders rely on a
+     * pre-existing /chosen node to be available to insert the
+     * command line and merge other ATAGS info.
+     * Also for U-Boot there must be a pre-existing /memory node.
+     */
+    chosen {};
+    memory { device_type = "memory"; reg = <0 0>; };
 
     aliases {
         ethernet0 = &fec;

@@ -14,6 +14,14 @@
 / {
     #address-cells = <1>;
     #size-cells = <1>;
+    /*
+     * The decompressor and also some bootloaders rely on a
+     * pre-existing /chosen node to be available to insert the
+     * command line and merge other ATAGS info.
+     * Also for U-Boot there must be a pre-existing /memory node.
+     */
+    chosen {};
+    memory { device_type = "memory"; reg = <0 0>; };
 
     aliases {
         ethernet0 = &fec;

@@ -15,6 +15,14 @@
 / {
     #address-cells = <1>;
     #size-cells = <1>;
+    /*
+     * The decompressor and also some bootloaders rely on a
+     * pre-existing /chosen node to be available to insert the
+     * command line and merge other ATAGS info.
+     * Also for U-Boot there must be a pre-existing /memory node.
+     */
+    chosen {};
+    memory { device_type = "memory"; reg = <0 0>; };
 
     aliases {
         can0 = &flexcan1;

@@ -15,6 +15,14 @@
 / {
     #address-cells = <1>;
     #size-cells = <1>;
+    /*
+     * The decompressor and also some bootloaders rely on a
+     * pre-existing /chosen node to be available to insert the
+     * command line and merge other ATAGS info.
+     * Also for U-Boot there must be a pre-existing /memory node.
+     */
+    chosen {};
+    memory { device_type = "memory"; reg = <0 0>; };
 
     aliases {
         ethernet0 = &fec1;

@@ -50,6 +50,14 @@
 / {
     #address-cells = <1>;
     #size-cells = <1>;
+    /*
+     * The decompressor and also some bootloaders rely on a
+     * pre-existing /chosen node to be available to insert the
+     * command line and merge other ATAGS info.
+     * Also for U-Boot there must be a pre-existing /memory node.
+     */
+    chosen {};
+    memory { device_type = "memory"; reg = <0 0>; };
 
     aliases {
         gpio0 = &gpio1;
@@ -2,7 +2,7 @@
  * Device Tree file for Buffalo Linkstation LS-CHLv3
  *
  * Copyright (C) 2016 Ash Hughes <ashley.hughes@blueyonder.co.uk>
- * Copyright (C) 2015, 2016
+ * Copyright (C) 2015-2017
  *   Roger Shimizu <rogershimizu@gmail.com>
  *
  * This file is dual-licensed: you can use it either under the terms

@@ -52,7 +52,7 @@
 #include <dt-bindings/gpio/gpio.h>
 
 / {
-    model = "Buffalo Linkstation Live v3 (LS-CHL)";
+    model = "Buffalo Linkstation LiveV3 (LS-CHL)";
     compatible = "buffalo,lschl", "marvell,orion5x-88f5182", "marvell,orion5x";
 
     memory { /* 128 MB */
@@ -680,6 +680,7 @@
             phy-names = "usb2-phy", "usb3-phy";
             phys = <&usb2_picophy0>,
                    <&phy_port2 PHY_TYPE_USB3>;
+            snps,dis_u3_susphy_quirk;
         };
     };
 
@@ -64,8 +64,8 @@ CONFIG_NETFILTER=y
 CONFIG_NETFILTER_NETLINK_QUEUE=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_SCTP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m

@@ -56,8 +56,8 @@ CONFIG_NETFILTER=y
 CONFIG_NETFILTER_NETLINK_QUEUE=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_SCTP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
@@ -824,6 +824,7 @@ CONFIG_QCOM_SMSM=y
 CONFIG_QCOM_WCNSS_CTRL=m
 CONFIG_ROCKCHIP_PM_DOMAINS=y
 CONFIG_COMMON_CLK_QCOM=y
+CONFIG_QCOM_CLK_RPM=y
 CONFIG_CHROME_PLATFORMS=y
 CONFIG_STAGING_BOARD=y
 CONFIG_CROS_EC_CHARDEV=m
@@ -478,11 +478,10 @@ extern unsigned long __must_check
 arm_copy_from_user(void *to, const void __user *from, unsigned long n);

 static inline unsigned long __must_check
-__copy_from_user(void *to, const void __user *from, unsigned long n)
+__arch_copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 unsigned int __ua_flags;

-check_object_size(to, n, false);
 __ua_flags = uaccess_save_and_enable();
 n = arm_copy_from_user(to, from, n);
 uaccess_restore(__ua_flags);

@@ -495,18 +494,15 @@ extern unsigned long __must_check
 __copy_to_user_std(void __user *to, const void *from, unsigned long n);

 static inline unsigned long __must_check
-__copy_to_user(void __user *to, const void *from, unsigned long n)
+__arch_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 #ifndef CONFIG_UACCESS_WITH_MEMCPY
 unsigned int __ua_flags;

-check_object_size(from, n, true);
 __ua_flags = uaccess_save_and_enable();
 n = arm_copy_to_user(to, from, n);
 uaccess_restore(__ua_flags);
 return n;
 #else
-check_object_size(from, n, true);
 return arm_copy_to_user(to, from, n);
 #endif
 }

@@ -526,25 +522,49 @@ __clear_user(void __user *addr, unsigned long n)
 }

 #else
-#define __copy_from_user(to, from, n) (memcpy(to, (void __force *)from, n), 0)
-#define __copy_to_user(to, from, n) (memcpy((void __force *)to, from, n), 0)
+#define __arch_copy_from_user(to, from, n) \
+(memcpy(to, (void __force *)from, n), 0)
+#define __arch_copy_to_user(to, from, n) \
+(memcpy((void __force *)to, from, n), 0)
 #define __clear_user(addr, n) (memset((void __force *)addr, 0, n), 0)
 #endif

-static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
+static inline unsigned long __must_check
+__copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+check_object_size(to, n, false);
+return __arch_copy_from_user(to, from, n);
+}
+
+static inline unsigned long __must_check
+copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 unsigned long res = n;

+check_object_size(to, n, false);
+
 if (likely(access_ok(VERIFY_READ, from, n)))
-res = __copy_from_user(to, from, n);
+res = __arch_copy_from_user(to, from, n);
 if (unlikely(res))
 memset(to + (n - res), 0, res);
 return res;
 }

-static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
+static inline unsigned long __must_check
+__copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+check_object_size(from, n, true);
+
+return __arch_copy_to_user(to, from, n);
+}
+
+static inline unsigned long __must_check
+copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+check_object_size(from, n, true);
+
 if (access_ok(VERIFY_WRITE, to, n))
-n = __copy_to_user(to, from, n);
+n = __arch_copy_to_user(to, from, n);
 return n;
 }
@@ -600,7 +600,7 @@ static int gpr_set(struct task_struct *target,
 const void *kbuf, const void __user *ubuf)
 {
 int ret;
-struct pt_regs newregs;
+struct pt_regs newregs = *task_pt_regs(target);

 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 &newregs,
@@ -67,7 +67,7 @@ ENTRY(__get_user_4)
 ENDPROC(__get_user_4)

 ENTRY(__get_user_8)
-check_uaccess r0, 8, r1, r2, __get_user_bad
+check_uaccess r0, 8, r1, r2, __get_user_bad8
 #ifdef CONFIG_THUMB2_KERNEL
 5: TUSER(ldr) r2, [r0]
 6: TUSER(ldr) r3, [r0, #4]
@@ -60,7 +60,6 @@

 #define to_mmdc_pmu(p) container_of(p, struct mmdc_pmu, pmu)

-static enum cpuhp_state cpuhp_mmdc_state;
 static int ddr_type;

 struct fsl_mmdc_devtype_data {

@@ -82,6 +81,7 @@ static const struct of_device_id imx_mmdc_dt_ids[] = {

 #ifdef CONFIG_PERF_EVENTS

+static enum cpuhp_state cpuhp_mmdc_state;
 static DEFINE_IDA(mmdc_ida);

 PMU_EVENT_ATTR_STRING(total-cycles, mmdc_pmu_total_cycles, "event=0x00")
@@ -610,9 +610,9 @@ static int __init early_abort_handler(unsigned long addr, unsigned int fsr,

 void __init early_abt_enable(void)
 {
-fsr_info[22].fn = early_abort_handler;
+fsr_info[FSR_FS_AEA].fn = early_abort_handler;
 local_abt_enable();
-fsr_info[22].fn = do_bad;
+fsr_info[FSR_FS_AEA].fn = do_bad;
 }

 #ifndef CONFIG_ARM_LPAE

@@ -11,11 +11,15 @@
 #define FSR_FS5_0 (0x3f)

 #ifdef CONFIG_ARM_LPAE
+#define FSR_FS_AEA 17

 static inline int fsr_fs(unsigned int fsr)
 {
 return fsr & FSR_FS5_0;
 }
 #else
+#define FSR_FS_AEA 22

 static inline int fsr_fs(unsigned int fsr)
 {
 return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6;
@@ -55,6 +55,24 @@
 #address-cells = <2>;
 #size-cells = <2>;

+reserved-memory {
+#address-cells = <2>;
+#size-cells = <2>;
+ranges;
+
+/* 16 MiB reserved for Hardware ROM Firmware */
+hwrom_reserved: hwrom@0 {
+reg = <0x0 0x0 0x0 0x1000000>;
+no-map;
+};
+
+/* 2 MiB reserved for ARM Trusted Firmware (BL31) */
+secmon_reserved: secmon@10000000 {
+reg = <0x0 0x10000000 0x0 0x200000>;
+no-map;
+};
+};
+
 cpus {
 #address-cells = <0x2>;
 #size-cells = <0x0>;
@@ -151,6 +151,18 @@
 status = "okay";
 pinctrl-0 = <&eth_rgmii_pins>;
 pinctrl-names = "default";
+phy-handle = <&eth_phy0>;
+
+mdio {
+compatible = "snps,dwmac-mdio";
+#address-cells = <1>;
+#size-cells = <0>;
+
+eth_phy0: ethernet-phy@0 {
+reg = <0>;
+eee-broken-1000t;
+};
+};
 };

 &ir {
@@ -193,15 +193,16 @@ AES_ENTRY(aes_cbc_encrypt)
 cbz w6, .Lcbcencloop

 ld1 {v0.16b}, [x5] /* get iv */
-enc_prepare w3, x2, x5
+enc_prepare w3, x2, x6

 .Lcbcencloop:
 ld1 {v1.16b}, [x1], #16 /* get next pt block */
 eor v0.16b, v0.16b, v1.16b /* ..and xor with iv */
-encrypt_block v0, w3, x2, x5, w6
+encrypt_block v0, w3, x2, x6, w7
 st1 {v0.16b}, [x0], #16
 subs w4, w4, #1
 bne .Lcbcencloop
+st1 {v0.16b}, [x5] /* return iv */
 ret
 AES_ENDPROC(aes_cbc_encrypt)

@@ -211,7 +212,7 @@ AES_ENTRY(aes_cbc_decrypt)
 cbz w6, .LcbcdecloopNx

 ld1 {v7.16b}, [x5] /* get iv */
-dec_prepare w3, x2, x5
+dec_prepare w3, x2, x6

 .LcbcdecloopNx:
 #if INTERLEAVE >= 2

@@ -248,7 +249,7 @@ AES_ENTRY(aes_cbc_decrypt)
 .Lcbcdecloop:
 ld1 {v1.16b}, [x1], #16 /* get next ct block */
 mov v0.16b, v1.16b /* ...and copy to v0 */
-decrypt_block v0, w3, x2, x5, w6
+decrypt_block v0, w3, x2, x6, w7
 eor v0.16b, v0.16b, v7.16b /* xor with iv => pt */
 mov v7.16b, v1.16b /* ct is next iv */
 st1 {v0.16b}, [x0], #16

@@ -256,6 +257,7 @@ AES_ENTRY(aes_cbc_decrypt)
 bne .Lcbcdecloop
 .Lcbcdecout:
 FRAME_POP
+st1 {v7.16b}, [x5] /* return iv */
 ret
 AES_ENDPROC(aes_cbc_decrypt)

@@ -267,24 +269,15 @@ AES_ENDPROC(aes_cbc_decrypt)

 AES_ENTRY(aes_ctr_encrypt)
 FRAME_PUSH
-cbnz w6, .Lctrfirst /* 1st time around? */
-umov x5, v4.d[1] /* keep swabbed ctr in reg */
-rev x5, x5
-#if INTERLEAVE >= 2
-cmn w5, w4 /* 32 bit overflow? */
-bcs .Lctrinc
-add x5, x5, #1 /* increment BE ctr */
-b .LctrincNx
-#else
-b .Lctrinc
-#endif
-.Lctrfirst:
+cbz w6, .Lctrnotfirst /* 1st time around? */
 enc_prepare w3, x2, x6
 ld1 {v4.16b}, [x5]
-umov x5, v4.d[1] /* keep swabbed ctr in reg */
-rev x5, x5
+
+.Lctrnotfirst:
+umov x8, v4.d[1] /* keep swabbed ctr in reg */
+rev x8, x8
 #if INTERLEAVE >= 2
-cmn w5, w4 /* 32 bit overflow? */
+cmn w8, w4 /* 32 bit overflow? */
 bcs .Lctrloop
 .LctrloopNx:
 subs w4, w4, #INTERLEAVE

@@ -292,11 +285,11 @@ AES_ENTRY(aes_ctr_encrypt)
 #if INTERLEAVE == 2
 mov v0.8b, v4.8b
 mov v1.8b, v4.8b
-rev x7, x5
-add x5, x5, #1
+rev x7, x8
+add x8, x8, #1
 ins v0.d[1], x7
-rev x7, x5
-add x5, x5, #1
+rev x7, x8
+add x8, x8, #1
 ins v1.d[1], x7
 ld1 {v2.16b-v3.16b}, [x1], #32 /* get 2 input blocks */
 do_encrypt_block2x

@@ -305,7 +298,7 @@ AES_ENTRY(aes_ctr_encrypt)
 st1 {v0.16b-v1.16b}, [x0], #32
 #else
 ldr q8, =0x30000000200000001 /* addends 1,2,3[,0] */
-dup v7.4s, w5
+dup v7.4s, w8
 mov v0.16b, v4.16b
 add v7.4s, v7.4s, v8.4s
 mov v1.16b, v4.16b

@@ -323,18 +316,12 @@ AES_ENTRY(aes_ctr_encrypt)
 eor v2.16b, v7.16b, v2.16b
 eor v3.16b, v5.16b, v3.16b
 st1 {v0.16b-v3.16b}, [x0], #64
-add x5, x5, #INTERLEAVE
+add x8, x8, #INTERLEAVE
 #endif
-cbz w4, .LctroutNx
-.LctrincNx:
-rev x7, x5
+rev x7, x8
 ins v4.d[1], x7
+cbz w4, .Lctrout
 b .LctrloopNx
-.LctroutNx:
-sub x5, x5, #1
-rev x7, x5
-ins v4.d[1], x7
-b .Lctrout
 .Lctr1x:
 adds w4, w4, #INTERLEAVE
 beq .Lctrout

@@ -342,30 +329,39 @@ AES_ENTRY(aes_ctr_encrypt)
 .Lctrloop:
 mov v0.16b, v4.16b
 encrypt_block v0, w3, x2, x6, w7
+
+adds x8, x8, #1 /* increment BE ctr */
+rev x7, x8
+ins v4.d[1], x7
+bcs .Lctrcarry /* overflow? */
+
+.Lctrcarrydone:
 subs w4, w4, #1
 bmi .Lctrhalfblock /* blocks < 0 means 1/2 block */
 ld1 {v3.16b}, [x1], #16
 eor v3.16b, v0.16b, v3.16b
 st1 {v3.16b}, [x0], #16
-beq .Lctrout
-.Lctrinc:
-adds x5, x5, #1 /* increment BE ctr */
-rev x7, x5
-ins v4.d[1], x7
-bcc .Lctrloop /* no overflow? */
+bne .Lctrloop
+
+.Lctrout:
+st1 {v4.16b}, [x5] /* return next CTR value */
+FRAME_POP
+ret
+
+.Lctrhalfblock:
+ld1 {v3.8b}, [x1]
+eor v3.8b, v0.8b, v3.8b
+st1 {v3.8b}, [x0]
+FRAME_POP
+ret
+
+.Lctrcarry:
 umov x7, v4.d[0] /* load upper word of ctr */
 rev x7, x7 /* ... to handle the carry */
 add x7, x7, #1
 rev x7, x7
 ins v4.d[0], x7
-b .Lctrloop
+b .Lctrcarrydone
-.Lctrhalfblock:
-ld1 {v3.8b}, [x1]
-eor v3.8b, v0.8b, v3.8b
-st1 {v3.8b}, [x0]
-.Lctrout:
-FRAME_POP
-ret
 AES_ENDPROC(aes_ctr_encrypt)
 .ltorg
@@ -11,6 +11,7 @@
 * for more details.
 */

+#include <linux/acpi.h>
 #include <linux/cpu.h>
 #include <linux/cpumask.h>
 #include <linux/init.h>

@@ -209,7 +210,12 @@ static struct notifier_block init_cpu_capacity_notifier = {

 static int __init register_cpufreq_notifier(void)
 {
-if (cap_parsing_failed)
+/*
+ * on ACPI-based systems we need to use the default cpu capacity
+ * until we have the necessary code to parse the cpu capacity, so
+ * skip registering cpufreq notifier.
+ */
+if (!acpi_disabled || cap_parsing_failed)
 return -EINVAL;

 if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) {
@@ -139,7 +139,7 @@ static inline void atomic64_dec(atomic64_t *v)
 #define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)
 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
 #define atomic64_inc_and_test(v) (atomic64_inc_return((v)) == 0)
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

 #define atomic_cmpxchg(v, old, new) (cmpxchg(&(v)->counter, old, new))
 #define atomic_xchg(v, new) (xchg(&(v)->counter, new))

@@ -161,6 +161,39 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 return c;
 }

+static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
+{
+long long c, old;
+
+c = atomic64_read(v);
+for (;;) {
+if (unlikely(c == u))
+break;
+old = atomic64_cmpxchg(v, c, c + i);
+if (likely(old == c))
+break;
+c = old;
+}
+return c != u;
+}
+
+static inline long long atomic64_dec_if_positive(atomic64_t *v)
+{
+long long c, old, dec;
+
+c = atomic64_read(v);
+for (;;) {
+dec = c - 1;
+if (unlikely(dec < 0))
+break;
+old = atomic64_cmpxchg((v), c, dec);
+if (likely(old == c))
+break;
+c = old;
+}
+return dec;
+}
+
 #define ATOMIC_OP(op) \
 static inline int atomic_fetch_##op(int i, atomic_t *v) \
 { \
@@ -16,7 +16,7 @@
 struct task_struct;
 struct thread_struct;

-#if !defined(CONFIG_LAZY_SAVE_FPU)
+#if defined(CONFIG_FPU) && !defined(CONFIG_LAZY_SAVE_FPU)
 struct fpu_state_struct;
 extern asmlinkage void fpu_save(struct fpu_state_struct *);
 #define switch_fpu(prev, next) \
@@ -6,7 +6,7 @@
 #endif

 #include <linux/compiler.h>
-#include <asm/types.h> /* for BITS_PER_LONG/SHIFT_PER_LONG */
+#include <asm/types.h>
 #include <asm/byteorder.h>
 #include <asm/barrier.h>
 #include <linux/atomic.h>

@@ -17,6 +17,12 @@
 * to include/asm-i386/bitops.h or kerneldoc
 */

+#if __BITS_PER_LONG == 64
+#define SHIFT_PER_LONG 6
+#else
+#define SHIFT_PER_LONG 5
+#endif
+
 #define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1))

@@ -3,10 +3,8 @@

 #if defined(__LP64__)
 #define __BITS_PER_LONG 64
-#define SHIFT_PER_LONG 6
 #else
 #define __BITS_PER_LONG 32
-#define SHIFT_PER_LONG 5
 #endif

 #include <asm-generic/bitsperlong.h>
@@ -1,6 +1,7 @@
 #ifndef _PARISC_SWAB_H
 #define _PARISC_SWAB_H

+#include <asm/bitsperlong.h>
 #include <linux/types.h>
 #include <linux/compiler.h>

@@ -38,7 +39,7 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
 }
 #define __arch_swab32 __arch_swab32

-#if BITS_PER_LONG > 32
+#if __BITS_PER_LONG > 32
 /*
 ** From "PA-RISC 2.0 Architecture", HP Professional Books.
 ** See Appendix I page 8 , "Endian Byte Swapping".

@@ -61,6 +62,6 @@ static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
 return x;
 }
 #define __arch_swab64 __arch_swab64
-#endif /* BITS_PER_LONG > 32 */
+#endif /* __BITS_PER_LONG > 32 */

 #endif /* _PARISC_SWAB_H */
@@ -164,7 +164,6 @@ config PPC
 select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
 select HAVE_ARCH_HARDENED_USERCOPY
 select HAVE_KERNEL_GZIP
-select HAVE_CC_STACKPROTECTOR

 config GENERIC_CSUM
 def_bool CPU_LITTLE_ENDIAN

@@ -484,6 +483,7 @@ config RELOCATABLE
 bool "Build a relocatable kernel"
 depends on (PPC64 && !COMPILE_TEST) || (FLATMEM && (44x || FSL_BOOKE))
 select NONSTATIC_KERNEL
+select MODULE_REL_CRCS if MODVERSIONS
 help
 This builds a kernel image that is capable of running at the
 location the kernel is loaded at. For ppc32, there is no any
@@ -23,7 +23,9 @@ static __always_inline bool cpu_has_feature(unsigned long feature)
 {
 int i;

+#ifndef __clang__ /* clang can't cope with this */
 BUILD_BUG_ON(!__builtin_constant_p(feature));
+#endif

 #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
 if (!static_key_initialized) {

@@ -160,7 +160,9 @@ static __always_inline bool mmu_has_feature(unsigned long feature)
 {
 int i;

+#ifndef __clang__ /* clang can't cope with this */
 BUILD_BUG_ON(!__builtin_constant_p(feature));
+#endif

 #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
 if (!static_key_initialized) {
@@ -90,9 +90,5 @@ static inline int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sec
 }
 #endif

-#if defined(CONFIG_MODVERSIONS) && defined(CONFIG_PPC64)
-#define ARCH_RELOCATES_KCRCTAB
-#define reloc_start PHYSICAL_START
-#endif
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_MODULE_H */
@@ -649,9 +649,10 @@
 #define SRR1_ISI_N_OR_G 0x10000000 /* ISI: Access is no-exec or G */
 #define SRR1_ISI_PROT 0x08000000 /* ISI: Other protection fault */
 #define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */
-#define SRR1_WAKEMASK_P8 0x003c0000 /* reason for wakeup on POWER8 */
+#define SRR1_WAKEMASK_P8 0x003c0000 /* reason for wakeup on POWER8 and 9 */
 #define SRR1_WAKESYSERR 0x00300000 /* System error */
 #define SRR1_WAKEEE 0x00200000 /* External interrupt */
+#define SRR1_WAKEHVI 0x00240000 /* Hypervisor Virtualization Interrupt (P9) */
 #define SRR1_WAKEMT 0x00280000 /* mtctrl */
 #define SRR1_WAKEHMI 0x00280000 /* Hypervisor maintenance */
 #define SRR1_WAKEDEC 0x00180000 /* Decrementer interrupt */
@@ -1,40 +0,0 @@
-/*
- * GCC stack protector support.
- *
- * Stack protector works by putting predefined pattern at the start of
- * the stack frame and verifying that it hasn't been overwritten when
- * returning from the function. The pattern is called stack canary
- * and gcc expects it to be defined by a global variable called
- * "__stack_chk_guard" on PPC. This unfortunately means that on SMP
- * we cannot have a different canary value per task.
- */
-
-#ifndef _ASM_STACKPROTECTOR_H
-#define _ASM_STACKPROTECTOR_H
-
-#include <linux/random.h>
-#include <linux/version.h>
-#include <asm/reg.h>
-
-extern unsigned long __stack_chk_guard;
-
-/*
- * Initialize the stackprotector canary value.
- *
- * NOTE: this must only be called from functions that never return,
- * and it must always be inlined.
- */
-static __always_inline void boot_init_stack_canary(void)
-{
-unsigned long canary;
-
-/* Try to get a semi random initial value. */
-get_random_bytes(&canary, sizeof(canary));
-canary ^= mftb();
-canary ^= LINUX_VERSION_CODE;
-
-current->stack_canary = canary;
-__stack_chk_guard = current->stack_canary;
-}
-
-#endif /* _ASM_STACKPROTECTOR_H */
@@ -44,6 +44,7 @@ static inline int icp_hv_init(void) { return -ENODEV; }

 #ifdef CONFIG_PPC_POWERNV
 extern int icp_opal_init(void);
+extern void icp_opal_flush_interrupt(void);
 #else
 static inline int icp_opal_init(void) { return -ENODEV; }
 #endif
@@ -19,10 +19,6 @@ CFLAGS_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)

-# -fstack-protector triggers protection checks in this code,
-# but it is being used too early to link to meaningful stack_chk logic.
-CFLAGS_prom_init.o += $(call cc-option, -fno-stack-protector)
-
 ifdef CONFIG_FUNCTION_TRACER
 # Do not trace early boot code
 CFLAGS_REMOVE_cputable.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)

@@ -91,9 +91,6 @@ int main(void)
 DEFINE(TI_livepatch_sp, offsetof(struct thread_info, livepatch_sp));
 #endif

-#ifdef CONFIG_CC_STACKPROTECTOR
-DEFINE(TSK_STACK_CANARY, offsetof(struct task_struct, stack_canary));
-#endif
 DEFINE(KSP, offsetof(struct thread_struct, ksp));
 DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
 #ifdef CONFIG_BOOKE
@@ -545,7 +545,7 @@ static void *eeh_pe_detach_dev(void *data, void *userdata)
 static void *__eeh_clear_pe_frozen_state(void *data, void *flag)
 {
 struct eeh_pe *pe = (struct eeh_pe *)data;
-bool *clear_sw_state = flag;
+bool clear_sw_state = *(bool *)flag;
 int i, rc = 1;

 for (i = 0; rc && i < 3; i++)
@@ -674,11 +674,7 @@ BEGIN_FTR_SECTION
 mtspr SPRN_SPEFSCR,r0 /* restore SPEFSCR reg */
 END_FTR_SECTION_IFSET(CPU_FTR_SPE)
 #endif /* CONFIG_SPE */
-#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
-lwz r0,TSK_STACK_CANARY(r2)
-lis r4,__stack_chk_guard@ha
-stw r0,__stack_chk_guard@l(r4)
-#endif
 lwz r0,_CCR(r1)
 mtcrf 0xFF,r0
 /* r3-r12 are destroyed -- Cort */
@@ -286,14 +286,6 @@ static void dedotify_versions(struct modversion_info *vers,
 for (end = (void *)vers + size; vers < end; vers++)
 if (vers->name[0] == '.') {
 memmove(vers->name, vers->name+1, strlen(vers->name));
-#ifdef ARCH_RELOCATES_KCRCTAB
-/* The TOC symbol has no CRC computed. To avoid CRC
- * check failing, we must force it to the expected
- * value (see CRC check in module.c).
- */
-if (!strcmp(vers->name, "TOC."))
-vers->crc = -(unsigned long)reloc_start;
-#endif
 }
 }

@@ -64,12 +64,6 @@
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>

-#ifdef CONFIG_CC_STACKPROTECTOR
-#include <linux/stackprotector.h>
-unsigned long __stack_chk_guard __read_mostly;
-EXPORT_SYMBOL(__stack_chk_guard);
-#endif
-
 /* Transactional Memory debug */
 #ifdef TM_DEBUG_SW
 #define TM_DEBUG(x...) printk(KERN_INFO x)
@@ -2834,6 +2834,9 @@ static void __init prom_find_boot_cpu(void)

 cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);

+if (!PHANDLE_VALID(cpu_pkg))
+return;
+
 prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
 prom.cpu = be32_to_cpu(rval);

@@ -253,8 +253,11 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
 if (unlikely(debugger_fault_handler(regs)))
 goto bail;

-/* On a kernel SLB miss we can only check for a valid exception entry */
-if (!user_mode(regs) && (address >= TASK_SIZE)) {
+/*
+ * The kernel should never take an execute fault nor should it
+ * take a page fault to a kernel address.
+ */
+if (!user_mode(regs) && (is_exec || (address >= TASK_SIZE))) {
 rc = SIGSEGV;
 goto bail;
 }

@@ -390,20 +393,6 @@ good_area:
 #endif /* CONFIG_8xx */

 if (is_exec) {
-/*
- * An execution fault + no execute ?
- *
- * On CPUs that don't have CPU_FTR_COHERENT_ICACHE we
- * deliberately create NX mappings, and use the fault to do the
- * cache flush. This is usually handled in hash_page_do_lazy_icache()
- * but we could end up here if that races with a concurrent PTE
- * update. In that case we need to fall through here to the VMA
- * check below.
- */
-if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
-(regs->msr & SRR1_ISI_N_OR_G))
-goto bad_area;
-
 /*
 * Allow execution from readable areas if the MMU does not
 * provide separate controls over reading and executing.
@@ -347,7 +347,8 @@ early_param("disable_radix", parse_disable_radix);
 void __init mmu_early_init_devtree(void)
 {
 /* Disable radix mode based on kernel command line. */
-if (disable_radix)
+/* We don't yet have the machinery to do radix as a guest. */
+if (disable_radix || !(mfmsr() & MSR_HV))
 cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;

 if (early_radix_enabled())
@@ -65,7 +65,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
 if (!pmdp)
 return -ENOMEM;
 if (map_page_size == PMD_SIZE) {
-ptep = (pte_t *)pudp;
+ptep = pmdp_ptep(pmdp);
 goto set_the_pte;
 }
 ptep = pte_alloc_kernel(pmdp, ea);

@@ -90,7 +90,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
 }
 pmdp = pmd_offset(pudp, ea);
 if (map_page_size == PMD_SIZE) {
-ptep = (pte_t *)pudp;
+ptep = pmdp_ptep(pmdp);
 goto set_the_pte;
 }
 if (!pmd_present(*pmdp)) {
@@ -50,9 +50,7 @@ static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
 for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) {
 __tlbiel_pid(pid, set, ric);
 }
-if (cpu_has_feature(CPU_FTR_POWER9_DD1))
-asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
-return;
+asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
 }

 static inline void _tlbie_pid(unsigned long pid, unsigned long ric)

@@ -85,8 +83,6 @@ static inline void _tlbiel_va(unsigned long va, unsigned long pid,
 asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
 : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
 asm volatile("ptesync": : :"memory");
-if (cpu_has_feature(CPU_FTR_POWER9_DD1))
-asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
 }

 static inline void _tlbie_va(unsigned long va, unsigned long pid,
@@ -155,8 +155,10 @@ static void pnv_smp_cpu_kill_self(void)
 wmask = SRR1_WAKEMASK_P8;

 idle_states = pnv_get_supported_cpuidle_states();
+
 /* We don't want to take decrementer interrupts while we are offline,
- * so clear LPCR:PECE1. We keep PECE2 enabled.
+ * so clear LPCR:PECE1. We keep PECE2 (and LPCR_PECE_HVEE on P9)
+ * enabled as to let IPIs in.
 */
 mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);

@@ -206,8 +208,12 @@ static void pnv_smp_cpu_kill_self(void)
 * contains 0.
 */
 if (((srr1 & wmask) == SRR1_WAKEEE) ||
+((srr1 & wmask) == SRR1_WAKEHVI) ||
 (local_paca->irq_happened & PACA_IRQ_EE)) {
-icp_native_flush_interrupt();
+if (cpu_has_feature(CPU_FTR_ARCH_300))
+icp_opal_flush_interrupt();
+else
+icp_native_flush_interrupt();
 } else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
 unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
 asm volatile(PPC_MSGCLR(%0) : : "r" (msg));

@@ -221,6 +227,8 @@ static void pnv_smp_cpu_kill_self(void)
 if (srr1 && !generic_check_cpu_restart(cpu))
 DBG("CPU%d Unexpected exit while offline !\n", cpu);
 }
+
+/* Re-enable decrementer interrupts */
 mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_PECE1);
 DBG("CPU%d coming online...\n", cpu);
 }
@@ -120,18 +120,49 @@ static void icp_opal_cause_ipi(int cpu, unsigned long data)
 {
 int hw_cpu = get_hard_smp_processor_id(cpu);

+kvmppc_set_host_ipi(cpu, 1);
 opal_int_set_mfrr(hw_cpu, IPI_PRIORITY);
 }

 static irqreturn_t icp_opal_ipi_action(int irq, void *dev_id)
 {
-int hw_cpu = hard_smp_processor_id();
+int cpu = smp_processor_id();

-opal_int_set_mfrr(hw_cpu, 0xff);
+kvmppc_set_host_ipi(cpu, 0);
+opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);

 return smp_ipi_demux();
 }

+/*
+ * Called when an interrupt is received on an off-line CPU to
+ * clear the interrupt, so that the CPU can go back to nap mode.
+ */
+void icp_opal_flush_interrupt(void)
+{
+unsigned int xirr;
+unsigned int vec;
+
+do {
+xirr = icp_opal_get_xirr();
+vec = xirr & 0x00ffffff;
+if (vec == XICS_IRQ_SPURIOUS)
+break;
+if (vec == XICS_IPI) {
+/* Clear pending IPI */
+int cpu = smp_processor_id();
+kvmppc_set_host_ipi(cpu, 0);
+opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
+} else {
+pr_err("XICS: hw interrupt 0x%x to offline cpu, "
+"disabling\n", vec);
+xics_mask_unknown_vec(vec);
+}
+
+/* EOI the interrupt */
+} while (opal_int_eoi(xirr) > 0);
+}
+
 #endif /* CONFIG_SMP */

 static const struct icp_ops icp_opal_ops = {
@@ -963,6 +963,11 @@ static int s390_fpregs_set(struct task_struct *target,
 if (target == current)
 save_fpu_regs();

+if (MACHINE_HAS_VX)
+convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
+else
+memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));
+
 /* If setting FPC, must validate it first. */
 if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
 u32 ufpc[2] = { target->thread.fpu.fpc, 0 };

@@ -1067,6 +1072,9 @@ static int s390_vxrs_low_set(struct task_struct *target,
 if (target == current)
 save_fpu_regs();

+for (i = 0; i < __NUM_VXRS_LOW; i++)
+vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
+
 rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
 if (rc == 0)
 for (i = 0; i < __NUM_VXRS_LOW; i++)
@@ -202,7 +202,7 @@ static inline pgste_t ptep_xchg_start(struct mm_struct *mm,
 return pgste;
 }

-static inline void ptep_xchg_commit(struct mm_struct *mm,
+static inline pte_t ptep_xchg_commit(struct mm_struct *mm,
 unsigned long addr, pte_t *ptep,
 pgste_t pgste, pte_t old, pte_t new)
 {

@@ -220,6 +220,7 @@ static inline void ptep_xchg_commit(struct mm_struct *mm,
 } else {
 *ptep = new;
 }
+return old;
 }

 pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,

@@ -231,7 +232,7 @@ pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
 preempt_disable();
 pgste = ptep_xchg_start(mm, addr, ptep);
 old = ptep_flush_direct(mm, addr, ptep);
-ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
+old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
 preempt_enable();
 return old;
 }

@@ -246,7 +247,7 @@ pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
 preempt_disable();
 pgste = ptep_xchg_start(mm, addr, ptep);
 old = ptep_flush_lazy(mm, addr, ptep);
-ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
+old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
 preempt_enable();
 return old;
 }
@@ -35,15 +35,15 @@ void __tsb_context_switch(unsigned long pgd_pa,
 static inline void tsb_context_switch(struct mm_struct *mm)
 {
 __tsb_context_switch(__pa(mm->pgd),
-&mm->context.tsb_block[0],
+&mm->context.tsb_block[MM_TSB_BASE],
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-(mm->context.tsb_block[1].tsb ?
-&mm->context.tsb_block[1] :
+(mm->context.tsb_block[MM_TSB_HUGE].tsb ?
+&mm->context.tsb_block[MM_TSB_HUGE] :
 NULL)
 #else
 NULL
 #endif
-, __pa(&mm->context.tsb_descr[0]));
+, __pa(&mm->context.tsb_descr[MM_TSB_BASE]));
 }

 void tsb_grow(struct mm_struct *mm,
@@ -1021,7 +1021,7 @@ static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)
 unsigned long order = get_order(size);
 unsigned long p;

-p = __get_free_pages(GFP_KERNEL, order);
+p = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
 if (!p) {
 prom_printf("SUN4V: Error, cannot allocate queue.\n");
 prom_halt();
@@ -43,8 +43,8 @@ static const char poweroff_msg[32] __attribute__((aligned(32))) =
 "Linux powering off";
 static const char rebooting_msg[32] __attribute__((aligned(32))) =
 "Linux rebooting";
-static const char panicing_msg[32] __attribute__((aligned(32))) =
-"Linux panicing";
+static const char panicking_msg[32] __attribute__((aligned(32))) =
+"Linux panicking";

 static int sstate_reboot_call(struct notifier_block *np, unsigned long type, void *_unused)
 {

@@ -76,7 +76,7 @@ static struct notifier_block sstate_reboot_notifier = {

 static int sstate_panic_event(struct notifier_block *n, unsigned long event, void *ptr)
 {
-do_set_sstate(HV_SOFT_STATE_TRANSITION, panicing_msg);
+do_set_sstate(HV_SOFT_STATE_TRANSITION, panicking_msg);

 return NOTIFY_DONE;
 }
@@ -2051,6 +2051,73 @@ void sun4v_resum_overflow(struct pt_regs *regs)
 atomic_inc(&sun4v_resum_oflow_cnt);
 }

+/* Given a set of registers, get the virtual addressi that was being accessed
+ * by the faulting instructions at tpc.
+ */
+static unsigned long sun4v_get_vaddr(struct pt_regs *regs)
+{
+unsigned int insn;
+
+if (!copy_from_user(&insn, (void __user *)regs->tpc, 4)) {
+return compute_effective_address(regs, insn,
+(insn >> 25) & 0x1f);
+}
+return 0;
+}
+
+/* Attempt to handle non-resumable errors generated from userspace.
+ * Returns true if the signal was handled, false otherwise.
+ */
+bool sun4v_nonresum_error_user_handled(struct pt_regs *regs,
+struct sun4v_error_entry *ent) {
+
+unsigned int attrs = ent->err_attrs;
+
+if (attrs & SUN4V_ERR_ATTRS_MEMORY) {
+unsigned long addr = ent->err_raddr;
+siginfo_t info;
+
+if (addr == ~(u64)0) {
+/* This seems highly unlikely to ever occur */
+pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory error detected in unknown location!\n");
+} else {
+unsigned long page_cnt = DIV_ROUND_UP(ent->err_size,
+PAGE_SIZE);
+
+/* Break the unfortunate news. */
+pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory failed at %016lX\n",
+addr);
+pr_emerg("SUN4V NON-RECOVERABLE ERROR: Claiming %lu ages.\n",
+page_cnt);
+
+while (page_cnt-- > 0) {
+if (pfn_valid(addr >> PAGE_SHIFT))
+get_page(pfn_to_page(addr >> PAGE_SHIFT));
+addr += PAGE_SIZE;
+}
+}
+info.si_signo = SIGKILL;
+info.si_errno = 0;
+info.si_trapno = 0;
+force_sig_info(info.si_signo, &info, current);
+
+return true;
+}
+if (attrs & SUN4V_ERR_ATTRS_PIO) {
+siginfo_t info;
+
+info.si_signo = SIGBUS;
+info.si_code = BUS_ADRERR;
+info.si_addr = (void __user *)sun4v_get_vaddr(regs);
+force_sig_info(info.si_signo, &info, current);
+
+return true;
+}
+
+/* Default to doing nothing */
+return false;
+}
+
 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
 * Log the event, clear the first word of the entry, and die.
 */

@@ -2075,6 +2142,12 @@ void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)

 put_cpu();

+if (!(regs->tstate & TSTATE_PRIV) &&
+sun4v_nonresum_error_user_handled(regs, &local_copy)) {
+/* DON'T PANIC: This userspace error was handled. */
+return;
+}
+
 #ifdef CONFIG_PCI
 /* Check for the special PCI poke sequence. */
 if (pci_poke_in_progress && pci_poke_cpu == cpu) {
@@ -111,7 +111,7 @@ static int tile_gpr_set(struct task_struct *target,
 const void *kbuf, const void __user *ubuf)
 {
 int ret;
-struct pt_regs regs;
+struct pt_regs regs = *task_pt_regs(target);

 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0,
 sizeof(regs));
@@ -1085,9 +1085,9 @@ static void aesni_free_simds(void)
 aesni_simd_skciphers[i]; i++)
 simd_skcipher_free(aesni_simd_skciphers[i]);

-for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2) &&
-aesni_simd_skciphers2[i].simd; i++)
+for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2); i++)
+if (aesni_simd_skciphers2[i].simd)
 simd_skcipher_free(aesni_simd_skciphers2[i].simd);
 }

 static int __init aesni_init(void)

@@ -1168,7 +1168,7 @@ static int __init aesni_init(void)
 simd = simd_skcipher_create_compat(algname, drvname, basename);
 err = PTR_ERR(simd);
 if (IS_ERR(simd))
-goto unregister_simds;
+continue;

 aesni_simd_skciphers2[i].simd = simd;
 }
@@ -161,7 +161,13 @@ static u64 rapl_timer_ms;

 static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
 {
-return rapl_pmus->pmus[topology_logical_package_id(cpu)];
+unsigned int pkgid = topology_logical_package_id(cpu);
+
+/*
+ * The unsigned check also catches the '-1' return value for non
+ * existent mappings in the topology map.
+ */
+return pkgid < rapl_pmus->maxpkg ? rapl_pmus->pmus[pkgid] : NULL;
 }

 static inline u64 rapl_read_counter(struct perf_event *event)

@@ -402,6 +408,8 @@ static int rapl_pmu_event_init(struct perf_event *event)

 /* must be done before validate_group */
 pmu = cpu_to_rapl_pmu(event->cpu);
+if (!pmu)
+return -EINVAL;
 event->cpu = pmu->cpu;
 event->pmu_private = pmu;
 event->hw.event_base = msr;

@@ -585,6 +593,20 @@ static int rapl_cpu_online(unsigned int cpu)
 struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
 int target;

+if (!pmu) {
+pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
+if (!pmu)
+return -ENOMEM;
+
+raw_spin_lock_init(&pmu->lock);
+INIT_LIST_HEAD(&pmu->active_list);
+pmu->pmu = &rapl_pmus->pmu;
+pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
+rapl_hrtimer_init(pmu);
+
+rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
+}
+
 /*
 * Check if there is an online cpu in the package which collects rapl
 * events already.

@@ -598,27 +620,6 @@ static int rapl_cpu_online(unsigned int cpu)
 return 0;
 }

-static int rapl_cpu_prepare(unsigned int cpu)
-{
-struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
-
-if (pmu)
-return 0;
-
-pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
-if (!pmu)
-return -ENOMEM;
-
-raw_spin_lock_init(&pmu->lock);
-INIT_LIST_HEAD(&pmu->active_list);
-pmu->pmu = &rapl_pmus->pmu;
-pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
-pmu->cpu = -1;
-rapl_hrtimer_init(pmu);
-rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
-return 0;
-}
-
 static int rapl_check_hw_unit(bool apply_quirk)
 {
 u64 msr_rapl_power_unit_bits;

@@ -803,29 +804,21 @@ static int __init rapl_pmu_init(void)
 /*
 * Install callbacks. Core will call them for each online cpu.
 */
-ret = cpuhp_setup_state(CPUHP_PERF_X86_RAPL_PREP, "perf/x86/rapl:prepare",
-rapl_cpu_prepare, NULL);
-if (ret)
-goto out;
-
 ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE,
 "perf/x86/rapl:online",
 rapl_cpu_online, rapl_cpu_offline);
 if (ret)
-goto out1;
+goto out;

 ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1);
 if (ret)
-goto out2;
+goto out1;

 rapl_advertise();
 return 0;

-out2:
-cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
 out1:
-cpuhp_remove_state(CPUHP_PERF_X86_RAPL_PREP);
+cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
 out:
 pr_warn("Initialization failed (%d), disabled\n", ret);
 cleanup_rapl_pmus();

@@ -836,7 +829,6 @@ module_init(rapl_pmu_init);
 static void __exit intel_rapl_exit(void)
 {
 cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE);
-cpuhp_remove_state_nocalls(CPUHP_PERF_X86_RAPL_PREP);
 perf_pmu_unregister(&rapl_pmus->pmu);
 cleanup_rapl_pmus();
 }
@@ -100,7 +100,13 @@ ssize_t uncore_event_show(struct kobject *kobj,
 
 struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
 {
-	return pmu->boxes[topology_logical_package_id(cpu)];
+	unsigned int pkgid = topology_logical_package_id(cpu);
+
+	/*
+	 * The unsigned check also catches the '-1' return value for non
+	 * existent mappings in the topology map.
+	 */
+	return pkgid < max_packages ? pmu->boxes[pkgid] : NULL;
 }
 
 u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
@@ -764,30 +770,6 @@ static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
 	pmu->registered = false;
 }
 
-static void __uncore_exit_boxes(struct intel_uncore_type *type, int cpu)
-{
-	struct intel_uncore_pmu *pmu = type->pmus;
-	struct intel_uncore_box *box;
-	int i, pkg;
-
-	if (pmu) {
-		pkg = topology_physical_package_id(cpu);
-		for (i = 0; i < type->num_boxes; i++, pmu++) {
-			box = pmu->boxes[pkg];
-			if (box)
-				uncore_box_exit(box);
-		}
-	}
-}
-
-static void uncore_exit_boxes(void *dummy)
-{
-	struct intel_uncore_type **types;
-
-	for (types = uncore_msr_uncores; *types; types++)
-		__uncore_exit_boxes(*types++, smp_processor_id());
-}
-
 static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
 {
 	int pkg;
@@ -1058,86 +1040,6 @@ static void uncore_pci_exit(void)
 	}
 }
 
-static int uncore_cpu_dying(unsigned int cpu)
-{
-	struct intel_uncore_type *type, **types = uncore_msr_uncores;
-	struct intel_uncore_pmu *pmu;
-	struct intel_uncore_box *box;
-	int i, pkg;
-
-	pkg = topology_logical_package_id(cpu);
-	for (; *types; types++) {
-		type = *types;
-		pmu = type->pmus;
-		for (i = 0; i < type->num_boxes; i++, pmu++) {
-			box = pmu->boxes[pkg];
-			if (box && atomic_dec_return(&box->refcnt) == 0)
-				uncore_box_exit(box);
-		}
-	}
-	return 0;
-}
-
-static int first_init;
-
-static int uncore_cpu_starting(unsigned int cpu)
-{
-	struct intel_uncore_type *type, **types = uncore_msr_uncores;
-	struct intel_uncore_pmu *pmu;
-	struct intel_uncore_box *box;
-	int i, pkg, ncpus = 1;
-
-	if (first_init) {
-		/*
-		 * On init we get the number of online cpus in the package
-		 * and set refcount for all of them.
-		 */
-		ncpus = cpumask_weight(topology_core_cpumask(cpu));
-	}
-
-	pkg = topology_logical_package_id(cpu);
-	for (; *types; types++) {
-		type = *types;
-		pmu = type->pmus;
-		for (i = 0; i < type->num_boxes; i++, pmu++) {
-			box = pmu->boxes[pkg];
-			if (!box)
-				continue;
-			/* The first cpu on a package activates the box */
-			if (atomic_add_return(ncpus, &box->refcnt) == ncpus)
-				uncore_box_init(box);
-		}
-	}
-
-	return 0;
-}
-
-static int uncore_cpu_prepare(unsigned int cpu)
-{
-	struct intel_uncore_type *type, **types = uncore_msr_uncores;
-	struct intel_uncore_pmu *pmu;
-	struct intel_uncore_box *box;
-	int i, pkg;
-
-	pkg = topology_logical_package_id(cpu);
-	for (; *types; types++) {
-		type = *types;
-		pmu = type->pmus;
-		for (i = 0; i < type->num_boxes; i++, pmu++) {
-			if (pmu->boxes[pkg])
-				continue;
-			/* First cpu of a package allocates the box */
-			box = uncore_alloc_box(type, cpu_to_node(cpu));
-			if (!box)
-				return -ENOMEM;
-			box->pmu = pmu;
-			box->pkgid = pkg;
-			pmu->boxes[pkg] = box;
-		}
-	}
-	return 0;
-}
-
 static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
 				   int new_cpu)
 {
@@ -1177,12 +1079,14 @@ static void uncore_change_context(struct intel_uncore_type **uncores,
 
 static int uncore_event_cpu_offline(unsigned int cpu)
 {
-	int target;
+	struct intel_uncore_type *type, **types = uncore_msr_uncores;
+	struct intel_uncore_pmu *pmu;
+	struct intel_uncore_box *box;
+	int i, pkg, target;
 
 	/* Check if exiting cpu is used for collecting uncore events */
 	if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
-		return 0;
+		goto unref;
 
 	/* Find a new cpu to collect uncore events */
 	target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
 
@@ -1194,12 +1098,82 @@ static int uncore_event_cpu_offline(unsigned int cpu)
 
 	uncore_change_context(uncore_msr_uncores, cpu, target);
 	uncore_change_context(uncore_pci_uncores, cpu, target);
+
+unref:
+	/* Clear the references */
+	pkg = topology_logical_package_id(cpu);
+	for (; *types; types++) {
+		type = *types;
+		pmu = type->pmus;
+		for (i = 0; i < type->num_boxes; i++, pmu++) {
+			box = pmu->boxes[pkg];
+			if (box && atomic_dec_return(&box->refcnt) == 0)
+				uncore_box_exit(box);
+		}
+	}
 	return 0;
 }
 
+static int allocate_boxes(struct intel_uncore_type **types,
+			  unsigned int pkg, unsigned int cpu)
+{
+	struct intel_uncore_box *box, *tmp;
+	struct intel_uncore_type *type;
+	struct intel_uncore_pmu *pmu;
+	LIST_HEAD(allocated);
+	int i;
+
+	/* Try to allocate all required boxes */
+	for (; *types; types++) {
+		type = *types;
+		pmu = type->pmus;
+		for (i = 0; i < type->num_boxes; i++, pmu++) {
+			if (pmu->boxes[pkg])
+				continue;
+			box = uncore_alloc_box(type, cpu_to_node(cpu));
+			if (!box)
+				goto cleanup;
+			box->pmu = pmu;
+			box->pkgid = pkg;
+			list_add(&box->active_list, &allocated);
+		}
+	}
+	/* Install them in the pmus */
+	list_for_each_entry_safe(box, tmp, &allocated, active_list) {
+		list_del_init(&box->active_list);
+		box->pmu->boxes[pkg] = box;
+	}
+	return 0;
+
+cleanup:
+	list_for_each_entry_safe(box, tmp, &allocated, active_list) {
+		list_del_init(&box->active_list);
+		kfree(box);
+	}
+	return -ENOMEM;
+}
+
 static int uncore_event_cpu_online(unsigned int cpu)
 {
-	int target;
+	struct intel_uncore_type *type, **types = uncore_msr_uncores;
+	struct intel_uncore_pmu *pmu;
+	struct intel_uncore_box *box;
+	int i, ret, pkg, target;
+
+	pkg = topology_logical_package_id(cpu);
+	ret = allocate_boxes(types, pkg, cpu);
+	if (ret)
+		return ret;
+
+	for (; *types; types++) {
+		type = *types;
+		pmu = type->pmus;
+		for (i = 0; i < type->num_boxes; i++, pmu++) {
+			box = pmu->boxes[pkg];
+			if (!box && atomic_inc_return(&box->refcnt) == 1)
+				uncore_box_init(box);
+		}
+	}
 
 	/*
 	 * Check if there is an online cpu in the package
@@ -1389,38 +1363,16 @@ static int __init intel_uncore_init(void)
 	if (cret && pret)
 		return -ENODEV;
 
-	/*
-	 * Install callbacks. Core will call them for each online cpu.
-	 *
-	 * The first online cpu of each package allocates and takes
-	 * the refcounts for all other online cpus in that package.
-	 * If msrs are not enabled no allocation is required and
-	 * uncore_cpu_prepare() is not called for each online cpu.
-	 */
-	if (!cret) {
-		ret = cpuhp_setup_state(CPUHP_PERF_X86_UNCORE_PREP,
-					"perf/x86/intel/uncore:prepare",
-					uncore_cpu_prepare, NULL);
-		if (ret)
-			goto err;
-	} else {
-		cpuhp_setup_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP,
-					  "perf/x86/intel/uncore:prepare",
-					  uncore_cpu_prepare, NULL);
-	}
-	first_init = 1;
-	cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_STARTING,
-			  "perf/x86/uncore:starting",
-			  uncore_cpu_starting, uncore_cpu_dying);
-	first_init = 0;
-	cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
-			  "perf/x86/uncore:online",
-			  uncore_event_cpu_online, uncore_event_cpu_offline);
+	/* Install hotplug callbacks to setup the targets for each package */
+	ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
+				"perf/x86/intel/uncore:online",
+				uncore_event_cpu_online,
+				uncore_event_cpu_offline);
+	if (ret)
+		goto err;
 	return 0;
 
 err:
-	/* Undo box->init_box() */
-	on_each_cpu_mask(&uncore_cpu_mask, uncore_exit_boxes, NULL, 1);
 	uncore_types_exit(uncore_msr_uncores);
 	uncore_pci_exit();
 	return ret;
@@ -1429,9 +1381,7 @@ module_init(intel_uncore_init);
 
 static void __exit intel_uncore_exit(void)
 {
-	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
-	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_STARTING);
-	cpuhp_remove_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP);
+	cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
 	uncore_types_exit(uncore_msr_uncores);
 	uncore_pci_exit();
 }
@@ -140,6 +140,7 @@ extern void __init load_ucode_bsp(void);
 extern void load_ucode_ap(void);
 void reload_early_microcode(void);
 extern bool get_builtin_firmware(struct cpio_data *cd, const char *name);
+extern bool initrd_gone;
 #else
 static inline int __init microcode_init(void) { return 0; };
 static inline void __init load_ucode_bsp(void) { }
@@ -104,6 +104,7 @@ struct cpuinfo_x86 {
 	__u8 x86_phys_bits;
 	/* CPUID returned core id bits: */
 	__u8 x86_coreid_bits;
+	__u8 cu_id;
 	/* Max extended CPUID function supported: */
 	__u32 extended_cpuid_level;
 	/* Maximum supported CPUID level, -1=no CPUID: */
@@ -1875,7 +1875,6 @@ static struct irq_chip ioapic_chip __read_mostly = {
 	.irq_ack = irq_chip_ack_parent,
 	.irq_eoi = ioapic_ack_level,
 	.irq_set_affinity = ioapic_set_affinity,
-	.irq_retrigger = irq_chip_retrigger_hierarchy,
 	.flags = IRQCHIP_SKIP_SET_WAKE,
 };
 
@@ -1887,7 +1886,6 @@ static struct irq_chip ioapic_ir_chip __read_mostly = {
 	.irq_ack = irq_chip_ack_parent,
 	.irq_eoi = ioapic_ir_ack_level,
 	.irq_set_affinity = ioapic_set_affinity,
-	.irq_retrigger = irq_chip_retrigger_hierarchy,
 	.flags = IRQCHIP_SKIP_SET_WAKE,
 };
 
@@ -2117,6 +2115,7 @@ static inline void __init check_timer(void)
 			if (idx != -1 && irq_trigger(idx))
 				unmask_ioapic_irq(irq_get_chip_data(0));
 		}
+		irq_domain_deactivate_irq(irq_data);
 		irq_domain_activate_irq(irq_data);
 		if (timer_irq_works()) {
 			if (disable_timer_pin_1 > 0)
@@ -2138,6 +2137,7 @@ static inline void __init check_timer(void)
 		 * legacy devices should be connected to IO APIC #0
 		 */
 		replace_pin_at_irq_node(data, node, apic1, pin1, apic2, pin2);
+		irq_domain_deactivate_irq(irq_data);
 		irq_domain_activate_irq(irq_data);
 		legacy_pic->unmask(0);
 		if (timer_irq_works()) {
@@ -309,8 +309,22 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
 
 	/* get information required for multi-node processors */
 	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
-		node_id = cpuid_ecx(0x8000001e) & 7;
+		u32 eax, ebx, ecx, edx;
+
+		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
 
+		node_id = ecx & 0xff;
+		smp_num_siblings = ((ebx >> 8) & 0xff) + 1;
+
+		if (c->x86 == 0x15)
+			c->cu_id = ebx & 0xff;
+
+		if (c->x86 >= 0x17) {
+			c->cpu_core_id = ebx & 0xff;
+
+			if (smp_num_siblings > 1)
+				c->x86_max_cores /= smp_num_siblings;
+		}
 
 		/*
 		 * We may have multiple LLCs if L3 caches exist, so check if we
@@ -1015,6 +1015,7 @@ static void identify_cpu(struct cpuinfo_x86 *c)
 	c->x86_model_id[0] = '\0'; /* Unset */
 	c->x86_max_cores = 1;
 	c->x86_coreid_bits = 0;
+	c->cu_id = 0xff;
 #ifdef CONFIG_X86_64
 	c->x86_clflush_size = 64;
 	c->x86_phys_bits = 36;
@@ -1373,20 +1373,15 @@ static unsigned long mce_adjust_timer_default(unsigned long interval)
 
 static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
 
-static void __restart_timer(struct timer_list *t, unsigned long interval)
+static void __start_timer(struct timer_list *t, unsigned long interval)
 {
 	unsigned long when = jiffies + interval;
 	unsigned long flags;
 
 	local_irq_save(flags);
 
-	if (timer_pending(t)) {
-		if (time_before(when, t->expires))
-			mod_timer(t, when);
-	} else {
-		t->expires = round_jiffies(when);
-		add_timer_on(t, smp_processor_id());
-	}
+	if (!timer_pending(t) || time_before(when, t->expires))
+		mod_timer(t, round_jiffies(when));
 
 	local_irq_restore(flags);
 }
@@ -1421,7 +1416,7 @@ static void mce_timer_fn(unsigned long data)
 
 done:
 	__this_cpu_write(mce_next_interval, iv);
-	__restart_timer(t, iv);
+	__start_timer(t, iv);
 }
 
 /*
@@ -1432,7 +1427,7 @@ void mce_timer_kick(unsigned long interval)
 	struct timer_list *t = this_cpu_ptr(&mce_timer);
 	unsigned long iv = __this_cpu_read(mce_next_interval);
 
-	__restart_timer(t, interval);
+	__start_timer(t, interval);
 
 	if (interval < iv)
 		__this_cpu_write(mce_next_interval, interval);
@@ -1779,17 +1774,15 @@ static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
 	}
 }
 
-static void mce_start_timer(unsigned int cpu, struct timer_list *t)
+static void mce_start_timer(struct timer_list *t)
 {
 	unsigned long iv = check_interval * HZ;
 
 	if (mca_cfg.ignore_ce || !iv)
 		return;
 
-	per_cpu(mce_next_interval, cpu) = iv;
-
-	t->expires = round_jiffies(jiffies + iv);
-	add_timer_on(t, cpu);
+	this_cpu_write(mce_next_interval, iv);
+	__start_timer(t, iv);
 }
 
 static void __mcheck_cpu_setup_timer(void)
@@ -1806,7 +1799,7 @@ static void __mcheck_cpu_init_timer(void)
 	unsigned int cpu = smp_processor_id();
 
 	setup_pinned_timer(t, mce_timer_fn, cpu);
-	mce_start_timer(cpu, t);
+	mce_start_timer(t);
 }
 
 /* Handle unconfigured int18 (should never happen) */
@@ -2566,7 +2559,7 @@ static int mce_cpu_dead(unsigned int cpu)
 
 static int mce_cpu_online(unsigned int cpu)
 {
-	struct timer_list *t = &per_cpu(mce_timer, cpu);
+	struct timer_list *t = this_cpu_ptr(&mce_timer);
 	int ret;
 
 	mce_device_create(cpu);
@@ -2577,13 +2570,13 @@ static int mce_cpu_online(unsigned int cpu)
 		return ret;
 	}
 	mce_reenable_cpu();
-	mce_start_timer(cpu, t);
+	mce_start_timer(t);
 	return 0;
 }
 
 static int mce_cpu_pre_down(unsigned int cpu)
 {
-	struct timer_list *t = &per_cpu(mce_timer, cpu);
+	struct timer_list *t = this_cpu_ptr(&mce_timer);
 
 	mce_disable_cpu();
 	del_timer_sync(t);
@@ -384,8 +384,9 @@ void load_ucode_amd_ap(unsigned int family)
 reget:
 	if (!get_builtin_microcode(&cp, family)) {
 #ifdef CONFIG_BLK_DEV_INITRD
-		cp = find_cpio_data(ucode_path, (void *)initrd_start,
-				    initrd_end - initrd_start, NULL);
+		if (!initrd_gone)
+			cp = find_cpio_data(ucode_path, (void *)initrd_start,
+					    initrd_end - initrd_start, NULL);
 #endif
 		if (!(cp.data && cp.size)) {
 			/*
@@ -46,6 +46,8 @@
 static struct microcode_ops *microcode_ops;
 static bool dis_ucode_ldr = true;
 
+bool initrd_gone;
+
 LIST_HEAD(microcode_cache);
 
 /*
@@ -190,21 +192,24 @@ void load_ucode_ap(void)
 static int __init save_microcode_in_initrd(void)
 {
 	struct cpuinfo_x86 *c = &boot_cpu_data;
+	int ret = -EINVAL;
 
 	switch (c->x86_vendor) {
 	case X86_VENDOR_INTEL:
 		if (c->x86 >= 6)
-			return save_microcode_in_initrd_intel();
+			ret = save_microcode_in_initrd_intel();
 		break;
 	case X86_VENDOR_AMD:
 		if (c->x86 >= 0x10)
-			return save_microcode_in_initrd_amd(c->x86);
+			ret = save_microcode_in_initrd_amd(c->x86);
 		break;
 	default:
 		break;
 	}
 
-	return -EINVAL;
+	initrd_gone = true;
+
+	return ret;
 }
 
 struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
@@ -247,9 +252,16 @@ struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
 	 * has the virtual address of the beginning of the initrd. It also
 	 * possibly relocates the ramdisk. In either case, initrd_start contains
 	 * the updated address so use that instead.
+	 *
+	 * initrd_gone is for the hotplug case where we've thrown out initrd
+	 * already.
 	 */
-	if (!use_pa && initrd_start)
-		start = initrd_start;
+	if (!use_pa) {
+		if (initrd_gone)
+			return (struct cpio_data){ NULL, 0, "" };
+		if (initrd_start)
+			start = initrd_start;
+	}
 
 	return find_cpio_data(path, (void *)start, size, NULL);
 #else /* !CONFIG_BLK_DEV_INITRD */
Some files were not shown because too many files have changed in this diff.