Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Rejecting non-native endian BTF overlapped with the addition of support for it. The rest were simpler overlapping changes, except the renesas ravb binding update, which had to follow a file move as well as a YAML conversion.

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 8b0308fe31
@@ -1324,15 +1324,26 @@ PAGE_SIZE multiple when read back.
 pgmajfault
 Number of major page faults incurred

-workingset_refault
-Number of refaults of previously evicted pages
+workingset_refault_anon
+Number of refaults of previously evicted anonymous pages.

-workingset_activate
-Number of refaulted pages that were immediately activated
+workingset_refault_file
+Number of refaults of previously evicted file pages.

-workingset_restore
-Number of restored pages which have been detected as an active
-workingset before they got reclaimed.
+workingset_activate_anon
+Number of refaulted anonymous pages that were immediately
+activated.

+workingset_activate_file
+Number of refaulted file pages that were immediately activated.

+workingset_restore_anon
+Number of restored anonymous pages which have been detected as
+an active workingset before they got reclaimed.

+workingset_restore_file
+Number of restored file pages which have been detected as an
+active workingset before they got reclaimed.

 workingset_nodereclaim
 Number of times a shadow node has been reclaimed
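
The hunk above splits the memory.stat workingset counters into separate anonymous and file variants. As a rough illustration only (not part of the merge), the sketch below dumps the new workingset_* keys from a cgroup v2 memory.stat file; the mount point /sys/fs/cgroup and the cgroup name "test" are assumptions for the example.

/* Sketch: print the workingset_* counters of one cgroup (paths assumed). */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/sys/fs/cgroup/test/memory.stat", "r");
	char key[64];
	unsigned long long val;

	if (!f) {
		perror("memory.stat");
		return 1;
	}
	while (fscanf(f, "%63s %llu", key, &val) == 2) {
		if (strncmp(key, "workingset_", 11) == 0)
			printf("%s = %llu\n", key, val);
	}
	fclose(f);
	return 0;
}
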
@@ -67,7 +67,7 @@ Parameters::
 the value passed in <key_size>.

 <key_type>
-Either 'logon' or 'user' kernel key type.
+Either 'logon', 'user' or 'encrypted' kernel key type.

 <key_description>
 The kernel keyring key description crypt target should look for
@@ -121,6 +121,14 @@ submit_from_crypt_cpus
 thread because it benefits CFQ to have writes submitted using the
 same context.

+no_read_workqueue
+Bypass dm-crypt internal workqueue and process read requests synchronously.

+no_write_workqueue
+Bypass dm-crypt internal workqueue and process write requests synchronously.
+This option is automatically enabled for host-managed zoned block devices
+(e.g. host-managed SMR hard-disks).

 integrity:<bytes>:<type>
 The device requires additional <bytes> metadata per-sector stored
 in per-bio integrity structure. This metadata must by provided
@@ -690,7 +690,7 @@ which of the two parameters is added to the kernel command line. In the
 instruction of the CPUs (which, as a rule, suspends the execution of the program
 and causes the hardware to attempt to enter the shallowest available idle state)
 for this purpose, and if ``idle=poll`` is used, idle CPUs will execute a
-more or less ``lightweight'' sequence of instructions in a tight loop. [Note
+more or less "lightweight" sequence of instructions in a tight loop. [Note
 that using ``idle=poll`` is somewhat drastic in many cases, as preventing idle
 CPUs from saving almost any energy at all may not be the only effect of it.
 For example, on Intel hardware it effectively prevents CPUs from using
@@ -23,7 +23,7 @@ properties:
 compatible:
 items:
 - const: raspberrypi,bcm2835-firmware
-- const: simple-bus
+- const: simple-mfd

 mboxes:
 $ref: '/schemas/types.yaml#/definitions/phandle'
@@ -57,7 +57,7 @@ required:
 examples:
 - |
 firmware {
-compatible = "raspberrypi,bcm2835-firmware", "simple-bus";
+compatible = "raspberrypi,bcm2835-firmware", "simple-mfd";
 mboxes = <&mailbox>;

 firmware_clocks: clocks {
@@ -67,7 +67,7 @@ examples:

 main_crypto: crypto@4e00000 {
 compatible = "ti,j721-sa2ul";
-reg = <0x0 0x4e00000 0x0 0x1200>;
+reg = <0x4e00000 0x1200>;
 power-domains = <&k3_pds 264 TI_SCI_PD_EXCLUSIVE>;
 dmas = <&main_udmap 0xc000>, <&main_udmap 0x4000>,
 <&main_udmap 0x4001>;
@@ -145,10 +145,10 @@ examples:

 display@fd4a0000 {
 compatible = "xlnx,zynqmp-dpsub-1.7";
-reg = <0x0 0xfd4a0000 0x0 0x1000>,
-<0x0 0xfd4aa000 0x0 0x1000>,
-<0x0 0xfd4ab000 0x0 0x1000>,
-<0x0 0xfd4ac000 0x0 0x1000>;
+reg = <0xfd4a0000 0x1000>,
+<0xfd4aa000 0x1000>,
+<0xfd4ab000 0x1000>,
+<0xfd4ac000 0x1000>;
 reg-names = "dp", "blend", "av_buf", "aud";
 interrupts = <0 119 4>;
 interrupt-parent = <&gic>;
@@ -57,7 +57,7 @@ examples:

 dma: dma-controller@fd4c0000 {
 compatible = "xlnx,zynqmp-dpdma";
-reg = <0x0 0xfd4c0000 0x0 0x1000>;
+reg = <0xfd4c0000 0x1000>;
 interrupts = <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>;
 interrupt-parent = <&gic>;
 clocks = <&dpdma_clk>;
@@ -20,8 +20,9 @@ Required properties:
 - gpio-controller : Marks the device node as a GPIO controller
 - interrupts : Interrupt specifier, see interrupt-controller/interrupts.txt
 - interrupt-controller : Mark the GPIO controller as an interrupt-controller
-- ngpios : number of GPIO lines, see gpio.txt
-(should be multiple of 8, up to 80 pins)
+- ngpios : number of *hardware* GPIO lines, see gpio.txt. This will expose
+2 software GPIOs per hardware GPIO: one for hardware input, one for hardware
+output. Up to 80 pins, must be a multiple of 8.
 - clocks : A phandle to the APB clock for SGPM clock division
 - bus-frequency : SGPM CLK frequency

@@ -30,7 +30,7 @@ properties:
 const: 0

 patternProperties:
-"^multi-led[0-9a-f]$":
+"^multi-led@[0-9a-b]$":
 type: object
 allOf:
 - $ref: leds-class-multicolor.yaml#
@@ -1,38 +0,0 @@
-* Sony 1/2.5-Inch 8.51Mp CMOS Digital Image Sensor
-
-The Sony imx274 is a 1/2.5-inch CMOS active pixel digital image sensor with
-an active array size of 3864H x 2202V. It is programmable through I2C
-interface. The I2C address is fixed to 0x1a as per sensor data sheet.
-Image data is sent through MIPI CSI-2, which is configured as 4 lanes
-at 1440 Mbps.
-
-
-Required Properties:
-- compatible: value should be "sony,imx274" for imx274 sensor
-- reg: I2C bus address of the device
-
-Optional Properties:
-- reset-gpios: Sensor reset GPIO
-- clocks: Reference to the input clock.
-- clock-names: Should be "inck".
-- VANA-supply: Sensor 2.8v analog supply.
-- VDIG-supply: Sensor 1.8v digital core supply.
-- VDDL-supply: Sensor digital IO 1.2v supply.
-
-The imx274 device node should contain one 'port' child node with
-an 'endpoint' subnode. For further reading on port node refer to
-Documentation/devicetree/bindings/media/video-interfaces.txt.
-
-Example:
-sensor@1a {
-compatible = "sony,imx274";
-reg = <0x1a>;
-#address-cells = <1>;
-#size-cells = <0>;
-reset-gpios = <&gpio_sensor 0 0>;
-port {
-sensor_out: endpoint {
-remote-endpoint = <&csiss_in>;
-};
-};
-};
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/i2c/sony,imx274.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Sony 1/2.5-Inch 8.51MP CMOS Digital Image Sensor
+
+maintainers:
+- Leon Luo <leonl@leopardimaging.com>
+
+description: |
+The Sony IMX274 is a 1/2.5-inch CMOS active pixel digital image sensor with an
+active array size of 3864H x 2202V. It is programmable through I2C interface.
+Image data is sent through MIPI CSI-2, which is configured as 4 lanes at 1440
+Mbps.
+
+properties:
+compatible:
+const: sony,imx274
+
+reg:
+const: 0x1a
+
+reset-gpios:
+maxItems: 1
+
+clocks:
+maxItems: 1
+
+clock-names:
+const: inck
+
+vana-supply:
+description: Sensor 2.8 V analog supply.
+maxItems: 1
+
+vdig-supply:
+description: Sensor 1.8 V digital core supply.
+maxItems: 1
+
+vddl-supply:
+description: Sensor digital IO 1.2 V supply.
+maxItems: 1
+
+port:
+type: object
+description: Output video port. See ../video-interfaces.txt.
+
+required:
+- compatible
+- reg
+- port
+
+additionalProperties: false
+
+examples:
+- |
+i2c0 {
+#address-cells = <1>;
+#size-cells = <0>;
+
+imx274: camera-sensor@1a {
+compatible = "sony,imx274";
+reg = <0x1a>;
+reset-gpios = <&gpio_sensor 0 0>;
+
+port {
+sensor_out: endpoint {
+remote-endpoint = <&csiss_in>;
+};
+};
+};
+};
+
+...
@@ -31,6 +31,7 @@ properties:
 - renesas,etheravb-r8a774a1 # RZ/G2M
 - renesas,etheravb-r8a774b1 # RZ/G2N
 - renesas,etheravb-r8a774c0 # RZ/G2E
+- renesas,etheravb-r8a774e1 # RZ/G2H
 - renesas,etheravb-r8a7795 # R-Car H3
 - renesas,etheravb-r8a7796 # R-Car M3-W
 - renesas,etheravb-r8a77961 # R-Car M3-W+
@@ -39,10 +39,10 @@ which can help simplify cross compiling. ::
 ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- make CC=clang

 ``CROSS_COMPILE`` is not used to prefix the Clang compiler binary, instead
-``CROSS_COMPILE`` is used to set a command line flag: ``--target <triple>``. For
+``CROSS_COMPILE`` is used to set a command line flag: ``--target=<triple>``. For
 example: ::

-clang --target aarch64-linux-gnu foo.c
+clang --target=aarch64-linux-gnu foo.c

 LLVM Utilities
 --------------
@@ -701,23 +701,6 @@ Memory Consistency Flags
 :stub-columns: 0
 :widths: 3 1 4

-* .. _`V4L2-FLAG-MEMORY-NON-CONSISTENT`:
-
-- ``V4L2_FLAG_MEMORY_NON_CONSISTENT``
-- 0x00000001
-- A buffer is allocated either in consistent (it will be automatically
-coherent between the CPU and the bus) or non-consistent memory. The
-latter can provide performance gains, for instance the CPU cache
-sync/flush operations can be avoided if the buffer is accessed by the
-corresponding device only and the CPU does not read/write to/from that
-buffer. However, this requires extra care from the driver -- it must
-guarantee memory consistency by issuing a cache flush/sync when
-consistency is needed. If this flag is set V4L2 will attempt to
-allocate the buffer in non-consistent memory. The flag takes effect
-only if the buffer is used for :ref:`memory mapping <mmap>` I/O and the
-queue reports the :ref:`V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS
-<V4L2-BUF-CAP-SUPPORTS-MMAP-CACHE-HINTS>` capability.

 .. c:type:: v4l2_memory

 enum v4l2_memory
@@ -120,13 +120,9 @@ than the number requested.
 If you want to just query the capabilities without making any
 other changes, then set ``count`` to 0, ``memory`` to
 ``V4L2_MEMORY_MMAP`` and ``format.type`` to the buffer type.
-* - __u32
-- ``flags``
-- Specifies additional buffer management attributes.
-See :ref:`memory-flags`.

 * - __u32
-- ``reserved``\ [6]
+- ``reserved``\ [7]
 - A place holder for future extensions. Drivers and applications
 must set the array to zero.

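
The documentation kept above describes querying the queue capabilities by issuing VIDIOC_CREATE_BUFS with ``count`` set to 0. A minimal user-space sketch of that query follows; it is not taken from the patch, and the device node /dev/video0 and the capture buffer type are assumptions for the example.

/* Sketch: query buffer capabilities only (count = 0), device path assumed. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_create_buffers create;
	int fd = open("/dev/video0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&create, 0, sizeof(create));
	create.count = 0;                        /* query, do not allocate */
	create.memory = V4L2_MEMORY_MMAP;
	create.format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	if (ioctl(fd, VIDIOC_CREATE_BUFS, &create) == 0)
		printf("queue capabilities: 0x%x\n", create.capability);
	else
		perror("VIDIOC_CREATE_BUFS");
	close(fd);
	return 0;
}
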
@@ -112,17 +112,10 @@ aborting or finishing any DMA in progress, an implicit
 ``V4L2_MEMORY_MMAP`` and ``type`` set to the buffer type. This will
 free any previously allocated buffers, so this is typically something
 that will be done at the start of the application.
-* - union {
-- (anonymous)
-* - __u32
-- ``flags``
-- Specifies additional buffer management attributes.
-See :ref:`memory-flags`.
 * - __u32
 - ``reserved``\ [1]
-- Kept for backwards compatibility. Use ``flags`` instead.
-* - }
--
+- A place holder for future extensions. Drivers and applications
+must set the array to zero.

 .. tabularcolumns:: |p{6.1cm}|p{2.2cm}|p{8.7cm}|

@@ -169,7 +162,6 @@ aborting or finishing any DMA in progress, an implicit
 - This capability is set by the driver to indicate that the queue supports
 cache and memory management hints. However, it's only valid when the
 queue is used for :ref:`memory mapping <mmap>` streaming I/O. See
-:ref:`V4L2_FLAG_MEMORY_NON_CONSISTENT <V4L2-FLAG-MEMORY-NON-CONSISTENT>`,
 :ref:`V4L2_BUF_FLAG_NO_CACHE_INVALIDATE <V4L2-BUF-FLAG-NO-CACHE-INVALIDATE>` and
 :ref:`V4L2_BUF_FLAG_NO_CACHE_CLEAN <V4L2-BUF-FLAG-NO-CACHE-CLEAN>`.

MAINTAINERS (16 lines changed)
@@ -8764,7 +8764,8 @@ F: include/drm/i915*
 F: include/uapi/drm/i915_drm.h

 INTEL ETHERNET DRIVERS
-M: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+M: Jesse Brandeburg <jesse.brandeburg@intel.com>
+M: Tony Nguyen <anthony.l.nguyen@intel.com>
 L: intel-wired-lan@lists.osuosl.org (moderated for non-subscribers)
 S: Supported
 W: http://www.intel.com/support/feedback.htm
@@ -12104,6 +12105,7 @@ NETWORKING [DSA]
 M: Andrew Lunn <andrew@lunn.ch>
 M: Vivien Didelot <vivien.didelot@gmail.com>
 M: Florian Fainelli <f.fainelli@gmail.com>
+M: Vladimir Oltean <olteanv@gmail.com>
 S: Maintained
 F: Documentation/devicetree/bindings/net/dsa/
 F: drivers/net/dsa/
@@ -13211,6 +13213,7 @@ F: drivers/firmware/pcdp.*

 PCI DRIVER FOR AARDVARK (Marvell Armada 3700)
 M: Thomas Petazzoni <thomas.petazzoni@bootlin.com>
+M: Pali Rohár <pali@kernel.org>
 L: linux-pci@vger.kernel.org
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S: Maintained
@@ -16185,7 +16188,7 @@ M: Leon Luo <leonl@leopardimaging.com>
 L: linux-media@vger.kernel.org
 S: Maintained
 T: git git://linuxtv.org/media_tree.git
-F: Documentation/devicetree/bindings/media/i2c/imx274.txt
+F: Documentation/devicetree/bindings/media/i2c/sony,imx274.yaml
 F: drivers/media/i2c/imx274.c

 SONY IMX290 SENSOR DRIVER
@@ -18312,7 +18315,8 @@ F: drivers/gpu/vga/vga_switcheroo.c
 F: include/linux/vga_switcheroo.h

 VIA RHINE NETWORK DRIVER
-S: Orphan
+S: Maintained
+M: Kevin Brace <kevinbrace@bracecomputerlab.com>
 F: drivers/net/ethernet/via/via-rhine.c

 VIA SD/MMC CARD CONTROLLER DRIVER
@@ -18917,10 +18921,10 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/mm
 F: arch/x86/mm/

 X86 PLATFORM DRIVERS
-M: Darren Hart <dvhart@infradead.org>
-M: Andy Shevchenko <andy@infradead.org>
+M: Hans de Goede <hdegoede@redhat.com>
+M: Mark Gross <mgross@linux.intel.com>
 L: platform-driver-x86@vger.kernel.org
-S: Odd Fixes
+S: Maintained
 T: git git://git.infradead.org/linux-platform-drivers-x86.git
 F: drivers/platform/olpc/
 F: drivers/platform/x86/
Makefile (2 lines changed)
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 9
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc8
 NAME = Kleptomaniac Octopus

 # *DOCUMENTATION*
@@ -13,7 +13,7 @@

 soc {
 firmware: firmware {
-compatible = "raspberrypi,bcm2835-firmware", "simple-bus";
+compatible = "raspberrypi,bcm2835-firmware", "simple-mfd";
 #address-cells = <1>;
 #size-cells = <1>;

@@ -24,7 +24,9 @@ static int imx6q_enter_wait(struct cpuidle_device *dev,
 imx6_set_lpm(WAIT_UNCLOCKED);
 raw_spin_unlock(&cpuidle_lock);

+rcu_idle_enter();
 cpu_do_idle();
+rcu_idle_exit();

 raw_spin_lock(&cpuidle_lock);
 if (num_idle_cpus-- == num_online_cpus())
@@ -44,7 +46,7 @@ static struct cpuidle_driver imx6q_cpuidle_driver = {
 {
 .exit_latency = 50,
 .target_residency = 75,
-.flags = CPUIDLE_FLAG_TIMER_STOP,
+.flags = CPUIDLE_FLAG_TIMER_STOP | CPUIDLE_FLAG_RCU_IDLE,
 .enter = imx6q_enter_wait,
 .name = "WAIT",
 .desc = "Clock off",
@@ -298,8 +298,21 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
 case EFI_BOOT_SERVICES_DATA:
 case EFI_CONVENTIONAL_MEMORY:
 case EFI_PERSISTENT_MEMORY:
-pr_warn(FW_BUG "requested region covers kernel memory @ %pa\n", &phys);
-return NULL;
+if (memblock_is_map_memory(phys) ||
+!memblock_is_region_memory(phys, size)) {
+pr_warn(FW_BUG "requested region covers kernel memory @ %pa\n", &phys);
+return NULL;
+}
+/*
+* Mapping kernel memory is permitted if the region in
+* question is covered by a single memblock with the
+* NOMAP attribute set: this enables the use of ACPI
+* table overrides passed via initramfs, which are
+* reserved in memory using arch_reserve_mem_area()
+* below. As this particular use case only requires
+* read access, fall through to the R/O mapping case.
+*/
+fallthrough;

 case EFI_RUNTIME_SERVICES_CODE:
 /*
@@ -388,3 +401,8 @@ int apei_claim_sea(struct pt_regs *regs)

 return err;
 }
+
+void arch_reserve_mem_area(acpi_physical_address addr, size_t size)
+{
+memblock_mark_nomap(addr, size);
+}
@@ -31,7 +31,14 @@ static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
 isb();
 }

+/*
+* __load_guest_stage2() includes an ISB only when the AT
+* workaround is applied. Take care of the opposite condition,
+* ensuring that we always have an ISB, but not two ISBs back
+* to back.
+*/
 __load_guest_stage2(mmu);
+asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
 }

 static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
@@ -538,7 +538,7 @@ virtual_memmap_init(u64 start, u64 end, void *arg)
 if (map_start < map_end)
 memmap_init_zone((unsigned long)(map_end - map_start),
 args->nid, args->zone, page_to_pfn(map_start),
-MEMMAP_EARLY, NULL);
+MEMINIT_EARLY, NULL);
 return 0;
 }

@@ -547,8 +547,8 @@ memmap_init (unsigned long size, int nid, unsigned long zone,
 unsigned long start_pfn)
 {
 if (!vmem_map) {
-memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY,
-NULL);
+memmap_init_zone(size, nid, zone, start_pfn,
+MEMINIT_EARLY, NULL);
 } else {
 struct page *start;
 struct memmap_init_callback_data args;
@@ -148,7 +148,7 @@ void __init plat_mem_setup(void)
 {
 struct cpuinfo_mips *c = &current_cpu_data;

-if ((c->cputype == CPU_74K) || (c->cputype == CPU_1074K)) {
+if (c->cputype == CPU_74K) {
 pr_info("Using bcma bus\n");
 #ifdef CONFIG_BCM47XX_BCMA
 bcm47xx_bus_type = BCM47XX_BUS_TYPE_BCMA;
@@ -47,6 +47,7 @@ static inline int __pure __get_cpu_type(const int cpu_type)
 case CPU_34K:
 case CPU_1004K:
 case CPU_74K:
+case CPU_1074K:
 case CPU_M14KC:
 case CPU_M14KEC:
 case CPU_INTERAPTIV:
@@ -44,6 +44,10 @@ ifdef CONFIG_CPU_LOONGSON2F_WORKAROUNDS
 endif
 endif

+# Some -march= flags enable MMI instructions, and GCC complains about that
+# support being enabled alongside -msoft-float. Thus explicitly disable MMI.
+cflags-y += $(call cc-option,-mno-loongson-mmi)

 #
 # Loongson Machines' Support
 #
@@ -95,10 +95,8 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
 if (res)
 goto fault;

-set_fpr64(current->thread.fpu.fpr,
-insn.loongson3_lswc2_format.rt, value);
-set_fpr64(current->thread.fpu.fpr,
-insn.loongson3_lswc2_format.rq, value_next);
+set_fpr64(&current->thread.fpu.fpr[insn.loongson3_lswc2_format.rt], 0, value);
+set_fpr64(&current->thread.fpu.fpr[insn.loongson3_lswc2_format.rq], 0, value_next);
 compute_return_epc(regs);
 own_fpu(1);
 }
@@ -130,15 +128,13 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
 goto sigbus;

 lose_fpu(1);
-value_next = get_fpr64(current->thread.fpu.fpr,
-insn.loongson3_lswc2_format.rq);
+value_next = get_fpr64(&current->thread.fpu.fpr[insn.loongson3_lswc2_format.rq], 0);

 StoreDW(addr + 8, value_next, res);
 if (res)
 goto fault;

-value = get_fpr64(current->thread.fpu.fpr,
-insn.loongson3_lswc2_format.rt);
+value = get_fpr64(&current->thread.fpu.fpr[insn.loongson3_lswc2_format.rt], 0);

 StoreDW(addr, value, res);
 if (res)
@@ -204,8 +200,7 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
 if (res)
 goto fault;

-set_fpr64(current->thread.fpu.fpr,
-insn.loongson3_lsdc2_format.rt, value);
+set_fpr64(&current->thread.fpu.fpr[insn.loongson3_lsdc2_format.rt], 0, value);
 compute_return_epc(regs);
 own_fpu(1);

@@ -221,8 +216,7 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
 if (res)
 goto fault;

-set_fpr64(current->thread.fpu.fpr,
-insn.loongson3_lsdc2_format.rt, value);
+set_fpr64(&current->thread.fpu.fpr[insn.loongson3_lsdc2_format.rt], 0, value);
 compute_return_epc(regs);
 own_fpu(1);
 break;
@@ -286,8 +280,7 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
 goto sigbus;

 lose_fpu(1);
-value = get_fpr64(current->thread.fpu.fpr,
-insn.loongson3_lsdc2_format.rt);
+value = get_fpr64(&current->thread.fpu.fpr[insn.loongson3_lsdc2_format.rt], 0);

 StoreW(addr, value, res);
 if (res)
@@ -305,8 +298,7 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
 goto sigbus;

 lose_fpu(1);
-value = get_fpr64(current->thread.fpu.fpr,
-insn.loongson3_lsdc2_format.rt);
+value = get_fpr64(&current->thread.fpu.fpr[insn.loongson3_lsdc2_format.rt], 0);

 StoreDW(addr, value, res);
 if (res)
@@ -475,7 +475,6 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 case BPF_JMP | BPF_JSET | BPF_K:
 case BPF_JMP | BPF_JSET | BPF_X:
 true_cond = COND_NE;
-fallthrough;
 cond_branch:
 /* same targets, can avoid doing the test :) */
 if (filter[i].jt == filter[i].jf) {
@@ -5,7 +5,6 @@

 #include <linux/random.h>
 #include <linux/version.h>
-#include <asm/timex.h>

 extern unsigned long __stack_chk_guard;

@@ -18,12 +17,9 @@ extern unsigned long __stack_chk_guard;
 static __always_inline void boot_init_stack_canary(void)
 {
 unsigned long canary;
-unsigned long tsc;

 /* Try to get a semi random initial value. */
 get_random_bytes(&canary, sizeof(canary));
-tsc = get_cycles();
-canary += tsc + (tsc << BITS_PER_LONG/2);
 canary ^= LINUX_VERSION_CODE;
 canary &= CANARY_MASK;

@@ -33,6 +33,19 @@ static inline u32 get_cycles_hi(void)
 #define get_cycles_hi get_cycles_hi
 #endif /* CONFIG_64BIT */

+/*
+* Much like MIPS, we may not have a viable counter to use at an early point
+* in the boot process. Unfortunately we don't have a fallback, so instead
+* we just return 0.
+*/
+static inline unsigned long random_get_entropy(void)
+{
+if (unlikely(clint_time_val == NULL))
+return 0;
+return get_cycles();
+}
+#define random_get_entropy() random_get_entropy()

 #else /* CONFIG_RISCV_M_MODE */

 static inline cycles_t get_cycles(void)
@@ -1260,26 +1260,44 @@ static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)

 #define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)

-static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
+static inline p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long address)
 {
-if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
-return (p4d_t *) pgd_deref(*pgd) + p4d_index(address);
-return (p4d_t *) pgd;
+if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
+return (p4d_t *) pgd_deref(pgd) + p4d_index(address);
+return (p4d_t *) pgdp;
+}
+#define p4d_offset_lockless p4d_offset_lockless
+
+static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long address)
+{
+return p4d_offset_lockless(pgdp, *pgdp, address);
 }

-static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
+static inline pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long address)
 {
-if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
-return (pud_t *) p4d_deref(*p4d) + pud_index(address);
-return (pud_t *) p4d;
+if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
+return (pud_t *) p4d_deref(p4d) + pud_index(address);
+return (pud_t *) p4dp;
+}
+#define pud_offset_lockless pud_offset_lockless
+
+static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long address)
+{
+return pud_offset_lockless(p4dp, *p4dp, address);
 }
 #define pud_offset pud_offset

-static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
+static inline pmd_t *pmd_offset_lockless(pud_t *pudp, pud_t pud, unsigned long address)
 {
-if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
-return (pmd_t *) pud_deref(*pud) + pmd_index(address);
-return (pmd_t *) pud;
+if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
+return (pmd_t *) pud_deref(pud) + pmd_index(address);
+return (pmd_t *) pudp;
+}
+#define pmd_offset_lockless pmd_offset_lockless
+
+static inline pmd_t *pmd_offset(pud_t *pudp, unsigned long address)
+{
+return pmd_offset_lockless(pudp, *pudp, address);
 }
 #define pmd_offset pmd_offset

@@ -299,7 +299,7 @@ __visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
 old_regs = set_irq_regs(regs);

 instrumentation_begin();
-run_on_irqstack_cond(__xen_pv_evtchn_do_upcall, NULL, regs);
+run_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);
 instrumentation_begin();

 set_irq_regs(old_regs);
@@ -682,6 +682,8 @@ SYM_CODE_END(.Lbad_gs)
 * rdx: Function argument (can be NULL if none)
 */
 SYM_FUNC_START(asm_call_on_stack)
+SYM_INNER_LABEL(asm_call_sysvec_on_stack, SYM_L_GLOBAL)
+SYM_INNER_LABEL(asm_call_irq_on_stack, SYM_L_GLOBAL)
 /*
 * Save the frame pointer unconditionally. This allows the ORC
 * unwinder to handle the stack switch.
@@ -242,7 +242,7 @@ __visible noinstr void func(struct pt_regs *regs) \
 instrumentation_begin(); \
 irq_enter_rcu(); \
 kvm_set_cpu_l1tf_flush_l1d(); \
-run_on_irqstack_cond(__##func, regs, regs); \
+run_sysvec_on_irqstack_cond(__##func, regs); \
 irq_exit_rcu(); \
 instrumentation_end(); \
 irqentry_exit(regs, state); \
@@ -12,20 +12,50 @@ static __always_inline bool irqstack_active(void)
 return __this_cpu_read(irq_count) != -1;
 }

-void asm_call_on_stack(void *sp, void *func, void *arg);
+void asm_call_on_stack(void *sp, void (*func)(void), void *arg);
+void asm_call_sysvec_on_stack(void *sp, void (*func)(struct pt_regs *regs),
+struct pt_regs *regs);
+void asm_call_irq_on_stack(void *sp, void (*func)(struct irq_desc *desc),
+struct irq_desc *desc);

-static __always_inline void __run_on_irqstack(void *func, void *arg)
+static __always_inline void __run_on_irqstack(void (*func)(void))
 {
 void *tos = __this_cpu_read(hardirq_stack_ptr);

 __this_cpu_add(irq_count, 1);
-asm_call_on_stack(tos - 8, func, arg);
+asm_call_on_stack(tos - 8, func, NULL);
+__this_cpu_sub(irq_count, 1);
+}
+
+static __always_inline void
+__run_sysvec_on_irqstack(void (*func)(struct pt_regs *regs),
+struct pt_regs *regs)
+{
+void *tos = __this_cpu_read(hardirq_stack_ptr);
+
+__this_cpu_add(irq_count, 1);
+asm_call_sysvec_on_stack(tos - 8, func, regs);
+__this_cpu_sub(irq_count, 1);
+}
+
+static __always_inline void
+__run_irq_on_irqstack(void (*func)(struct irq_desc *desc),
+struct irq_desc *desc)
+{
+void *tos = __this_cpu_read(hardirq_stack_ptr);
+
+__this_cpu_add(irq_count, 1);
+asm_call_irq_on_stack(tos - 8, func, desc);
 __this_cpu_sub(irq_count, 1);
 }

 #else /* CONFIG_X86_64 */
 static inline bool irqstack_active(void) { return false; }
-static inline void __run_on_irqstack(void *func, void *arg) { }
+static inline void __run_on_irqstack(void (*func)(void)) { }
+static inline void __run_sysvec_on_irqstack(void (*func)(struct pt_regs *regs),
+struct pt_regs *regs) { }
+static inline void __run_irq_on_irqstack(void (*func)(struct irq_desc *desc),
+struct irq_desc *desc) { }
 #endif /* !CONFIG_X86_64 */

 static __always_inline bool irq_needs_irq_stack(struct pt_regs *regs)
@@ -37,17 +67,40 @@ static __always_inline bool irq_needs_irq_stack(struct pt_regs *regs)
 return !user_mode(regs) && !irqstack_active();
 }

-static __always_inline void run_on_irqstack_cond(void *func, void *arg,
+static __always_inline void run_on_irqstack_cond(void (*func)(void),
 struct pt_regs *regs)
 {
-void (*__func)(void *arg) = func;
-
 lockdep_assert_irqs_disabled();

 if (irq_needs_irq_stack(regs))
-__run_on_irqstack(__func, arg);
+__run_on_irqstack(func);
 else
-__func(arg);
+func();
+}
+
+static __always_inline void
+run_sysvec_on_irqstack_cond(void (*func)(struct pt_regs *regs),
+struct pt_regs *regs)
+{
+lockdep_assert_irqs_disabled();
+
+if (irq_needs_irq_stack(regs))
+__run_sysvec_on_irqstack(func, regs);
+else
+func(regs);
+}
+
+static __always_inline void
+run_irq_on_irqstack_cond(void (*func)(struct irq_desc *desc), struct irq_desc *desc,
+struct pt_regs *regs)
+{
+lockdep_assert_irqs_disabled();
+
+if (irq_needs_irq_stack(regs))
+__run_irq_on_irqstack(func, desc);
+else
+func(desc);
 }

 #endif
@@ -2243,6 +2243,7 @@ static inline void __init check_timer(void)
 legacy_pic->init(0);
 legacy_pic->make_irq(0);
 apic_write(APIC_LVT0, APIC_DM_EXTINT);
+legacy_pic->unmask(0);

 unlock_ExtINT_logic();

@@ -227,7 +227,7 @@ static __always_inline void handle_irq(struct irq_desc *desc,
 struct pt_regs *regs)
 {
 if (IS_ENABLED(CONFIG_X86_64))
-run_on_irqstack_cond(desc->handle_irq, desc, regs);
+run_irq_on_irqstack_cond(desc->handle_irq, desc, regs);
 else
 __handle_irq(desc, regs);
 }
@@ -74,5 +74,5 @@ int irq_init_percpu_irqstack(unsigned int cpu)

 void do_softirq_own_stack(void)
 {
-run_on_irqstack_cond(__do_softirq, NULL, NULL);
+run_on_irqstack_cond(__do_softirq, NULL);
 }
@@ -2183,6 +2183,12 @@ static int iret_interception(struct vcpu_svm *svm)
 return 1;
 }

+static int invd_interception(struct vcpu_svm *svm)
+{
+/* Treat an INVD instruction as a NOP and just skip it. */
+return kvm_skip_emulated_instruction(&svm->vcpu);
+}
+
 static int invlpg_interception(struct vcpu_svm *svm)
 {
 if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
@@ -2774,7 +2780,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
 [SVM_EXIT_RDPMC] = rdpmc_interception,
 [SVM_EXIT_CPUID] = cpuid_interception,
 [SVM_EXIT_IRET] = iret_interception,
-[SVM_EXIT_INVD] = emulate_on_interception,
+[SVM_EXIT_INVD] = invd_interception,
 [SVM_EXIT_PAUSE] = pause_interception,
 [SVM_EXIT_HLT] = halt_interception,
 [SVM_EXIT_INVLPG] = invlpg_interception,
@@ -129,6 +129,9 @@ static bool __read_mostly enable_preemption_timer = 1;
 module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
 #endif

+extern bool __read_mostly allow_smaller_maxphyaddr;
+module_param(allow_smaller_maxphyaddr, bool, S_IRUGO);
+
 #define KVM_VM_CR0_ALWAYS_OFF (X86_CR0_NW | X86_CR0_CD)
 #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR0_NE
 #define KVM_VM_CR0_ALWAYS_ON \
@@ -791,6 +794,18 @@ void update_exception_bitmap(struct kvm_vcpu *vcpu)
 */
 if (is_guest_mode(vcpu))
 eb |= get_vmcs12(vcpu)->exception_bitmap;
+else {
+/*
+* If EPT is enabled, #PF is only trapped if MAXPHYADDR is mismatched
+* between guest and host. In that case we only care about present
+* faults. For vmcs02, however, PFEC_MASK and PFEC_MATCH are set in
+* prepare_vmcs02_rare.
+*/
+bool selective_pf_trap = enable_ept && (eb & (1u << PF_VECTOR));
+int mask = selective_pf_trap ? PFERR_PRESENT_MASK : 0;
+vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, mask);
+vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, mask);
+}

 vmcs_write32(EXCEPTION_BITMAP, eb);
 }
@@ -4352,16 +4367,6 @@ static void init_vmcs(struct vcpu_vmx *vmx)
 vmx->pt_desc.guest.output_mask = 0x7F;
 vmcs_write64(GUEST_IA32_RTIT_CTL, 0);
 }

-/*
-* If EPT is enabled, #PF is only trapped if MAXPHYADDR is mismatched
-* between guest and host. In that case we only care about present
-* faults.
-*/
-if (enable_ept) {
-vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, PFERR_PRESENT_MASK);
-vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, PFERR_PRESENT_MASK);
-}
 }

 static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
@@ -4803,6 +4808,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
 * EPT will cause page fault only if we need to
 * detect illegal GPAs.
 */
+WARN_ON_ONCE(!allow_smaller_maxphyaddr);
 kvm_fixup_and_inject_pf_error(vcpu, cr2, error_code);
 return 1;
 } else
@@ -5331,7 +5337,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
 * would also use advanced VM-exit information for EPT violations to
 * reconstruct the page fault error code.
 */
-if (unlikely(kvm_mmu_is_illegal_gpa(vcpu, gpa)))
+if (unlikely(allow_smaller_maxphyaddr && kvm_mmu_is_illegal_gpa(vcpu, gpa)))
 return kvm_emulate_instruction(vcpu, 0);

 return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
@@ -8305,11 +8311,12 @@ static int __init vmx_init(void)
 vmx_check_vmcs12_offsets();

 /*
-* Intel processors don't have problems with
-* GUEST_MAXPHYADDR < HOST_MAXPHYADDR so enable
-* it for VMX by default
+* Shadow paging doesn't have a (further) performance penalty
+* from GUEST_MAXPHYADDR < HOST_MAXPHYADDR so enable it
+* by default
 */
-allow_smaller_maxphyaddr = true;
+if (!enable_ept)
+allow_smaller_maxphyaddr = true;

 return 0;
 }
@@ -552,7 +552,10 @@ static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)

 static inline bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu)
 {
-return !enable_ept || cpuid_maxphyaddr(vcpu) < boot_cpu_data.x86_phys_bits;
+if (!enable_ept)
+return true;
+
+return allow_smaller_maxphyaddr && cpuid_maxphyaddr(vcpu) < boot_cpu_data.x86_phys_bits;
 }

 void dump_vmcs(void);
@@ -188,7 +188,7 @@ static struct kvm_shared_msrs __percpu *shared_msrs;
 u64 __read_mostly host_efer;
 EXPORT_SYMBOL_GPL(host_efer);

-bool __read_mostly allow_smaller_maxphyaddr;
+bool __read_mostly allow_smaller_maxphyaddr = 0;
 EXPORT_SYMBOL_GPL(allow_smaller_maxphyaddr);

 static u64 __read_mostly host_xss;
@@ -976,6 +976,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 unsigned long old_cr4 = kvm_read_cr4(vcpu);
 unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
 X86_CR4_SMEP;
+unsigned long mmu_role_bits = pdptr_bits | X86_CR4_SMAP | X86_CR4_PKE;

 if (kvm_valid_cr4(vcpu, cr4))
 return 1;
@@ -1003,7 +1004,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 if (kvm_x86_ops.set_cr4(vcpu, cr4))
 return 1;

-if (((cr4 ^ old_cr4) & pdptr_bits) ||
+if (((cr4 ^ old_cr4) & mmu_role_bits) ||
 (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
 kvm_mmu_reset_context(vcpu);

@@ -3221,9 +3222,22 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 case MSR_IA32_POWER_CTL:
 msr_info->data = vcpu->arch.msr_ia32_power_ctl;
 break;
-case MSR_IA32_TSC:
-msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + vcpu->arch.tsc_offset;
+case MSR_IA32_TSC: {
+/*
+* Intel SDM states that MSR_IA32_TSC read adds the TSC offset
+* even when not intercepted. AMD manual doesn't explicitly
+* state this but appears to behave the same.
+*
+* On userspace reads and writes, however, we unconditionally
+* operate L1's TSC value to ensure backwards-compatible
+* behavior for migration.
+*/
+u64 tsc_offset = msr_info->host_initiated ? vcpu->arch.l1_tsc_offset :
+vcpu->arch.tsc_offset;
+
+msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + tsc_offset;
 break;
+}
 case MSR_MTRRcap:
 case 0x200 ... 0x2ff:
 return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data);
@@ -120,7 +120,7 @@ long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
 */
 if (size < 8) {
 if (!IS_ALIGNED(dest, 4) || size != 4)
-clean_cache_range(dst, 1);
+clean_cache_range(dst, size);
 } else {
 if (!IS_ALIGNED(dest, 8)) {
 dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
@@ -1412,6 +1412,11 @@ out:

 hctx->dispatched[queued_to_index(queued)]++;

+/* If we didn't flush the entire list, we could have told the driver
+* there was more coming, but that turned out to be a lie.
+*/
+if ((!list_empty(list) || errors) && q->mq_ops->commit_rqs && queued)
+q->mq_ops->commit_rqs(hctx);
 /*
 * Any items that need requeuing? Stuff them into hctx->dispatch,
 * that is where we will continue on next queue run.
@@ -1425,14 +1430,6 @@ out:

 blk_mq_release_budgets(q, nr_budgets);

-/*
-* If we didn't flush the entire list, we could have told
-* the driver there was more coming, but that turned out to
-* be a lie.
-*/
-if (q->mq_ops->commit_rqs && queued)
-q->mq_ops->commit_rqs(hctx);

 spin_lock(&hctx->lock);
 list_splice_tail_init(list, &hctx->dispatch);
 spin_unlock(&hctx->lock);
@@ -2079,6 +2076,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 struct list_head *list)
 {
 int queued = 0;
+int errors = 0;

 while (!list_empty(list)) {
 blk_status_t ret;
@@ -2095,6 +2093,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 break;
 }
 blk_mq_end_request(rq, ret);
+errors++;
 } else
 queued++;
 }
@@ -2104,7 +2103,8 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 * the driver there was more coming, but that turned out to
 * be a lie.
 */
-if (!list_empty(list) && hctx->queue->mq_ops->commit_rqs && queued)
+if ((!list_empty(list) || errors) &&
+hctx->queue->mq_ops->commit_rqs && queued)
 hctx->queue->mq_ops->commit_rqs(hctx);
 }

@@ -801,6 +801,52 @@ bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
 }
 EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);

+/**
+* blk_queue_set_zoned - configure a disk queue zoned model.
+* @disk: the gendisk of the queue to configure
+* @model: the zoned model to set
+*
+* Set the zoned model of the request queue of @disk according to @model.
+* When @model is BLK_ZONED_HM (host managed), this should be called only
+* if zoned block device support is enabled (CONFIG_BLK_DEV_ZONED option).
+* If @model specifies BLK_ZONED_HA (host aware), the effective model used
+* depends on CONFIG_BLK_DEV_ZONED settings and on the existence of partitions
+* on the disk.
+*/
+void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
+{
+switch (model) {
+case BLK_ZONED_HM:
+/*
+* Host managed devices are supported only if
+* CONFIG_BLK_DEV_ZONED is enabled.
+*/
+WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED));
+break;
+case BLK_ZONED_HA:
+/*
+* Host aware devices can be treated either as regular block
+* devices (similar to drive managed devices) or as zoned block
+* devices to take advantage of the zone command set, similarly
+* to host managed devices. We try the latter if there are no
+* partitions and zoned block device support is enabled, else
+* we do nothing special as far as the block layer is concerned.
+*/
+if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) ||
+disk_has_partitions(disk))
+model = BLK_ZONED_NONE;
+break;
+case BLK_ZONED_NONE:
+default:
+if (WARN_ON_ONCE(model != BLK_ZONED_NONE))
+model = BLK_ZONED_NONE;
+break;
+}
+
+disk->queue->limits.zoned = model;
+}
+EXPORT_SYMBOL_GPL(blk_queue_set_zoned);
+
 static int __init blk_settings_init(void)
 {
 blk_max_low_pfn = max_low_pfn - 1;
@@ -176,6 +176,7 @@ static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { }
 static bool lapic_timer_needs_broadcast(struct acpi_processor *pr,
 struct acpi_processor_cx *cx)
 {
+return false;
 }

 #endif
@@ -761,14 +761,36 @@ static int __ref get_nid_for_pfn(unsigned long pfn)
 return pfn_to_nid(pfn);
 }

+static int do_register_memory_block_under_node(int nid,
+struct memory_block *mem_blk)
+{
+int ret;
+
+/*
+ * If this memory block spans multiple nodes, we only indicate
+ * the last processed node.
+ */
+mem_blk->nid = nid;
+
+ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
+&mem_blk->dev.kobj,
+kobject_name(&mem_blk->dev.kobj));
+if (ret)
+return ret;
+
+return sysfs_create_link_nowarn(&mem_blk->dev.kobj,
+&node_devices[nid]->dev.kobj,
+kobject_name(&node_devices[nid]->dev.kobj));
+}
+
 /* register memory section under specified node if it spans that node */
-static int register_mem_sect_under_node(struct memory_block *mem_blk,
+static int register_mem_block_under_node_early(struct memory_block *mem_blk,
 void *arg)
 {
 unsigned long memory_block_pfns = memory_block_size_bytes() / PAGE_SIZE;
 unsigned long start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
 unsigned long end_pfn = start_pfn + memory_block_pfns - 1;
-int ret, nid = *(int *)arg;
+int nid = *(int *)arg;
 unsigned long pfn;

 for (pfn = start_pfn; pfn <= end_pfn; pfn++) {
@@ -785,38 +807,33 @@ static int register_mem_sect_under_node(struct memory_block *mem_blk,
 }

 /*
- * We need to check if page belongs to nid only for the boot
- * case, during hotplug we know that all pages in the memory
- * block belong to the same node.
+ * We need to check if page belongs to nid only at the boot
+ * case because node's ranges can be interleaved.
 */
-if (system_state == SYSTEM_BOOTING) {
-page_nid = get_nid_for_pfn(pfn);
-if (page_nid < 0)
-continue;
-if (page_nid != nid)
-continue;
-}
+page_nid = get_nid_for_pfn(pfn);
+if (page_nid < 0)
+continue;
+if (page_nid != nid)
+continue;

-/*
- * If this memory block spans multiple nodes, we only indicate
- * the last processed node.
- */
-mem_blk->nid = nid;
-
-ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
-&mem_blk->dev.kobj,
-kobject_name(&mem_blk->dev.kobj));
-if (ret)
-return ret;
-
-return sysfs_create_link_nowarn(&mem_blk->dev.kobj,
-&node_devices[nid]->dev.kobj,
-kobject_name(&node_devices[nid]->dev.kobj));
+return do_register_memory_block_under_node(nid, mem_blk);
 }
 /* mem section does not span the specified node */
 return 0;
 }

+/*
+ * During hotplug we know that all pages in the memory block belong to the same
+ * node.
+ */
+static int register_mem_block_under_node_hotplug(struct memory_block *mem_blk,
+void *arg)
+{
+int nid = *(int *)arg;
+
+return do_register_memory_block_under_node(nid, mem_blk);
+}
+
 /*
 * Unregister a memory block device under the node it spans. Memory blocks
 * with multiple nodes cannot be offlined and therefore also never be removed.
@@ -832,11 +849,19 @@ void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
 kobject_name(&node_devices[mem_blk->nid]->dev.kobj));
 }

-int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn)
+int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn,
+enum meminit_context context)
 {
+walk_memory_blocks_func_t func;
+
+if (context == MEMINIT_HOTPLUG)
+func = register_mem_block_under_node_hotplug;
+else
+func = register_mem_block_under_node_early;
+
 return walk_memory_blocks(PFN_PHYS(start_pfn),
 PFN_PHYS(end_pfn - start_pfn), (void *)&nid,
-register_mem_sect_under_node);
+func);
 }

 #ifdef CONFIG_HUGETLBFS
@@ -217,7 +217,7 @@ struct regmap_field {

 #ifdef CONFIG_DEBUG_FS
 extern void regmap_debugfs_initcall(void);
-extern void regmap_debugfs_init(struct regmap *map, const char *name);
+extern void regmap_debugfs_init(struct regmap *map);
 extern void regmap_debugfs_exit(struct regmap *map);

 static inline void regmap_debugfs_disable(struct regmap *map)
@@ -227,7 +227,7 @@ static inline void regmap_debugfs_disable(struct regmap *map)

 #else
 static inline void regmap_debugfs_initcall(void) { }
-static inline void regmap_debugfs_init(struct regmap *map, const char *name) { }
+static inline void regmap_debugfs_init(struct regmap *map) { }
 static inline void regmap_debugfs_exit(struct regmap *map) { }
 static inline void regmap_debugfs_disable(struct regmap *map) { }
 #endif
@@ -259,7 +259,7 @@ bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
 int regcache_lookup_reg(struct regmap *map, unsigned int reg);

 int _regmap_raw_write(struct regmap *map, unsigned int reg,
-const void *val, size_t val_len);
+const void *val, size_t val_len, bool noinc);

 void regmap_async_complete_cb(struct regmap_async *async, int ret);
@@ -717,7 +717,7 @@ static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,

 map->cache_bypass = true;

-ret = _regmap_raw_write(map, base, *data, count * val_bytes);
+ret = _regmap_raw_write(map, base, *data, count * val_bytes, false);
 if (ret)
 dev_err(map->dev, "Unable to sync registers %#x-%#x. %d\n",
 base, cur - map->reg_stride, ret);
@@ -17,7 +17,6 @@

 struct regmap_debugfs_node {
 struct regmap *map;
-const char *name;
 struct list_head link;
 };

@@ -544,11 +543,12 @@ static const struct file_operations regmap_cache_bypass_fops = {
 .write = regmap_cache_bypass_write_file,
 };

-void regmap_debugfs_init(struct regmap *map, const char *name)
+void regmap_debugfs_init(struct regmap *map)
 {
 struct rb_node *next;
 struct regmap_range_node *range_node;
 const char *devname = "dummy";
+const char *name = map->name;

 /*
 * Userspace can initiate reads from the hardware over debugfs.
@@ -569,7 +569,6 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
 if (!node)
 return;
 node->map = map;
-node->name = name;
 mutex_lock(&regmap_debugfs_early_lock);
 list_add(&node->link, &regmap_debugfs_early_list);
 mutex_unlock(&regmap_debugfs_early_lock);
@@ -679,7 +678,7 @@ void regmap_debugfs_initcall(void)

 mutex_lock(&regmap_debugfs_early_lock);
 list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list, link) {
-regmap_debugfs_init(node->map, node->name);
+regmap_debugfs_init(node->map);
 list_del(&node->link);
 kfree(node);
 }
@@ -581,14 +581,34 @@ static void regmap_range_exit(struct regmap *map)
 kfree(map->selector_work_buf);
 }

+static int regmap_set_name(struct regmap *map, const struct regmap_config *config)
+{
+if (config->name) {
+const char *name = kstrdup_const(config->name, GFP_KERNEL);
+
+if (!name)
+return -ENOMEM;
+
+kfree_const(map->name);
+map->name = name;
+}
+
+return 0;
+}
+
 int regmap_attach_dev(struct device *dev, struct regmap *map,
 const struct regmap_config *config)
 {
 struct regmap **m;
+int ret;

 map->dev = dev;

-regmap_debugfs_init(map, config->name);
+ret = regmap_set_name(map, config);
+if (ret)
+return ret;
+
+regmap_debugfs_init(map);

 /* Add a devres resource for dev_get_regmap() */
 m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
@@ -687,13 +707,9 @@ struct regmap *__regmap_init(struct device *dev,
 goto err;
 }

-if (config->name) {
-map->name = kstrdup_const(config->name, GFP_KERNEL);
-if (!map->name) {
-ret = -ENOMEM;
-goto err_map;
-}
-}
+ret = regmap_set_name(map, config);
+if (ret)
+goto err_map;

 if (config->disable_locking) {
 map->lock = map->unlock = regmap_lock_unlock_none;
@@ -1137,7 +1153,7 @@ skip_format_initialization:
 if (ret != 0)
 goto err_regcache;
 } else {
-regmap_debugfs_init(map, config->name);
+regmap_debugfs_init(map);
 }

 return map;
@@ -1297,6 +1313,8 @@ EXPORT_SYMBOL_GPL(regmap_field_free);
 */
 int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
 {
+int ret;
+
 regcache_exit(map);
 regmap_debugfs_exit(map);

@@ -1309,7 +1327,11 @@ int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
 map->readable_noinc_reg = config->readable_noinc_reg;
 map->cache_type = config->cache_type;

-regmap_debugfs_init(map, config->name);
+ret = regmap_set_name(map, config);
+if (ret)
+return ret;
+
+regmap_debugfs_init(map);

 map->cache_bypass = false;
 map->cache_only = false;
@@ -1464,7 +1486,7 @@ static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
 }

 static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
-const void *val, size_t val_len)
+const void *val, size_t val_len, bool noinc)
 {
 struct regmap_range_node *range;
 unsigned long flags;
@@ -1523,7 +1545,7 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
 win_residue, val_len / map->format.val_bytes);
 ret = _regmap_raw_write_impl(map, reg, val,
 win_residue *
-map->format.val_bytes);
+map->format.val_bytes, noinc);
 if (ret != 0)
 return ret;

@@ -1537,7 +1559,7 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
 win_residue = range->window_len - win_offset;
 }

-ret = _regmap_select_page(map, &reg, range, val_num);
+ret = _regmap_select_page(map, &reg, range, noinc ? 1 : val_num);
 if (ret != 0)
 return ret;
 }
@@ -1745,7 +1767,8 @@ static int _regmap_bus_raw_write(void *context, unsigned int reg,
 map->work_buf +
 map->format.reg_bytes +
 map->format.pad_bytes,
-map->format.val_bytes);
+map->format.val_bytes,
+false);
 }

 static inline void *_regmap_map_get_context(struct regmap *map)
@@ -1839,7 +1862,7 @@ int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
 EXPORT_SYMBOL_GPL(regmap_write_async);

 int _regmap_raw_write(struct regmap *map, unsigned int reg,
-const void *val, size_t val_len)
+const void *val, size_t val_len, bool noinc)
 {
 size_t val_bytes = map->format.val_bytes;
 size_t val_count = val_len / val_bytes;
@@ -1860,7 +1883,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,

 /* Write as many bytes as possible with chunk_size */
 for (i = 0; i < chunk_count; i++) {
-ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes);
+ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes, noinc);
 if (ret)
 return ret;

@@ -1871,7 +1894,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,

 /* Write remaining bytes */
 if (val_len)
-ret = _regmap_raw_write_impl(map, reg, val, val_len);
+ret = _regmap_raw_write_impl(map, reg, val, val_len, noinc);

 return ret;
 }
@@ -1904,7 +1927,7 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,

 map->lock(map->lock_arg);

-ret = _regmap_raw_write(map, reg, val, val_len);
+ret = _regmap_raw_write(map, reg, val, val_len, false);

 map->unlock(map->lock_arg);

@@ -1962,7 +1985,7 @@ int regmap_noinc_write(struct regmap *map, unsigned int reg,
 write_len = map->max_raw_write;
 else
 write_len = val_len;
-ret = _regmap_raw_write(map, reg, val, write_len);
+ret = _regmap_raw_write(map, reg, val, write_len, true);
 if (ret)
 goto out_unlock;
 val = ((u8 *)val) + write_len;
@@ -2439,7 +2462,7 @@ int regmap_raw_write_async(struct regmap *map, unsigned int reg,

 map->async = true;

-ret = _regmap_raw_write(map, reg, val, val_len);
+ret = _regmap_raw_write(map, reg, val, val_len, false);

 map->async = false;

@@ -2450,7 +2473,7 @@ int regmap_raw_write_async(struct regmap *map, unsigned int reg,
 EXPORT_SYMBOL_GPL(regmap_raw_write_async);

 static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
-unsigned int val_len)
+unsigned int val_len, bool noinc)
 {
 struct regmap_range_node *range;
 int ret;
@@ -2463,7 +2486,7 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
 range = _regmap_range_lookup(map, reg);
 if (range) {
 ret = _regmap_select_page(map, &reg, range,
-val_len / map->format.val_bytes);
+noinc ? 1 : val_len / map->format.val_bytes);
 if (ret != 0)
 return ret;
 }
@@ -2501,7 +2524,7 @@ static int _regmap_bus_read(void *context, unsigned int reg,
 if (!map->format.parse_val)
 return -EINVAL;

-ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes);
+ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes, false);
 if (ret == 0)
 *val = map->format.parse_val(work_val);

@@ -2617,7 +2640,7 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,

 /* Read bytes that fit into whole chunks */
 for (i = 0; i < chunk_count; i++) {
-ret = _regmap_raw_read(map, reg, val, chunk_bytes);
+ret = _regmap_raw_read(map, reg, val, chunk_bytes, false);
 if (ret != 0)
 goto out;

@@ -2628,7 +2651,7 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,

 /* Read remaining bytes */
 if (val_len) {
-ret = _regmap_raw_read(map, reg, val, val_len);
+ret = _regmap_raw_read(map, reg, val, val_len, false);
 if (ret != 0)
 goto out;
 }
@@ -2703,7 +2726,7 @@ int regmap_noinc_read(struct regmap *map, unsigned int reg,
 read_len = map->max_raw_read;
 else
 read_len = val_len;
-ret = _regmap_raw_read(map, reg, val, read_len);
+ret = _regmap_raw_read(map, reg, val, read_len, true);
 if (ret)
 goto out_unlock;
 val = ((u8 *)val) + read_len;
@@ -1553,7 +1553,7 @@ static int _drbd_send_page(struct drbd_peer_device *peer_device, struct page *pa
 * put_page(); and would cause either a VM_BUG directly, or
 * __page_cache_release a page that would actually still be referenced
 * by someone, leading to some obscure delayed Oops somewhere else. */
-if (drbd_disable_sendpage || (page_count(page) < 1) || PageSlab(page))
+if (drbd_disable_sendpage || !sendpage_ok(page))
 return _drbd_no_send_page(peer_device, page, offset, size, msg_flags);

 msg_flags |= MSG_NOSIGNAL;
@@ -927,7 +927,7 @@ static const struct samsung_gate_clock exynos4210_gate_clks[] __initconst = {
 GATE(CLK_PCIE, "pcie", "aclk133", GATE_IP_FSYS, 14, 0, 0),
 GATE(CLK_SMMU_PCIE, "smmu_pcie", "aclk133", GATE_IP_FSYS, 18, 0, 0),
 GATE(CLK_MODEMIF, "modemif", "aclk100", GATE_IP_PERIL, 28, 0, 0),
-GATE(CLK_CHIPID, "chipid", "aclk100", E4210_GATE_IP_PERIR, 0, 0, 0),
+GATE(CLK_CHIPID, "chipid", "aclk100", E4210_GATE_IP_PERIR, 0, CLK_IGNORE_UNUSED, 0),
 GATE(CLK_SYSREG, "sysreg", "aclk100", E4210_GATE_IP_PERIR, 0,
 CLK_IGNORE_UNUSED, 0),
 GATE(CLK_HDMI_CEC, "hdmi_cec", "aclk100", E4210_GATE_IP_PERIR, 11, 0,
@@ -969,7 +969,7 @@ static const struct samsung_gate_clock exynos4x12_gate_clks[] __initconst = {
 0),
 GATE(CLK_TSADC, "tsadc", "aclk133", E4X12_GATE_BUS_FSYS1, 16, 0, 0),
 GATE(CLK_MIPI_HSI, "mipi_hsi", "aclk133", GATE_IP_FSYS, 10, 0, 0),
-GATE(CLK_CHIPID, "chipid", "aclk100", E4X12_GATE_IP_PERIR, 0, 0, 0),
+GATE(CLK_CHIPID, "chipid", "aclk100", E4X12_GATE_IP_PERIR, 0, CLK_IGNORE_UNUSED, 0),
 GATE(CLK_SYSREG, "sysreg", "aclk100", E4X12_GATE_IP_PERIR, 1,
 CLK_IGNORE_UNUSED, 0),
 GATE(CLK_HDMI_CEC, "hdmi_cec", "aclk100", E4X12_GATE_IP_PERIR, 11, 0,
@@ -1655,6 +1655,11 @@ static void __init exynos5x_clk_init(struct device_node *np,
 * main G3D clock enablement status.
 */
 clk_prepare_enable(__clk_lookup("mout_sw_aclk_g3d"));
+/*
+ * Keep top BPLL mux enabled permanently to ensure that DRAM operates
+ * properly.
+ */
+clk_prepare_enable(__clk_lookup("mout_bpll"));

 samsung_clk_of_add_provider(np, ctx);
 }
@@ -209,7 +209,7 @@ static const struct stratix10_perip_cnt_clock s10_main_perip_cnt_clks[] = {
 { STRATIX10_EMAC_B_FREE_CLK, "emacb_free_clk", NULL, emacb_free_mux, ARRAY_SIZE(emacb_free_mux),
 0, 0, 2, 0xB0, 1},
 { STRATIX10_EMAC_PTP_FREE_CLK, "emac_ptp_free_clk", NULL, emac_ptp_free_mux,
-ARRAY_SIZE(emac_ptp_free_mux), 0, 0, 4, 0xB0, 2},
+ARRAY_SIZE(emac_ptp_free_mux), 0, 0, 2, 0xB0, 2},
 { STRATIX10_GPIO_DB_FREE_CLK, "gpio_db_free_clk", NULL, gpio_db_free_mux,
 ARRAY_SIZE(gpio_db_free_mux), 0, 0, 0, 0xB0, 3},
 { STRATIX10_SDMMC_FREE_CLK, "sdmmc_free_clk", NULL, sdmmc_free_mux,
@@ -1611,9 +1611,6 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw)
 unsigned long flags = 0;
 unsigned long input_rate;

-if (clk_pll_is_enabled(hw))
-return 0;
-
 input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));

 if (_get_table_rate(hw, &sel, pll->params->fixed_rate, input_rate))
@@ -1673,7 +1670,7 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw)
 pll_writel(val, PLLE_SS_CTRL, pll);
 udelay(1);

-/* Enable hw control of xusb brick pll */
+/* Enable HW control of XUSB brick PLL */
 val = pll_readl_misc(pll);
 val &= ~PLLE_MISC_IDDQ_SW_CTRL;
 pll_writel_misc(val, pll);
@@ -1696,7 +1693,7 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw)
 val |= XUSBIO_PLL_CFG0_SEQ_ENABLE;
 pll_writel(val, XUSBIO_PLL_CFG0, pll);

-/* Enable hw control of SATA pll */
+/* Enable HW control of SATA PLL */
 val = pll_readl(SATA_PLL_CFG0, pll);
 val &= ~SATA_PLL_CFG0_PADPLL_RESET_SWCTL;
 val |= SATA_PLL_CFG0_PADPLL_USE_LOCKDET;
@@ -12,6 +12,8 @@
 #include <linux/io.h>
 #include <linux/slab.h>

+#include "clk.h"
+
 #define CLK_SOURCE_EMC 0x19c
 #define CLK_SOURCE_EMC_2X_CLK_SRC GENMASK(31, 29)
 #define CLK_SOURCE_EMC_MC_EMC_SAME_FREQ BIT(16)
@@ -169,7 +169,7 @@ static int __init h8300_8timer_init(struct device_node *node)
 return PTR_ERR(clk);
 }

-ret = ENXIO;
+ret = -ENXIO;
 base = of_iomap(node, 0);
 if (!base) {
 pr_err("failed to map registers for clockevent\n");
@@ -38,6 +38,7 @@ static unsigned int clint_timer_irq;

 #ifdef CONFIG_RISCV_M_MODE
 u64 __iomem *clint_time_val;
+EXPORT_SYMBOL(clint_time_val);
 #endif

 static void clint_send_ipi(const struct cpumask *target)
@@ -28,6 +28,7 @@ static irqreturn_t gx6605s_timer_interrupt(int irq, void *dev)
 void __iomem *base = timer_of_base(to_timer_of(ce));

 writel_relaxed(GX6605S_STATUS_CLR, base + TIMER_STATUS);
+writel_relaxed(0, base + TIMER_INI);

 ce->event_handler(ce);
@@ -69,12 +69,33 @@ static bool dmtimer_systimer_revision1(struct dmtimer_systimer *t)
 return !(tidr >> 16);
 }

+static void dmtimer_systimer_enable(struct dmtimer_systimer *t)
+{
+u32 val;
+
+if (dmtimer_systimer_revision1(t))
+val = DMTIMER_TYPE1_ENABLE;
+else
+val = DMTIMER_TYPE2_ENABLE;
+
+writel_relaxed(val, t->base + t->sysc);
+}
+
+static void dmtimer_systimer_disable(struct dmtimer_systimer *t)
+{
+if (!dmtimer_systimer_revision1(t))
+return;
+
+writel_relaxed(DMTIMER_TYPE1_DISABLE, t->base + t->sysc);
+}
+
 static int __init dmtimer_systimer_type1_reset(struct dmtimer_systimer *t)
 {
 void __iomem *syss = t->base + OMAP_TIMER_V1_SYS_STAT_OFFSET;
 int ret;
 u32 l;

+dmtimer_systimer_enable(t);
 writel_relaxed(BIT(1) | BIT(2), t->base + t->ifctrl);
 ret = readl_poll_timeout_atomic(syss, l, l & BIT(0), 100,
 DMTIMER_RESET_WAIT);
@@ -88,6 +109,7 @@ static int __init dmtimer_systimer_type2_reset(struct dmtimer_systimer *t)
 void __iomem *sysc = t->base + t->sysc;
 u32 l;

+dmtimer_systimer_enable(t);
 l = readl_relaxed(sysc);
 l |= BIT(0);
 writel_relaxed(l, sysc);
@@ -336,26 +358,6 @@ static int __init dmtimer_systimer_init_clock(struct dmtimer_systimer *t,
 return 0;
 }

-static void dmtimer_systimer_enable(struct dmtimer_systimer *t)
-{
-u32 val;
-
-if (dmtimer_systimer_revision1(t))
-val = DMTIMER_TYPE1_ENABLE;
-else
-val = DMTIMER_TYPE2_ENABLE;
-
-writel_relaxed(val, t->base + t->sysc);
-}
-
-static void dmtimer_systimer_disable(struct dmtimer_systimer *t)
-{
-if (!dmtimer_systimer_revision1(t))
-return;
-
-writel_relaxed(DMTIMER_TYPE1_DISABLE, t->base + t->sysc);
-}
-
 static int __init dmtimer_systimer_setup(struct device_node *np,
 struct dmtimer_systimer *t)
 {
@@ -409,8 +411,8 @@ static int __init dmtimer_systimer_setup(struct device_node *np,
 t->wakeup = regbase + _OMAP_TIMER_WAKEUP_EN_OFFSET;
 t->ifctrl = regbase + _OMAP_TIMER_IF_CTRL_OFFSET;

-dmtimer_systimer_enable(t);
 dmtimer_systimer_reset(t);
+dmtimer_systimer_enable(t);
 pr_debug("dmtimer rev %08x sysc %08x\n", readl_relaxed(t->base),
 readl_relaxed(t->base + t->sysc));
@@ -2781,6 +2781,7 @@ static int intel_pstate_update_status(const char *buf, size_t size)

 cpufreq_unregister_driver(intel_pstate_driver);
 intel_pstate_driver_cleanup();
+return 0;
 }

 if (size == 6 && !strncmp(buf, "active", size)) {
@@ -66,7 +66,7 @@ static int psci_enter_domain_idle_state(struct cpuidle_device *dev,
 return -1;

 /* Do runtime PM to manage a hierarchical CPU toplogy. */
-pm_runtime_put_sync_suspend(pd_dev);
+RCU_NONIDLE(pm_runtime_put_sync_suspend(pd_dev));

 state = psci_get_domain_state();
 if (!state)
@@ -74,7 +74,7 @@ static int psci_enter_domain_idle_state(struct cpuidle_device *dev,

 ret = psci_cpu_suspend_enter(state) ? -1 : idx;

-pm_runtime_get_sync(pd_dev);
+RCU_NONIDLE(pm_runtime_get_sync(pd_dev));

 cpu_pm_exit();
@@ -142,11 +142,6 @@ static void enter_s2idle_proper(struct cpuidle_driver *drv,

 time_start = ns_to_ktime(local_clock());

-/*
- * trace_suspend_resume() called by tick_freeze() for the last CPU
- * executing it contains RCU usage regarded as invalid in the idle
- * context, so tell RCU about that.
- */
 tick_freeze();
 /*
 * The state used here cannot be a "coupled" one, because the "coupled"
@@ -159,11 +154,6 @@ static void enter_s2idle_proper(struct cpuidle_driver *drv,
 target_state->enter_s2idle(dev, drv, index);
 if (WARN_ON_ONCE(!irqs_disabled()))
 local_irq_disable();
-/*
- * timekeeping_resume() that will be called by tick_unfreeze() for the
- * first CPU executing it calls functions containing RCU read-side
- * critical sections, so tell RCU about that.
- */
 if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
 rcu_idle_exit();
 tick_unfreeze();
@@ -1766,20 +1766,23 @@ static int devfreq_summary_show(struct seq_file *s, void *data)
 struct devfreq *p_devfreq = NULL;
 unsigned long cur_freq, min_freq, max_freq;
 unsigned int polling_ms;
+unsigned int timer;

-seq_printf(s, "%-30s %-30s %-15s %10s %12s %12s %12s\n",
+seq_printf(s, "%-30s %-30s %-15s %-10s %10s %12s %12s %12s\n",
 "dev",
 "parent_dev",
 "governor",
+"timer",
 "polling_ms",
 "cur_freq_Hz",
 "min_freq_Hz",
 "max_freq_Hz");
-seq_printf(s, "%30s %30s %15s %10s %12s %12s %12s\n",
+seq_printf(s, "%30s %30s %15s %10s %10s %12s %12s %12s\n",
 "------------------------------",
 "------------------------------",
 "---------------",
 "----------",
+"----------",
 "------------",
 "------------",
 "------------");
@@ -1803,13 +1806,15 @@ static int devfreq_summary_show(struct seq_file *s, void *data)
 cur_freq = devfreq->previous_freq;
 get_freq_range(devfreq, &min_freq, &max_freq);
 polling_ms = devfreq->profile->polling_ms;
+timer = devfreq->profile->timer;
 mutex_unlock(&devfreq->lock);

 seq_printf(s,
-"%-30s %-30s %-15s %10d %12ld %12ld %12ld\n",
+"%-30s %-30s %-15s %-10s %10d %12ld %12ld %12ld\n",
 dev_name(&devfreq->dev),
 p_devfreq ? dev_name(&p_devfreq->dev) : "null",
 devfreq->governor_name,
+polling_ms ? timer_name[timer] : "null",
 polling_ms,
 cur_freq,
 min_freq,
@@ -836,7 +836,8 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
 rate = clk_round_rate(tegra->emc_clock, ULONG_MAX);
 if (rate < 0) {
 dev_err(&pdev->dev, "Failed to round clock rate: %ld\n", rate);
-return rate;
+err = rate;
+goto disable_clk;
 }

 tegra->max_freq = rate / KHZ;
@@ -897,6 +898,7 @@ remove_opps:
 dev_pm_opp_remove_all_dynamic(&pdev->dev);

 reset_control_reset(tegra->reset);
+disable_clk:
 clk_disable_unprepare(tegra->clock);

 return err;
@@ -59,6 +59,8 @@ static void dma_buf_release(struct dentry *dentry)
 struct dma_buf *dmabuf;

 dmabuf = dentry->d_fsdata;
+if (unlikely(!dmabuf))
+return;

 BUG_ON(dmabuf->vmapping_counter);

@@ -129,6 +129,7 @@ struct dmatest_params {
 * @nr_channels: number of channels under test
 * @lock: access protection to the fields of this structure
 * @did_init: module has been initialized completely
+ * @last_error: test has faced configuration issues
 */
 static struct dmatest_info {
 /* Test parameters */
@@ -137,6 +138,7 @@ static struct dmatest_info {
 /* Internal state */
 struct list_head channels;
 unsigned int nr_channels;
+int last_error;
 struct mutex lock;
 bool did_init;
 } test_info = {
@@ -1184,10 +1186,22 @@ static int dmatest_run_set(const char *val, const struct kernel_param *kp)
 return ret;
 } else if (dmatest_run) {
 if (!is_threaded_test_pending(info)) {
-pr_info("No channels configured, continue with any\n");
-if (!is_threaded_test_run(info))
-stop_threaded_test(info);
-add_threaded_test(info);
+/*
+ * We have nothing to run. This can be due to:
+ */
+ret = info->last_error;
+if (ret) {
+/* 1) Misconfiguration */
+pr_err("Channel misconfigured, can't continue\n");
+mutex_unlock(&info->lock);
+return ret;
+} else {
+/* 2) We rely on defaults */
+pr_info("No channels configured, continue with any\n");
+if (!is_threaded_test_run(info))
+stop_threaded_test(info);
+add_threaded_test(info);
+}
 }
 start_threaded_tests(info);
 } else {
@@ -1204,7 +1218,7 @@ static int dmatest_chan_set(const char *val, const struct kernel_param *kp)
 struct dmatest_info *info = &test_info;
 struct dmatest_chan *dtc;
 char chan_reset_val[20];
-int ret = 0;
+int ret;

 mutex_lock(&info->lock);
 ret = param_set_copystring(val, kp);
@@ -1259,12 +1273,14 @@ static int dmatest_chan_set(const char *val, const struct kernel_param *kp)
 goto add_chan_err;
 }

+info->last_error = ret;
 mutex_unlock(&info->lock);

 return ret;

 add_chan_err:
 param_set_copystring(chan_reset_val, kp);
+info->last_error = ret;
 mutex_unlock(&info->lock);

 return ret;
@@ -92,7 +92,7 @@ static int amd_fch_gpio_get_direction(struct gpio_chip *gc, unsigned int gpio)
 ret = (readl_relaxed(ptr) & AMD_FCH_GPIO_FLAG_DIRECTION);
 spin_unlock_irqrestore(&priv->lock, flags);

-return ret ? GPIO_LINE_DIRECTION_IN : GPIO_LINE_DIRECTION_OUT;
+return ret ? GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN;
 }

 static void amd_fch_gpio_set(struct gpio_chip *gc,
@@ -17,7 +17,17 @@
 #include <linux/spinlock.h>
 #include <linux/string.h>

-#define MAX_NR_SGPIO 80
+/*
+ * MAX_NR_HW_GPIO represents the number of actual hardware-supported GPIOs (ie,
+ * slots within the clocked serial GPIO data). Since each HW GPIO is both an
+ * input and an output, we provide MAX_NR_HW_GPIO * 2 lines on our gpiochip
+ * device.
+ *
+ * We use SGPIO_OUTPUT_OFFSET to define the split between the inputs and
+ * outputs; the inputs start at line 0, the outputs start at OUTPUT_OFFSET.
+ */
+#define MAX_NR_HW_SGPIO 80
+#define SGPIO_OUTPUT_OFFSET MAX_NR_HW_SGPIO

 #define ASPEED_SGPIO_CTRL 0x54

@@ -30,8 +40,8 @@ struct aspeed_sgpio {
 struct clk *pclk;
 spinlock_t lock;
 void __iomem *base;
-uint32_t dir_in[3];
 int irq;
+int n_sgpio;
 };

 struct aspeed_sgpio_bank {
@@ -111,31 +121,69 @@ static void __iomem *bank_reg(struct aspeed_sgpio *gpio,
 }
 }

-#define GPIO_BANK(x) ((x) >> 5)
-#define GPIO_OFFSET(x) ((x) & 0x1f)
+#define GPIO_BANK(x) ((x % SGPIO_OUTPUT_OFFSET) >> 5)
+#define GPIO_OFFSET(x) ((x % SGPIO_OUTPUT_OFFSET) & 0x1f)
 #define GPIO_BIT(x) BIT(GPIO_OFFSET(x))

 static const struct aspeed_sgpio_bank *to_bank(unsigned int offset)
 {
-unsigned int bank = GPIO_BANK(offset);
+unsigned int bank;
+
+bank = GPIO_BANK(offset);
+
 WARN_ON(bank >= ARRAY_SIZE(aspeed_sgpio_banks));
 return &aspeed_sgpio_banks[bank];
 }

+static int aspeed_sgpio_init_valid_mask(struct gpio_chip *gc,
+unsigned long *valid_mask, unsigned int ngpios)
+{
+struct aspeed_sgpio *sgpio = gpiochip_get_data(gc);
+int n = sgpio->n_sgpio;
+int c = SGPIO_OUTPUT_OFFSET - n;
+
+WARN_ON(ngpios < MAX_NR_HW_SGPIO * 2);
+
+/* input GPIOs in the lower range */
+bitmap_set(valid_mask, 0, n);
+bitmap_clear(valid_mask, n, c);
+
+/* output GPIOS above SGPIO_OUTPUT_OFFSET */
+bitmap_set(valid_mask, SGPIO_OUTPUT_OFFSET, n);
+bitmap_clear(valid_mask, SGPIO_OUTPUT_OFFSET + n, c);
+
+return 0;
+}
+
+static void aspeed_sgpio_irq_init_valid_mask(struct gpio_chip *gc,
+unsigned long *valid_mask, unsigned int ngpios)
+{
+struct aspeed_sgpio *sgpio = gpiochip_get_data(gc);
+int n = sgpio->n_sgpio;
+
+WARN_ON(ngpios < MAX_NR_HW_SGPIO * 2);
+
+/* input GPIOs in the lower range */
+bitmap_set(valid_mask, 0, n);
+bitmap_clear(valid_mask, n, ngpios - n);
+}
+
+static bool aspeed_sgpio_is_input(unsigned int offset)
+{
+return offset < SGPIO_OUTPUT_OFFSET;
+}
+
 static int aspeed_sgpio_get(struct gpio_chip *gc, unsigned int offset)
 {
 struct aspeed_sgpio *gpio = gpiochip_get_data(gc);
 const struct aspeed_sgpio_bank *bank = to_bank(offset);
 unsigned long flags;
 enum aspeed_sgpio_reg reg;
-bool is_input;
 int rc = 0;

 spin_lock_irqsave(&gpio->lock, flags);

-is_input = gpio->dir_in[GPIO_BANK(offset)] & GPIO_BIT(offset);
-reg = is_input ? reg_val : reg_rdata;
+reg = aspeed_sgpio_is_input(offset) ? reg_val : reg_rdata;
 rc = !!(ioread32(bank_reg(gpio, bank, reg)) & GPIO_BIT(offset));

 spin_unlock_irqrestore(&gpio->lock, flags);
@@ -143,22 +191,31 @@ static int aspeed_sgpio_get(struct gpio_chip *gc, unsigned int offset)
 return rc;
 }

-static void sgpio_set_value(struct gpio_chip *gc, unsigned int offset, int val)
+static int sgpio_set_value(struct gpio_chip *gc, unsigned int offset, int val)
 {
 struct aspeed_sgpio *gpio = gpiochip_get_data(gc);
 const struct aspeed_sgpio_bank *bank = to_bank(offset);
-void __iomem *addr;
+void __iomem *addr_r, *addr_w;
 u32 reg = 0;

-addr = bank_reg(gpio, bank, reg_val);
-reg = ioread32(addr);
+if (aspeed_sgpio_is_input(offset))
+return -EINVAL;
+
+/* Since this is an output, read the cached value from rdata, then
+ * update val. */
+addr_r = bank_reg(gpio, bank, reg_rdata);
+addr_w = bank_reg(gpio, bank, reg_val);
+
+reg = ioread32(addr_r);

 if (val)
 reg |= GPIO_BIT(offset);
 else
 reg &= ~GPIO_BIT(offset);

-iowrite32(reg, addr);
+iowrite32(reg, addr_w);
+
+return 0;
 }

 static void aspeed_sgpio_set(struct gpio_chip *gc, unsigned int offset, int val)
@@ -175,43 +232,28 @@ static void aspeed_sgpio_set(struct gpio_chip *gc, unsigned int offset, int val)

 static int aspeed_sgpio_dir_in(struct gpio_chip *gc, unsigned int offset)
 {
-struct aspeed_sgpio *gpio = gpiochip_get_data(gc);
-unsigned long flags;
-
-spin_lock_irqsave(&gpio->lock, flags);
-gpio->dir_in[GPIO_BANK(offset)] |= GPIO_BIT(offset);
-spin_unlock_irqrestore(&gpio->lock, flags);
-
-return 0;
+return aspeed_sgpio_is_input(offset) ? 0 : -EINVAL;
 }

 static int aspeed_sgpio_dir_out(struct gpio_chip *gc, unsigned int offset, int val)
 {
 struct aspeed_sgpio *gpio = gpiochip_get_data(gc);
 unsigned long flags;
+int rc;
+
+/* No special action is required for setting the direction; we'll
+ * error-out in sgpio_set_value if this isn't an output GPIO */

 spin_lock_irqsave(&gpio->lock, flags);
-gpio->dir_in[GPIO_BANK(offset)] &= ~GPIO_BIT(offset);
-sgpio_set_value(gc, offset, val);
+rc = sgpio_set_value(gc, offset, val);
 spin_unlock_irqrestore(&gpio->lock, flags);

-return 0;
+return rc;
 }

 static int aspeed_sgpio_get_direction(struct gpio_chip *gc, unsigned int offset)
 {
-int dir_status;
-struct aspeed_sgpio *gpio = gpiochip_get_data(gc);
-unsigned long flags;
-
-spin_lock_irqsave(&gpio->lock, flags);
-dir_status = gpio->dir_in[GPIO_BANK(offset)] & GPIO_BIT(offset);
-spin_unlock_irqrestore(&gpio->lock, flags);
-
-return dir_status;
-
+return !!aspeed_sgpio_is_input(offset);
 }

 static void irqd_to_aspeed_sgpio_data(struct irq_data *d,
@@ -402,6 +444,7 @@ static int aspeed_sgpio_setup_irqs(struct aspeed_sgpio *gpio,

 irq = &gpio->chip.irq;
 irq->chip = &aspeed_sgpio_irqchip;
+irq->init_valid_mask = aspeed_sgpio_irq_init_valid_mask;
 irq->handler = handle_bad_irq;
 irq->default_type = IRQ_TYPE_NONE;
 irq->parent_handler = aspeed_sgpio_irq_handler;
@@ -409,17 +452,15 @@ static int aspeed_sgpio_setup_irqs(struct aspeed_sgpio *gpio,
 irq->parents = &gpio->irq;
 irq->num_parents = 1;

-/* set IRQ settings and Enable Interrupt */
+/* Apply default IRQ settings */
 for (i = 0; i < ARRAY_SIZE(aspeed_sgpio_banks); i++) {
 bank = &aspeed_sgpio_banks[i];
 /* set falling or level-low irq */
 iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_type0));
 /* trigger type is edge */
 iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_type1));
-/* dual edge trigger mode. */
-iowrite32(0xffffffff, bank_reg(gpio, bank, reg_irq_type2));
-/* enable irq */
-iowrite32(0xffffffff, bank_reg(gpio, bank, reg_irq_enable));
+/* single edge trigger */
+iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_type2));
 }

 return 0;
@@ -452,11 +493,12 @@ static int __init aspeed_sgpio_probe(struct platform_device *pdev)
 if (rc < 0) {
 dev_err(&pdev->dev, "Could not read ngpios property\n");
 return -EINVAL;
-} else if (nr_gpios > MAX_NR_SGPIO) {
+} else if (nr_gpios > MAX_NR_HW_SGPIO) {
 dev_err(&pdev->dev, "Number of GPIOs exceeds the maximum of %d: %d\n",
-MAX_NR_SGPIO, nr_gpios);
+MAX_NR_HW_SGPIO, nr_gpios);
 return -EINVAL;
 }
+gpio->n_sgpio = nr_gpios;

 rc = of_property_read_u32(pdev->dev.of_node, "bus-frequency", &sgpio_freq);
 if (rc < 0) {
@@ -497,7 +539,8 @@ static int __init aspeed_sgpio_probe(struct platform_device *pdev)
 spin_lock_init(&gpio->lock);

 gpio->chip.parent = &pdev->dev;
-gpio->chip.ngpio = nr_gpios;
+gpio->chip.ngpio = MAX_NR_HW_SGPIO * 2;
+gpio->chip.init_valid_mask = aspeed_sgpio_init_valid_mask;
 gpio->chip.direction_input = aspeed_sgpio_dir_in;
 gpio->chip.direction_output = aspeed_sgpio_dir_out;
 gpio->chip.get_direction = aspeed_sgpio_get_direction;
@@ -509,9 +552,6 @@ static int __init aspeed_sgpio_probe(struct platform_device *pdev)
 gpio->chip.label = dev_name(&pdev->dev);
 gpio->chip.base = -1;

-/* set all SGPIO pins as input (1). */
-memset(gpio->dir_in, 0xff, sizeof(gpio->dir_in));
-
 aspeed_sgpio_setup_irqs(gpio, pdev);

 rc = devm_gpiochip_add_data(&pdev->dev, &gpio->chip, gpio);
@@ -1114,8 +1114,8 @@ static const struct aspeed_gpio_config ast2500_config =
 
 static const struct aspeed_bank_props ast2600_bank_props[] = {
 /* input output */
-{5, 0xffffffff, 0x0000ffff}, /* U/V/W/X */
-{6, 0xffff0000, 0x0fff0000}, /* Y/Z */
+{5, 0xffffffff, 0xffffff00}, /* U/V/W/X */
+{6, 0x0000ffff, 0x0000ffff}, /* Y/Z */
 { },
 };
 
@@ -552,6 +552,7 @@ static int __init gpio_mockup_init(void)
 err = platform_driver_register(&gpio_mockup_driver);
 if (err) {
 gpio_mockup_err("error registering platform driver\n");
+debugfs_remove_recursive(gpio_mockup_dbg_dir);
 return err;
 }
 
@@ -582,6 +583,7 @@ static int __init gpio_mockup_init(void)
 gpio_mockup_err("error registering device");
 platform_driver_unregister(&gpio_mockup_driver);
 gpio_mockup_unregister_pdevs();
+debugfs_remove_recursive(gpio_mockup_dbg_dir);
 return PTR_ERR(pdev);
 }
 
@@ -1516,7 +1516,7 @@ static int __maybe_unused omap_gpio_runtime_resume(struct device *dev)
 return 0;
 }
 
-static int omap_gpio_suspend(struct device *dev)
+static int __maybe_unused omap_gpio_suspend(struct device *dev)
 {
 struct gpio_bank *bank = dev_get_drvdata(dev);
 
@@ -1528,7 +1528,7 @@ static int omap_gpio_suspend(struct device *dev)
 return omap_gpio_runtime_suspend(dev);
 }
 
-static int omap_gpio_resume(struct device *dev)
+static int __maybe_unused omap_gpio_resume(struct device *dev)
 {
 struct gpio_bank *bank = dev_get_drvdata(dev);
 
@@ -818,6 +818,8 @@ static irqreturn_t pca953x_irq_handler(int irq, void *devid)
 int level;
 bool ret;
 
+bitmap_zero(pending, MAX_LINE);
+
 mutex_lock(&chip->i2c_lock);
 ret = pca953x_irq_pending(chip, pending);
 mutex_unlock(&chip->i2c_lock);
@@ -940,6 +942,7 @@ out:
 static int device_pca957x_init(struct pca953x_chip *chip, u32 invert)
 {
 DECLARE_BITMAP(val, MAX_LINE);
+unsigned int i;
 int ret;
 
 ret = device_pca95xx_init(chip, invert);
@@ -947,7 +950,9 @@ static int device_pca957x_init(struct pca953x_chip *chip, u32 invert)
 goto out;
 
 /* To enable register 6, 7 to control pull up and pull down */
-memset(val, 0x02, NBANK(chip));
+for (i = 0; i < NBANK(chip); i++)
+bitmap_set_value8(val, 0x02, i * BANK_SZ);
+
 ret = pca953x_write_regs(chip, PCA957X_BKEN, val);
 if (ret)
 goto out;
@@ -245,6 +245,7 @@ static int gpio_siox_probe(struct siox_device *sdevice)
 girq->chip = &ddata->ichip;
 girq->default_type = IRQ_TYPE_NONE;
 girq->handler = handle_level_irq;
+girq->threaded = true;
 
 ret = devm_gpiochip_add_data(dev, &ddata->gchip, NULL);
 if (ret)
@@ -149,17 +149,20 @@ static int sprd_gpio_irq_set_type(struct irq_data *data,
 sprd_gpio_update(chip, offset, SPRD_GPIO_IS, 0);
 sprd_gpio_update(chip, offset, SPRD_GPIO_IBE, 0);
 sprd_gpio_update(chip, offset, SPRD_GPIO_IEV, 1);
+sprd_gpio_update(chip, offset, SPRD_GPIO_IC, 1);
 irq_set_handler_locked(data, handle_edge_irq);
 break;
 case IRQ_TYPE_EDGE_FALLING:
 sprd_gpio_update(chip, offset, SPRD_GPIO_IS, 0);
 sprd_gpio_update(chip, offset, SPRD_GPIO_IBE, 0);
 sprd_gpio_update(chip, offset, SPRD_GPIO_IEV, 0);
+sprd_gpio_update(chip, offset, SPRD_GPIO_IC, 1);
 irq_set_handler_locked(data, handle_edge_irq);
 break;
 case IRQ_TYPE_EDGE_BOTH:
 sprd_gpio_update(chip, offset, SPRD_GPIO_IS, 0);
 sprd_gpio_update(chip, offset, SPRD_GPIO_IBE, 1);
+sprd_gpio_update(chip, offset, SPRD_GPIO_IC, 1);
 irq_set_handler_locked(data, handle_edge_irq);
 break;
 case IRQ_TYPE_LEVEL_HIGH:
@@ -212,7 +212,7 @@ static void tc3589x_gpio_irq_sync_unlock(struct irq_data *d)
 continue;
 
 tc3589x_gpio->oldregs[i][j] = new;
-tc3589x_reg_write(tc3589x, regmap[i] + j * 8, new);
+tc3589x_reg_write(tc3589x, regmap[i] + j, new);
 }
 }
 
@@ -423,6 +423,21 @@ static __poll_t lineevent_poll(struct file *file,
 return events;
 }
 
+static ssize_t lineevent_get_size(void)
+{
+#ifdef __x86_64__
+/* i386 has no padding after 'id' */
+if (in_ia32_syscall()) {
+struct compat_gpioeevent_data {
+compat_u64 timestamp;
+u32 id;
+};
+
+return sizeof(struct compat_gpioeevent_data);
+}
+#endif
+return sizeof(struct gpioevent_data);
+}
+
 static ssize_t lineevent_read(struct file *file,
 char __user *buf,
@@ -432,9 +447,20 @@ static ssize_t lineevent_read(struct file *file,
 struct lineevent_state *le = file->private_data;
 struct gpioevent_data ge;
 ssize_t bytes_read = 0;
+ssize_t ge_size;
 int ret;
 
-if (count < sizeof(ge))
+/*
+ * When compatible system call is being used the struct gpioevent_data,
+ * in case of at least ia32, has different size due to the alignment
+ * differences. Because we have first member 64 bits followed by one of
+ * 32 bits there is no gap between them. The only difference is the
+ * padding at the end of the data structure. Hence, we calculate the
+ * actual sizeof() and pass this as an argument to copy_to_user() to
+ * drop unneeded bytes from the output.
+ */
+ge_size = lineevent_get_size();
+if (count < ge_size)
 return -EINVAL;
 
 do {
@@ -470,10 +496,10 @@ static ssize_t lineevent_read(struct file *file,
 break;
 }
 
-if (copy_to_user(buf + bytes_read, &ge, sizeof(ge)))
+if (copy_to_user(buf + bytes_read, &ge, ge_size))
 return -EFAULT;
-bytes_read += sizeof(ge);
-} while (count >= bytes_read + sizeof(ge));
+bytes_read += ge_size;
+} while (count >= bytes_read + ge_size);
 
 return bytes_read;
 }
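As an aside on the sizing logic in the hunk above: the short standalone C sketch below (not part of this commit; the struct names event64 and event32 and the use of GCC attributes to emulate the ia32 layout are illustrative assumptions) shows why sizeof(struct gpioevent_data) differs between a 64-bit build and an ia32 process, which is exactly the padding that the new ge_size calculation keeps out of copy_to_user().

/*
 * Illustration only: a 64-bit ABI aligns the u64 timestamp to 8 bytes, so
 * the struct gets 4 bytes of tail padding after 'id'; ia32 aligns u64 to
 * 4 bytes and has no padding.
 */
#include <stdint.h>
#include <stdio.h>

struct event64 {                /* layout seen by a 64-bit reader */
	uint64_t timestamp;
	uint32_t id;
	/* 4 bytes of implicit tail padding here */
};

struct event32 {                /* emulates the ia32 layout of the same fields */
	uint64_t timestamp;
	uint32_t id;
} __attribute__((packed, aligned(4)));

int main(void)
{
	/* Typically prints 16 and 12 on an LP64 host. */
	printf("64-bit layout: %zu bytes\n", sizeof(struct event64));
	printf("ia32 layout:   %zu bytes\n", sizeof(struct event32));
	return 0;
}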
@@ -80,8 +80,6 @@ MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
-MODULE_FIRMWARE("amdgpu/sienna_cichlid_gpu_info.bin");
-MODULE_FIRMWARE("amdgpu/navy_flounder_gpu_info.bin");
 
 #define AMDGPU_RESUME_MS 2000
 
@@ -1600,6 +1598,8 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
 case CHIP_CARRIZO:
 case CHIP_STONEY:
 case CHIP_VEGA20:
+case CHIP_SIENNA_CICHLID:
+case CHIP_NAVY_FLOUNDER:
 default:
 return 0;
 case CHIP_VEGA10:
@@ -1631,12 +1631,6 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
 case CHIP_NAVI12:
 chip_name = "navi12";
 break;
-case CHIP_SIENNA_CICHLID:
-chip_name = "sienna_cichlid";
-break;
-case CHIP_NAVY_FLOUNDER:
-chip_name = "navy_flounder";
-break;
 }
 
 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
@@ -297,7 +297,7 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
 take the current one */
 if (active && !adev->have_disp_power_ref) {
 adev->have_disp_power_ref = true;
-goto out;
+return ret;
 }
 /* if we have no active crtcs, then drop the power ref
 we got before */
@@ -1044,8 +1044,16 @@ static const struct pci_device_id pciidlist[] = {
 {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
 
 /* Navi12 */
-{0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT},
-{0x1002, 0x7362, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT},
+{0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12},
+{0x1002, 0x7362, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12},
 
+/* Sienna_Cichlid */
+{0x1002, 0x73A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+{0x1002, 0x73A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+{0x1002, 0x73A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+{0x1002, 0x73AB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+{0x1002, 0x73AE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+{0x1002, 0x73BF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+
 {0, 0, 0}
 };
@@ -1076,6 +1076,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
 
 release_sg:
 kfree(ttm->sg);
+ttm->sg = NULL;
 return r;
 }
 
@@ -3595,6 +3595,9 @@ static void gfx_v10_0_check_gfxoff_flag(struct amdgpu_device *adev)
 if (!gfx_v10_0_navi10_gfxoff_should_enable(adev))
 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
 break;
+case CHIP_NAVY_FLOUNDER:
+adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+break;
 default:
 break;
 }
@@ -746,18 +746,18 @@ static void vcn_v3_0_disable_clock_gating(struct amdgpu_device *adev, int inst)
 | UVD_SUVD_CGC_GATE__IME_HEVC_MASK
 | UVD_SUVD_CGC_GATE__EFC_MASK
 | UVD_SUVD_CGC_GATE__SAOE_MASK
-| 0x08000000
+| UVD_SUVD_CGC_GATE__SRE_AV1_MASK
 | UVD_SUVD_CGC_GATE__FBC_PCLK_MASK
 | UVD_SUVD_CGC_GATE__FBC_CCLK_MASK
-| 0x40000000
+| UVD_SUVD_CGC_GATE__SCM_AV1_MASK
 | UVD_SUVD_CGC_GATE__SMPA_MASK);
 WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE, data);
 
 data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE2);
 data |= (UVD_SUVD_CGC_GATE2__MPBE0_MASK
 | UVD_SUVD_CGC_GATE2__MPBE1_MASK
-| 0x00000004
-| 0x00000008
+| UVD_SUVD_CGC_GATE2__SIT_AV1_MASK
+| UVD_SUVD_CGC_GATE2__SDB_AV1_MASK
 | UVD_SUVD_CGC_GATE2__MPC1_MASK);
 WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE2, data);
 
@@ -776,8 +776,8 @@ static void vcn_v3_0_disable_clock_gating(struct amdgpu_device *adev, int inst)
 | UVD_SUVD_CGC_CTRL__SMPA_MODE_MASK
 | UVD_SUVD_CGC_CTRL__MPBE0_MODE_MASK
 | UVD_SUVD_CGC_CTRL__MPBE1_MODE_MASK
-| 0x00008000
-| 0x00010000
+| UVD_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK
+| UVD_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK
 | UVD_SUVD_CGC_CTRL__MPC1_MODE_MASK
 | UVD_SUVD_CGC_CTRL__FBC_PCLK_MASK
 | UVD_SUVD_CGC_CTRL__FBC_CCLK_MASK);
@@ -892,8 +892,8 @@ static void vcn_v3_0_enable_clock_gating(struct amdgpu_device *adev, int inst)
 | UVD_SUVD_CGC_CTRL__SMPA_MODE_MASK
 | UVD_SUVD_CGC_CTRL__MPBE0_MODE_MASK
 | UVD_SUVD_CGC_CTRL__MPBE1_MODE_MASK
-| 0x00008000
-| 0x00010000
+| UVD_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK
+| UVD_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK
 | UVD_SUVD_CGC_CTRL__MPC1_MODE_MASK
 | UVD_SUVD_CGC_CTRL__FBC_PCLK_MASK
 | UVD_SUVD_CGC_CTRL__FBC_CCLK_MASK);
@@ -604,7 +604,7 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct
 int i = 0;
 
 hdcp_work = kcalloc(max_caps, sizeof(*hdcp_work), GFP_KERNEL);
-if (hdcp_work == NULL)
+if (ZERO_OR_NULL_PTR(hdcp_work))
 return NULL;
 
 hdcp_work->srm = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, sizeof(*hdcp_work->srm), GFP_KERNEL);
@@ -783,7 +783,6 @@ void rn_clk_mgr_construct(
 } else {
 struct clk_log_info log_info = {0};
 
-clk_mgr->smu_ver = rn_vbios_smu_get_smu_version(clk_mgr);
 clk_mgr->periodic_retraining_disabled = rn_vbios_smu_is_periodic_retraining_disabled(clk_mgr);
 
 /* SMU Version 55.51.0 and up no longer have an issue
@@ -31,9 +31,21 @@ DCN30 = dcn30_init.o dcn30_hubbub.o dcn30_hubp.o dcn30_dpp.o dcn30_optc.o \
 dcn30_dio_link_encoder.o dcn30_resource.o
 
 
-CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -mhard-float -msse -mpreferred-stack-boundary=4
-
+ifdef CONFIG_X86
 CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -mhard-float -msse
+CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -mhard-float -msse
+endif
+
+ifdef CONFIG_PPC64
+CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -mhard-float -maltivec
+CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -mhard-float -maltivec
+endif
+
+ifdef CONFIG_ARM64
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -mgeneral-regs-only
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -mgeneral-regs-only
+endif
+
 ifdef CONFIG_CC_IS_GCC
 ifeq ($(call cc-ifversion, -lt, 0701, y), y)
 IS_OLD_GCC = 1
@@ -45,8 +57,10 @@ ifdef IS_OLD_GCC
 # GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
 # (8B stack alignment).
 CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o += -mpreferred-stack-boundary=4
+CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o += -mpreferred-stack-boundary=4
 else
 CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o += -msse2
+CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o += -msse2
 endif
 
 AMD_DAL_DCN30 = $(addprefix $(AMDDALPATH)/dc/dcn30/,$(DCN30))
@@ -2727,6 +2727,7 @@
 #define mmDB_STENCIL_WRITE_BASE_DEFAULT 0x00000000
 #define mmDB_RESERVED_REG_1_DEFAULT 0x00000000
 #define mmDB_RESERVED_REG_3_DEFAULT 0x00000000
+#define mmDB_VRS_OVERRIDE_CNTL_DEFAULT 0x00000000
 #define mmDB_Z_READ_BASE_HI_DEFAULT 0x00000000
 #define mmDB_STENCIL_READ_BASE_HI_DEFAULT 0x00000000
 #define mmDB_Z_WRITE_BASE_HI_DEFAULT 0x00000000
@@ -3062,6 +3063,7 @@
 #define mmPA_SU_OVER_RASTERIZATION_CNTL_DEFAULT 0x00000000
 #define mmPA_STEREO_CNTL_DEFAULT 0x00000000
 #define mmPA_STATE_STEREO_X_DEFAULT 0x00000000
+#define mmPA_CL_VRS_CNTL_DEFAULT 0x00000000
 #define mmPA_SU_POINT_SIZE_DEFAULT 0x00000000
 #define mmPA_SU_POINT_MINMAX_DEFAULT 0x00000000
 #define mmPA_SU_LINE_CNTL_DEFAULT 0x00000000
@@ -5379,6 +5379,8 @@
 #define mmDB_RESERVED_REG_1_BASE_IDX 1
 #define mmDB_RESERVED_REG_3 0x0017
 #define mmDB_RESERVED_REG_3_BASE_IDX 1
+#define mmDB_VRS_OVERRIDE_CNTL 0x0019
+#define mmDB_VRS_OVERRIDE_CNTL_BASE_IDX 1
 #define mmDB_Z_READ_BASE_HI 0x001a
 #define mmDB_Z_READ_BASE_HI_BASE_IDX 1
 #define mmDB_STENCIL_READ_BASE_HI 0x001b
@@ -6049,6 +6051,8 @@
 #define mmPA_STEREO_CNTL_BASE_IDX 1
 #define mmPA_STATE_STEREO_X 0x0211
 #define mmPA_STATE_STEREO_X_BASE_IDX 1
+#define mmPA_CL_VRS_CNTL 0x0212
+#define mmPA_CL_VRS_CNTL_BASE_IDX 1
 #define mmPA_SU_POINT_SIZE 0x0280
 #define mmPA_SU_POINT_SIZE_BASE_IDX 1
 #define mmPA_SU_POINT_MINMAX 0x0281
@@ -9777,6 +9777,7 @@
 #define DB_EXCEPTION_CONTROL__AUTO_FLUSH_HTILE__SHIFT 0x3
 #define DB_EXCEPTION_CONTROL__AUTO_FLUSH_QUAD__SHIFT 0x4
 #define DB_EXCEPTION_CONTROL__FORCE_SUMMARIZE__SHIFT 0x8
+#define DB_EXCEPTION_CONTROL__FORCE_VRS_RATE_FINE__SHIFT 0x10
 #define DB_EXCEPTION_CONTROL__DTAG_WATERMARK__SHIFT 0x18
 #define DB_EXCEPTION_CONTROL__EARLY_Z_PANIC_DISABLE_MASK 0x00000001L
 #define DB_EXCEPTION_CONTROL__LATE_Z_PANIC_DISABLE_MASK 0x00000002L
@@ -9784,6 +9785,7 @@
 #define DB_EXCEPTION_CONTROL__AUTO_FLUSH_HTILE_MASK 0x00000008L
 #define DB_EXCEPTION_CONTROL__AUTO_FLUSH_QUAD_MASK 0x00000010L
 #define DB_EXCEPTION_CONTROL__FORCE_SUMMARIZE_MASK 0x00000F00L
+#define DB_EXCEPTION_CONTROL__FORCE_VRS_RATE_FINE_MASK 0x00FF0000L
 #define DB_EXCEPTION_CONTROL__DTAG_WATERMARK_MASK 0x7F000000L
 //DB_DFSM_CONFIG
 #define DB_DFSM_CONFIG__BYPASS_DFSM__SHIFT 0x0
@@ -10076,6 +10078,7 @@
 #define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_CM__SHIFT 0x18
 #define CB_HW_CONTROL_3__DISABLE_NACK_COLOR_RD_WR_OPT__SHIFT 0x19
 #define CB_HW_CONTROL_3__DISABLE_BLENDER_CLOCK_GATING__SHIFT 0x1a
+#define CB_HW_CONTROL_3__DISABLE_DCC_VRS_OPT__SHIFT 0x1c
 #define CB_HW_CONTROL_3__DISABLE_FMASK_NOFETCH_OPT__SHIFT 0x1e
 #define CB_HW_CONTROL_3__DISABLE_FMASK_NOFETCH_OPT_BC__SHIFT 0x1f
 #define CB_HW_CONTROL_3__DISABLE_SLOW_MODE_EMPTY_HALF_QUAD_KILL_MASK 0x00000001L
@@ -10103,12 +10106,15 @@
 #define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_CM_MASK 0x01000000L
 #define CB_HW_CONTROL_3__DISABLE_NACK_COLOR_RD_WR_OPT_MASK 0x02000000L
 #define CB_HW_CONTROL_3__DISABLE_BLENDER_CLOCK_GATING_MASK 0x04000000L
+#define CB_HW_CONTROL_3__DISABLE_DCC_VRS_OPT_MASK 0x10000000L
 #define CB_HW_CONTROL_3__DISABLE_FMASK_NOFETCH_OPT_MASK 0x40000000L
 #define CB_HW_CONTROL_3__DISABLE_FMASK_NOFETCH_OPT_BC_MASK 0x80000000L
 //CB_HW_CONTROL
 #define CB_HW_CONTROL__ALLOW_MRT_WITH_DUAL_SOURCE__SHIFT 0x0
+#define CB_HW_CONTROL__DISABLE_VRS_FILLRATE_OPTIMIZATION__SHIFT 0x1
 #define CB_HW_CONTROL__DISABLE_FILLRATE_OPT_FIX_WITH_CFC__SHIFT 0x3
 #define CB_HW_CONTROL__DISABLE_POST_DCC_WITH_CFC_FIX__SHIFT 0x4
+#define CB_HW_CONTROL__DISABLE_COMPRESS_1FRAG_WHEN_VRS_RATE_HINT_EN__SHIFT 0x5
 #define CB_HW_CONTROL__RMI_CREDITS__SHIFT 0x6
 #define CB_HW_CONTROL__CHICKEN_BITS__SHIFT 0xc
 #define CB_HW_CONTROL__DISABLE_FMASK_MULTI_MGCG_DOMAINS__SHIFT 0xf
@@ -10129,8 +10135,10 @@
 #define CB_HW_CONTROL__DISABLE_CC_IB_SERIALIZER_STATE_OPT__SHIFT 0x1e
 #define CB_HW_CONTROL__DISABLE_PIXEL_IN_QUAD_FIX_FOR_LINEAR_SURFACE__SHIFT 0x1f
 #define CB_HW_CONTROL__ALLOW_MRT_WITH_DUAL_SOURCE_MASK 0x00000001L
+#define CB_HW_CONTROL__DISABLE_VRS_FILLRATE_OPTIMIZATION_MASK 0x00000002L
 #define CB_HW_CONTROL__DISABLE_FILLRATE_OPT_FIX_WITH_CFC_MASK 0x00000008L
 #define CB_HW_CONTROL__DISABLE_POST_DCC_WITH_CFC_FIX_MASK 0x00000010L
+#define CB_HW_CONTROL__DISABLE_COMPRESS_1FRAG_WHEN_VRS_RATE_HINT_EN_MASK 0x00000020L
 #define CB_HW_CONTROL__RMI_CREDITS_MASK 0x00000FC0L
 #define CB_HW_CONTROL__CHICKEN_BITS_MASK 0x00007000L
 #define CB_HW_CONTROL__DISABLE_FMASK_MULTI_MGCG_DOMAINS_MASK 0x00008000L
@@ -19881,6 +19889,7 @@
 #define DB_RENDER_OVERRIDE2__PRESERVE_SRESULTS__SHIFT 0x16
 #define DB_RENDER_OVERRIDE2__DISABLE_FAST_PASS__SHIFT 0x17
 #define DB_RENDER_OVERRIDE2__ALLOW_PARTIAL_RES_HIER_KILL__SHIFT 0x19
+#define DB_RENDER_OVERRIDE2__FORCE_VRS_RATE_FINE__SHIFT 0x1a
 #define DB_RENDER_OVERRIDE2__CENTROID_COMPUTATION_MODE__SHIFT 0x1b
 #define DB_RENDER_OVERRIDE2__PARTIAL_SQUAD_LAUNCH_CONTROL_MASK 0x00000003L
 #define DB_RENDER_OVERRIDE2__PARTIAL_SQUAD_LAUNCH_COUNTDOWN_MASK 0x0000001CL
@@ -19898,6 +19907,7 @@
 #define DB_RENDER_OVERRIDE2__PRESERVE_SRESULTS_MASK 0x00400000L
 #define DB_RENDER_OVERRIDE2__DISABLE_FAST_PASS_MASK 0x00800000L
 #define DB_RENDER_OVERRIDE2__ALLOW_PARTIAL_RES_HIER_KILL_MASK 0x02000000L
+#define DB_RENDER_OVERRIDE2__FORCE_VRS_RATE_FINE_MASK 0x04000000L
 #define DB_RENDER_OVERRIDE2__CENTROID_COMPUTATION_MODE_MASK 0x18000000L
 //DB_HTILE_DATA_BASE
 #define DB_HTILE_DATA_BASE__BASE_256B__SHIFT 0x0
@@ -20021,6 +20031,13 @@
 //DB_RESERVED_REG_3
 #define DB_RESERVED_REG_3__FIELD_1__SHIFT 0x0
 #define DB_RESERVED_REG_3__FIELD_1_MASK 0x003FFFFFL
+//DB_VRS_OVERRIDE_CNTL
+#define DB_VRS_OVERRIDE_CNTL__VRS_OVERRIDE_RATE_COMBINER_MODE__SHIFT 0x0
+#define DB_VRS_OVERRIDE_CNTL__VRS_OVERRIDE_RATE_X__SHIFT 0x4
+#define DB_VRS_OVERRIDE_CNTL__VRS_OVERRIDE_RATE_Y__SHIFT 0x6
+#define DB_VRS_OVERRIDE_CNTL__VRS_OVERRIDE_RATE_COMBINER_MODE_MASK 0x00000007L
+#define DB_VRS_OVERRIDE_CNTL__VRS_OVERRIDE_RATE_X_MASK 0x00000030L
+#define DB_VRS_OVERRIDE_CNTL__VRS_OVERRIDE_RATE_Y_MASK 0x000000C0L
 //DB_Z_READ_BASE_HI
 #define DB_Z_READ_BASE_HI__BASE_HI__SHIFT 0x0
 #define DB_Z_READ_BASE_HI__BASE_HI_MASK 0x000000FFL
@@ -22598,6 +22615,7 @@
 #define PA_CL_VS_OUT_CNTL__VS_OUT_MISC_SIDE_BUS_ENA__SHIFT 0x18
 #define PA_CL_VS_OUT_CNTL__USE_VTX_GS_CUT_FLAG__SHIFT 0x19
 #define PA_CL_VS_OUT_CNTL__USE_VTX_LINE_WIDTH__SHIFT 0x1b
+#define PA_CL_VS_OUT_CNTL__USE_VTX_VRS_RATE__SHIFT 0x1c
 #define PA_CL_VS_OUT_CNTL__BYPASS_VTX_RATE_COMBINER__SHIFT 0x1d
 #define PA_CL_VS_OUT_CNTL__BYPASS_PRIM_RATE_COMBINER__SHIFT 0x1e
 #define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_0_MASK 0x00000001L
@@ -22627,6 +22645,7 @@
 #define PA_CL_VS_OUT_CNTL__VS_OUT_MISC_SIDE_BUS_ENA_MASK 0x01000000L
 #define PA_CL_VS_OUT_CNTL__USE_VTX_GS_CUT_FLAG_MASK 0x02000000L
 #define PA_CL_VS_OUT_CNTL__USE_VTX_LINE_WIDTH_MASK 0x08000000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_VRS_RATE_MASK 0x10000000L
 #define PA_CL_VS_OUT_CNTL__BYPASS_VTX_RATE_COMBINER_MASK 0x20000000L
 #define PA_CL_VS_OUT_CNTL__BYPASS_PRIM_RATE_COMBINER_MASK 0x40000000L
 //PA_CL_NANINF_CNTL
@@ -22740,6 +22759,19 @@
 //PA_STATE_STEREO_X
 #define PA_STATE_STEREO_X__STEREO_X_OFFSET__SHIFT 0x0
 #define PA_STATE_STEREO_X__STEREO_X_OFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VRS_CNTL
+#define PA_CL_VRS_CNTL__VERTEX_RATE_COMBINER_MODE__SHIFT 0x0
+#define PA_CL_VRS_CNTL__PRIMITIVE_RATE_COMBINER_MODE__SHIFT 0x3
+#define PA_CL_VRS_CNTL__HTILE_RATE_COMBINER_MODE__SHIFT 0x6
+#define PA_CL_VRS_CNTL__SAMPLE_ITER_COMBINER_MODE__SHIFT 0x9
+#define PA_CL_VRS_CNTL__EXPOSE_VRS_PIXELS_MASK__SHIFT 0xd
+#define PA_CL_VRS_CNTL__CMASK_RATE_HINT_FORCE_ZERO__SHIFT 0xe
+#define PA_CL_VRS_CNTL__VERTEX_RATE_COMBINER_MODE_MASK 0x00000007L
+#define PA_CL_VRS_CNTL__PRIMITIVE_RATE_COMBINER_MODE_MASK 0x00000038L
+#define PA_CL_VRS_CNTL__HTILE_RATE_COMBINER_MODE_MASK 0x000001C0L
+#define PA_CL_VRS_CNTL__SAMPLE_ITER_COMBINER_MODE_MASK 0x00000E00L
+#define PA_CL_VRS_CNTL__EXPOSE_VRS_PIXELS_MASK_MASK 0x00002000L
+#define PA_CL_VRS_CNTL__CMASK_RATE_HINT_FORCE_ZERO_MASK 0x00004000L
 //PA_SU_POINT_SIZE
 #define PA_SU_POINT_SIZE__HEIGHT__SHIFT 0x0
 #define PA_SU_POINT_SIZE__WIDTH__SHIFT 0x10
@@ -23088,6 +23120,7 @@
 #define DB_HTILE_SURFACE__DST_OUTSIDE_ZERO_TO_ONE__SHIFT 0x10
 #define DB_HTILE_SURFACE__RESERVED_FIELD_6__SHIFT 0x11
 #define DB_HTILE_SURFACE__PIPE_ALIGNED__SHIFT 0x12
+#define DB_HTILE_SURFACE__VRS_HTILE_ENCODING__SHIFT 0x13
 #define DB_HTILE_SURFACE__RESERVED_FIELD_1_MASK 0x00000001L
 #define DB_HTILE_SURFACE__FULL_CACHE_MASK 0x00000002L
 #define DB_HTILE_SURFACE__RESERVED_FIELD_2_MASK 0x00000004L
@@ -23097,6 +23130,7 @@
 #define DB_HTILE_SURFACE__DST_OUTSIDE_ZERO_TO_ONE_MASK 0x00010000L
 #define DB_HTILE_SURFACE__RESERVED_FIELD_6_MASK 0x00020000L
 #define DB_HTILE_SURFACE__PIPE_ALIGNED_MASK 0x00040000L
+#define DB_HTILE_SURFACE__VRS_HTILE_ENCODING_MASK 0x00180000L
 //DB_SRESULTS_COMPARE_STATE0
 #define DB_SRESULTS_COMPARE_STATE0__COMPAREFUNC0__SHIFT 0x0
 #define DB_SRESULTS_COMPARE_STATE0__COMPAREVALUE0__SHIFT 0x4
@@ -24954,6 +24988,7 @@
 #define CB_COLOR0_ATTRIB3__CMASK_PIPE_ALIGNED__SHIFT 0x1a
 #define CB_COLOR0_ATTRIB3__RESOURCE_LEVEL__SHIFT 0x1b
 #define CB_COLOR0_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT 0x1e
+#define CB_COLOR0_ATTRIB3__VRS_RATE_HINT_ENABLE__SHIFT 0x1f
 #define CB_COLOR0_ATTRIB3__MIP0_DEPTH_MASK 0x00001FFFL
 #define CB_COLOR0_ATTRIB3__META_LINEAR_MASK 0x00002000L
 #define CB_COLOR0_ATTRIB3__COLOR_SW_MODE_MASK 0x0007C000L
@@ -24962,6 +24997,7 @@
 #define CB_COLOR0_ATTRIB3__CMASK_PIPE_ALIGNED_MASK 0x04000000L
 #define CB_COLOR0_ATTRIB3__RESOURCE_LEVEL_MASK 0x38000000L
 #define CB_COLOR0_ATTRIB3__DCC_PIPE_ALIGNED_MASK 0x40000000L
+#define CB_COLOR0_ATTRIB3__VRS_RATE_HINT_ENABLE_MASK 0x80000000L
 //CB_COLOR1_ATTRIB3
 #define CB_COLOR1_ATTRIB3__MIP0_DEPTH__SHIFT 0x0
 #define CB_COLOR1_ATTRIB3__META_LINEAR__SHIFT 0xd
@@ -24971,6 +25007,7 @@
 #define CB_COLOR1_ATTRIB3__CMASK_PIPE_ALIGNED__SHIFT 0x1a
 #define CB_COLOR1_ATTRIB3__RESOURCE_LEVEL__SHIFT 0x1b
 #define CB_COLOR1_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT 0x1e
+#define CB_COLOR1_ATTRIB3__VRS_RATE_HINT_ENABLE__SHIFT 0x1f
 #define CB_COLOR1_ATTRIB3__MIP0_DEPTH_MASK 0x00001FFFL
 #define CB_COLOR1_ATTRIB3__META_LINEAR_MASK 0x00002000L
 #define CB_COLOR1_ATTRIB3__COLOR_SW_MODE_MASK 0x0007C000L
@@ -24979,6 +25016,7 @@
 #define CB_COLOR1_ATTRIB3__CMASK_PIPE_ALIGNED_MASK 0x04000000L
 #define CB_COLOR1_ATTRIB3__RESOURCE_LEVEL_MASK 0x38000000L
 #define CB_COLOR1_ATTRIB3__DCC_PIPE_ALIGNED_MASK 0x40000000L
+#define CB_COLOR1_ATTRIB3__VRS_RATE_HINT_ENABLE_MASK 0x80000000L
 //CB_COLOR2_ATTRIB3
 #define CB_COLOR2_ATTRIB3__MIP0_DEPTH__SHIFT 0x0
 #define CB_COLOR2_ATTRIB3__META_LINEAR__SHIFT 0xd
@@ -24988,6 +25026,7 @@
 #define CB_COLOR2_ATTRIB3__CMASK_PIPE_ALIGNED__SHIFT 0x1a
 #define CB_COLOR2_ATTRIB3__RESOURCE_LEVEL__SHIFT 0x1b
 #define CB_COLOR2_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT 0x1e
+#define CB_COLOR2_ATTRIB3__VRS_RATE_HINT_ENABLE__SHIFT 0x1f
 #define CB_COLOR2_ATTRIB3__MIP0_DEPTH_MASK 0x00001FFFL
 #define CB_COLOR2_ATTRIB3__META_LINEAR_MASK 0x00002000L
 #define CB_COLOR2_ATTRIB3__COLOR_SW_MODE_MASK 0x0007C000L
@@ -24996,6 +25035,7 @@
 #define CB_COLOR2_ATTRIB3__CMASK_PIPE_ALIGNED_MASK 0x04000000L
 #define CB_COLOR2_ATTRIB3__RESOURCE_LEVEL_MASK 0x38000000L
 #define CB_COLOR2_ATTRIB3__DCC_PIPE_ALIGNED_MASK 0x40000000L
+#define CB_COLOR2_ATTRIB3__VRS_RATE_HINT_ENABLE_MASK 0x80000000L
 //CB_COLOR3_ATTRIB3
 #define CB_COLOR3_ATTRIB3__MIP0_DEPTH__SHIFT 0x0
 #define CB_COLOR3_ATTRIB3__META_LINEAR__SHIFT 0xd
@@ -25005,6 +25045,7 @@
 #define CB_COLOR3_ATTRIB3__CMASK_PIPE_ALIGNED__SHIFT 0x1a
 #define CB_COLOR3_ATTRIB3__RESOURCE_LEVEL__SHIFT 0x1b
 #define CB_COLOR3_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT 0x1e
+#define CB_COLOR3_ATTRIB3__VRS_RATE_HINT_ENABLE__SHIFT 0x1f
 #define CB_COLOR3_ATTRIB3__MIP0_DEPTH_MASK 0x00001FFFL
 #define CB_COLOR3_ATTRIB3__META_LINEAR_MASK 0x00002000L
 #define CB_COLOR3_ATTRIB3__COLOR_SW_MODE_MASK 0x0007C000L
@@ -25013,6 +25054,7 @@
 #define CB_COLOR3_ATTRIB3__CMASK_PIPE_ALIGNED_MASK 0x04000000L
 #define CB_COLOR3_ATTRIB3__RESOURCE_LEVEL_MASK 0x38000000L
 #define CB_COLOR3_ATTRIB3__DCC_PIPE_ALIGNED_MASK 0x40000000L
+#define CB_COLOR3_ATTRIB3__VRS_RATE_HINT_ENABLE_MASK 0x80000000L
 //CB_COLOR4_ATTRIB3
 #define CB_COLOR4_ATTRIB3__MIP0_DEPTH__SHIFT 0x0
 #define CB_COLOR4_ATTRIB3__META_LINEAR__SHIFT 0xd
@@ -25022,6 +25064,7 @@
 #define CB_COLOR4_ATTRIB3__CMASK_PIPE_ALIGNED__SHIFT 0x1a
 #define CB_COLOR4_ATTRIB3__RESOURCE_LEVEL__SHIFT 0x1b
 #define CB_COLOR4_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT 0x1e
+#define CB_COLOR4_ATTRIB3__VRS_RATE_HINT_ENABLE__SHIFT 0x1f
 #define CB_COLOR4_ATTRIB3__MIP0_DEPTH_MASK 0x00001FFFL
 #define CB_COLOR4_ATTRIB3__META_LINEAR_MASK 0x00002000L
 #define CB_COLOR4_ATTRIB3__COLOR_SW_MODE_MASK 0x0007C000L
@@ -25030,6 +25073,7 @@
 #define CB_COLOR4_ATTRIB3__CMASK_PIPE_ALIGNED_MASK 0x04000000L
 #define CB_COLOR4_ATTRIB3__RESOURCE_LEVEL_MASK 0x38000000L
 #define CB_COLOR4_ATTRIB3__DCC_PIPE_ALIGNED_MASK 0x40000000L
+#define CB_COLOR4_ATTRIB3__VRS_RATE_HINT_ENABLE_MASK 0x80000000L
 //CB_COLOR5_ATTRIB3
 #define CB_COLOR5_ATTRIB3__MIP0_DEPTH__SHIFT 0x0
 #define CB_COLOR5_ATTRIB3__META_LINEAR__SHIFT 0xd
@@ -25039,6 +25083,7 @@
 #define CB_COLOR5_ATTRIB3__CMASK_PIPE_ALIGNED__SHIFT 0x1a
 #define CB_COLOR5_ATTRIB3__RESOURCE_LEVEL__SHIFT 0x1b
 #define CB_COLOR5_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT 0x1e
+#define CB_COLOR5_ATTRIB3__VRS_RATE_HINT_ENABLE__SHIFT 0x1f
 #define CB_COLOR5_ATTRIB3__MIP0_DEPTH_MASK 0x00001FFFL
 #define CB_COLOR5_ATTRIB3__META_LINEAR_MASK 0x00002000L
 #define CB_COLOR5_ATTRIB3__COLOR_SW_MODE_MASK 0x0007C000L
@@ -25047,6 +25092,7 @@
 #define CB_COLOR5_ATTRIB3__CMASK_PIPE_ALIGNED_MASK 0x04000000L
 #define CB_COLOR5_ATTRIB3__RESOURCE_LEVEL_MASK 0x38000000L
 #define CB_COLOR5_ATTRIB3__DCC_PIPE_ALIGNED_MASK 0x40000000L
+#define CB_COLOR5_ATTRIB3__VRS_RATE_HINT_ENABLE_MASK 0x80000000L
 //CB_COLOR6_ATTRIB3
 #define CB_COLOR6_ATTRIB3__MIP0_DEPTH__SHIFT 0x0
 #define CB_COLOR6_ATTRIB3__META_LINEAR__SHIFT 0xd
@@ -25056,6 +25102,7 @@
 #define CB_COLOR6_ATTRIB3__CMASK_PIPE_ALIGNED__SHIFT 0x1a
 #define CB_COLOR6_ATTRIB3__RESOURCE_LEVEL__SHIFT 0x1b
 #define CB_COLOR6_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT 0x1e
+#define CB_COLOR6_ATTRIB3__VRS_RATE_HINT_ENABLE__SHIFT 0x1f
 #define CB_COLOR6_ATTRIB3__MIP0_DEPTH_MASK 0x00001FFFL
 #define CB_COLOR6_ATTRIB3__META_LINEAR_MASK 0x00002000L
 #define CB_COLOR6_ATTRIB3__COLOR_SW_MODE_MASK 0x0007C000L
@@ -25064,6 +25111,7 @@
 #define CB_COLOR6_ATTRIB3__CMASK_PIPE_ALIGNED_MASK 0x04000000L
 #define CB_COLOR6_ATTRIB3__RESOURCE_LEVEL_MASK 0x38000000L
 #define CB_COLOR6_ATTRIB3__DCC_PIPE_ALIGNED_MASK 0x40000000L
+#define CB_COLOR6_ATTRIB3__VRS_RATE_HINT_ENABLE_MASK 0x80000000L
 //CB_COLOR7_ATTRIB3
 #define CB_COLOR7_ATTRIB3__MIP0_DEPTH__SHIFT 0x0
 #define CB_COLOR7_ATTRIB3__META_LINEAR__SHIFT 0xd
@@ -25073,6 +25121,7 @@
 #define CB_COLOR7_ATTRIB3__CMASK_PIPE_ALIGNED__SHIFT 0x1a
 #define CB_COLOR7_ATTRIB3__RESOURCE_LEVEL__SHIFT 0x1b
 #define CB_COLOR7_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT 0x1e
+#define CB_COLOR7_ATTRIB3__VRS_RATE_HINT_ENABLE__SHIFT 0x1f
 #define CB_COLOR7_ATTRIB3__MIP0_DEPTH_MASK 0x00001FFFL
 #define CB_COLOR7_ATTRIB3__META_LINEAR_MASK 0x00002000L
 #define CB_COLOR7_ATTRIB3__COLOR_SW_MODE_MASK 0x0007C000L
@@ -25081,6 +25130,7 @@
 #define CB_COLOR7_ATTRIB3__CMASK_PIPE_ALIGNED_MASK 0x04000000L
 #define CB_COLOR7_ATTRIB3__RESOURCE_LEVEL_MASK 0x38000000L
 #define CB_COLOR7_ATTRIB3__DCC_PIPE_ALIGNED_MASK 0x40000000L
+#define CB_COLOR7_ATTRIB3__VRS_RATE_HINT_ENABLE_MASK 0x80000000L
 
 
 // addressBlock: gc_gfxudec
@@ -2393,6 +2393,7 @@
 #define VCN_FEATURES__HAS_MJPEG2_IDCT_DEC__SHIFT 0x7
 #define VCN_FEATURES__HAS_SCLR_DEC__SHIFT 0x8
 #define VCN_FEATURES__HAS_VP9_DEC__SHIFT 0x9
+#define VCN_FEATURES__HAS_AV1_DEC__SHIFT 0xa
 #define VCN_FEATURES__HAS_EFC_ENC__SHIFT 0xb
 #define VCN_FEATURES__HAS_EFC_HDR2SDR_ENC__SHIFT 0xc
 #define VCN_FEATURES__HAS_DUAL_MJPEG_DEC__SHIFT 0xd
@@ -2407,6 +2408,7 @@
 #define VCN_FEATURES__HAS_MJPEG2_IDCT_DEC_MASK 0x00000080L
 #define VCN_FEATURES__HAS_SCLR_DEC_MASK 0x00000100L
 #define VCN_FEATURES__HAS_VP9_DEC_MASK 0x00000200L
+#define VCN_FEATURES__HAS_AV1_DEC_MASK 0x00000400L
 #define VCN_FEATURES__HAS_EFC_ENC_MASK 0x00000800L
 #define VCN_FEATURES__HAS_EFC_HDR2SDR_ENC_MASK 0x00001000L
 #define VCN_FEATURES__HAS_DUAL_MJPEG_DEC_MASK 0x00002000L
@@ -2809,8 +2811,10 @@
 #define UVD_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
 #define UVD_SUVD_CGC_GATE__EFC__SHIFT 0x19
 #define UVD_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define UVD_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
 #define UVD_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
 #define UVD_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define UVD_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
 #define UVD_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
 #define UVD_SUVD_CGC_GATE__SRE_MASK 0x00000001L
 #define UVD_SUVD_CGC_GATE__SIT_MASK 0x00000002L
@@ -2839,8 +2843,10 @@
 #define UVD_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
 #define UVD_SUVD_CGC_GATE__EFC_MASK 0x02000000L
 #define UVD_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define UVD_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
 #define UVD_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
 #define UVD_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define UVD_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
 #define UVD_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
 //UVD_SUVD_CGC_STATUS
 #define UVD_SUVD_CGC_STATUS__SRE_VCLK__SHIFT 0x0
@@ -2873,6 +2879,8 @@
 #define UVD_SUVD_CGC_STATUS__IME_HEVC_DCLK__SHIFT 0x1b
 #define UVD_SUVD_CGC_STATUS__EFC_DCLK__SHIFT 0x1c
 #define UVD_SUVD_CGC_STATUS__SAOE_DCLK__SHIFT 0x1d
+#define UVD_SUVD_CGC_STATUS__SRE_AV1_VCLK__SHIFT 0x1e
+#define UVD_SUVD_CGC_STATUS__SCM_AV1_DCLK__SHIFT 0x1f
 #define UVD_SUVD_CGC_STATUS__SRE_VCLK_MASK 0x00000001L
 #define UVD_SUVD_CGC_STATUS__SRE_DCLK_MASK 0x00000002L
 #define UVD_SUVD_CGC_STATUS__SIT_DCLK_MASK 0x00000004L
@@ -2903,6 +2911,8 @@
 #define UVD_SUVD_CGC_STATUS__IME_HEVC_DCLK_MASK 0x08000000L
 #define UVD_SUVD_CGC_STATUS__EFC_DCLK_MASK 0x10000000L
 #define UVD_SUVD_CGC_STATUS__SAOE_DCLK_MASK 0x20000000L
+#define UVD_SUVD_CGC_STATUS__SRE_AV1_VCLK_MASK 0x40000000L
+#define UVD_SUVD_CGC_STATUS__SCM_AV1_DCLK_MASK 0x80000000L
 //UVD_SUVD_CGC_CTRL
 #define UVD_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
 #define UVD_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
@@ -2919,6 +2929,8 @@
 #define UVD_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
 #define UVD_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
 #define UVD_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define UVD_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define UVD_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
 #define UVD_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
 #define UVD_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
 #define UVD_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
@@ -2937,6 +2949,8 @@
 #define UVD_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
 #define UVD_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
 #define UVD_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define UVD_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define UVD_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
 #define UVD_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
 #define UVD_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
 #define UVD_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
@@ -3658,6 +3672,8 @@
 #define UVD_SUVD_CGC_STATUS2__SMPA_VCLK__SHIFT 0x0
 #define UVD_SUVD_CGC_STATUS2__SMPA_DCLK__SHIFT 0x1
 #define UVD_SUVD_CGC_STATUS2__MPBE1_DCLK__SHIFT 0x3
+#define UVD_SUVD_CGC_STATUS2__SIT_AV1_DCLK__SHIFT 0x4
+#define UVD_SUVD_CGC_STATUS2__SDB_AV1_DCLK__SHIFT 0x5
 #define UVD_SUVD_CGC_STATUS2__MPC1_DCLK__SHIFT 0x6
 #define UVD_SUVD_CGC_STATUS2__MPC1_SCLK__SHIFT 0x7
 #define UVD_SUVD_CGC_STATUS2__MPC1_VCLK__SHIFT 0x8
@@ -3666,6 +3682,8 @@
 #define UVD_SUVD_CGC_STATUS2__SMPA_VCLK_MASK 0x00000001L
 #define UVD_SUVD_CGC_STATUS2__SMPA_DCLK_MASK 0x00000002L
 #define UVD_SUVD_CGC_STATUS2__MPBE1_DCLK_MASK 0x00000008L
+#define UVD_SUVD_CGC_STATUS2__SIT_AV1_DCLK_MASK 0x00000010L
+#define UVD_SUVD_CGC_STATUS2__SDB_AV1_DCLK_MASK 0x00000020L
 #define UVD_SUVD_CGC_STATUS2__MPC1_DCLK_MASK 0x00000040L
 #define UVD_SUVD_CGC_STATUS2__MPC1_SCLK_MASK 0x00000080L
 #define UVD_SUVD_CGC_STATUS2__MPC1_VCLK_MASK 0x00000100L
@@ -3674,25 +3692,41 @@
 //UVD_SUVD_CGC_GATE2
 #define UVD_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
 #define UVD_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define UVD_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define UVD_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
 #define UVD_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
 #define UVD_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
 #define UVD_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define UVD_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define UVD_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
 #define UVD_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
 //UVD_SUVD_INT_STATUS2
 #define UVD_SUVD_INT_STATUS2__SMPA_FUNC_INT__SHIFT 0x0
 #define UVD_SUVD_INT_STATUS2__SMPA_ERR_INT__SHIFT 0x5
+#define UVD_SUVD_INT_STATUS2__SDB_AV1_FUNC_INT__SHIFT 0x6
+#define UVD_SUVD_INT_STATUS2__SDB_AV1_ERR_INT__SHIFT 0xb
 #define UVD_SUVD_INT_STATUS2__SMPA_FUNC_INT_MASK 0x0000001FL
 #define UVD_SUVD_INT_STATUS2__SMPA_ERR_INT_MASK 0x00000020L
+#define UVD_SUVD_INT_STATUS2__SDB_AV1_FUNC_INT_MASK 0x000007C0L
+#define UVD_SUVD_INT_STATUS2__SDB_AV1_ERR_INT_MASK 0x00000800L
 //UVD_SUVD_INT_EN2
 #define UVD_SUVD_INT_EN2__SMPA_FUNC_INT_EN__SHIFT 0x0
 #define UVD_SUVD_INT_EN2__SMPA_ERR_INT_EN__SHIFT 0x5
+#define UVD_SUVD_INT_EN2__SDB_AV1_FUNC_INT_EN__SHIFT 0x6
+#define UVD_SUVD_INT_EN2__SDB_AV1_ERR_INT_EN__SHIFT 0xb
 #define UVD_SUVD_INT_EN2__SMPA_FUNC_INT_EN_MASK 0x0000001FL
 #define UVD_SUVD_INT_EN2__SMPA_ERR_INT_EN_MASK 0x00000020L
|
#define UVD_SUVD_INT_EN2__SMPA_ERR_INT_EN_MASK 0x00000020L
|
||||||
|
#define UVD_SUVD_INT_EN2__SDB_AV1_FUNC_INT_EN_MASK 0x000007C0L
|
||||||
|
#define UVD_SUVD_INT_EN2__SDB_AV1_ERR_INT_EN_MASK 0x00000800L
|
||||||
//UVD_SUVD_INT_ACK2
|
//UVD_SUVD_INT_ACK2
|
||||||
#define UVD_SUVD_INT_ACK2__SMPA_FUNC_INT_ACK__SHIFT 0x0
|
#define UVD_SUVD_INT_ACK2__SMPA_FUNC_INT_ACK__SHIFT 0x0
|
||||||
#define UVD_SUVD_INT_ACK2__SMPA_ERR_INT_ACK__SHIFT 0x5
|
#define UVD_SUVD_INT_ACK2__SMPA_ERR_INT_ACK__SHIFT 0x5
|
||||||
|
#define UVD_SUVD_INT_ACK2__SDB_AV1_FUNC_INT_ACK__SHIFT 0x6
|
||||||
|
#define UVD_SUVD_INT_ACK2__SDB_AV1_ERR_INT_ACK__SHIFT 0xb
|
||||||
#define UVD_SUVD_INT_ACK2__SMPA_FUNC_INT_ACK_MASK 0x0000001FL
|
#define UVD_SUVD_INT_ACK2__SMPA_FUNC_INT_ACK_MASK 0x0000001FL
|
||||||
#define UVD_SUVD_INT_ACK2__SMPA_ERR_INT_ACK_MASK 0x00000020L
|
#define UVD_SUVD_INT_ACK2__SMPA_ERR_INT_ACK_MASK 0x00000020L
|
||||||
|
#define UVD_SUVD_INT_ACK2__SDB_AV1_FUNC_INT_ACK_MASK 0x000007C0L
|
||||||
|
#define UVD_SUVD_INT_ACK2__SDB_AV1_ERR_INT_ACK_MASK 0x00000800L
|
||||||
|
|
||||||
|
|
||||||
// addressBlock: uvd0_ecpudec
|
// addressBlock: uvd0_ecpudec
|
||||||
|
|
|
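The new AV1 bits follow the SHIFT/MASK pairing used throughout this register header: the mask selects the field's bits and the shift normalizes the field down to bit 0. A minimal user-space sketch of how such a pair is typically consumed; the two macros are taken from the hunks above, while the "register readback" value is made up for illustration:

    #include <stdio.h>
    #include <stdint.h>

    #define UVD_SUVD_CGC_GATE2__SDB_AV1__SHIFT  0x3
    #define UVD_SUVD_CGC_GATE2__SDB_AV1_MASK    0x00000008L

    /* Extract the SDB_AV1 gate bit from a raw UVD_SUVD_CGC_GATE2 value. */
    static uint32_t sdb_av1_gated(uint32_t reg)
    {
        return (reg & UVD_SUVD_CGC_GATE2__SDB_AV1_MASK) >> UVD_SUVD_CGC_GATE2__SDB_AV1__SHIFT;
    }

    int main(void)
    {
        uint32_t reg = 0x0000000C;  /* pretend register readback */
        printf("SDB_AV1 gate bit: %u\n", (unsigned)sdb_av1_gated(reg));
        return 0;
    }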
@@ -479,17 +479,6 @@ static int smu_late_init(void *handle)
                return ret;
        }
 
-       /*
-        * Set initialized values (get from vbios) to dpm tables context such as
-        * gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for each
-        * type of clks.
-        */
-       ret = smu_set_default_dpm_table(smu);
-       if (ret) {
-               dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
-               return ret;
-       }
-
        ret = smu_populate_umd_state_clk(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to populate UMD state clocks!\n");
@@ -984,6 +973,17 @@ static int smu_smc_hw_setup(struct smu_context *smu)
                return ret;
        }
 
+       /*
+        * Set initialized values (get from vbios) to dpm tables context such as
+        * gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for each
+        * type of clks.
+        */
+       ret = smu_set_default_dpm_table(smu);
+       if (ret) {
+               dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
+               return ret;
+       }
+
        ret = smu_notify_display_change(smu);
        if (ret)
                return ret;
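The two hunks above relocate the default DPM-table setup from smu_late_init() into smu_smc_hw_setup(), so the tables are populated as part of hardware setup rather than at late init. A minimal, purely illustrative sketch of the resulting ordering; the function names come from the diff, but the bodies, signatures, and error strings here are stand-ins, not the driver code:

    #include <stdio.h>

    /* Stand-ins for the real SMU calls; each returns 0 on success. */
    static int smu_set_default_dpm_table(void) { return 0; }
    static int smu_notify_display_change(void) { return 0; }
    static int smu_populate_umd_state_clk(void) { return 0; }

    /* After the change, the DPM tables are built during hw setup... */
    static int smu_smc_hw_setup(void)
    {
        int ret = smu_set_default_dpm_table();
        if (ret) {
            fprintf(stderr, "Failed to setup default dpm clock tables!\n");
            return ret;
        }
        return smu_notify_display_change();
    }

    /* ...while late init only derives state from the already-populated tables. */
    static int smu_late_init(void)
    {
        return smu_populate_umd_state_clk();
    }

    int main(void)
    {
        if (smu_smc_hw_setup() || smu_late_init())
            return 1;
        puts("init order ok");
        return 0;
    }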
@@ -563,6 +563,8 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
        struct smu10_hwmgr *data = hwmgr->backend;
        uint32_t min_sclk = hwmgr->display_config->min_core_set_clock;
        uint32_t min_mclk = hwmgr->display_config->min_mem_set_clock/100;
+       uint32_t index_fclk = data->clock_vol_info.vdd_dep_on_fclk->count - 1;
+       uint32_t index_socclk = data->clock_vol_info.vdd_dep_on_socclk->count - 1;
 
        if (hwmgr->smu_version < 0x1E3700) {
                pr_info("smu firmware version too old, can not set dpm level\n");
@@ -676,13 +678,13 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetHardMinFclkByFreq,
                                hwmgr->display_config->num_display > 3 ?
-                               SMU10_UMD_PSTATE_PEAK_FCLK :
+                               data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk :
                                min_mclk,
                                NULL);
 
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetHardMinSocclkByFreq,
-                               SMU10_UMD_PSTATE_MIN_SOCCLK,
+                               data->clock_vol_info.vdd_dep_on_socclk->entries[0].clk,
                                NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetHardMinVcn,
@@ -695,11 +697,11 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
                                NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetSoftMaxFclkByFreq,
-                               SMU10_UMD_PSTATE_PEAK_FCLK,
+                               data->clock_vol_info.vdd_dep_on_fclk->entries[index_fclk].clk,
                                NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetSoftMaxSocclkByFreq,
-                               SMU10_UMD_PSTATE_PEAK_SOCCLK,
+                               data->clock_vol_info.vdd_dep_on_socclk->entries[index_socclk].clk,
                                NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetSoftMaxVcn,
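Instead of the hard-coded SMU10_UMD_PSTATE_* frequencies, the hunks above take the hard minimum from entries[0] and the soft maximum from entries[count - 1] of the fclk/socclk voltage-dependency tables. A small stand-alone sketch of that indexing; the field names mirror the diff, but the table type and its contents are invented for illustration:

    #include <stdio.h>
    #include <stdint.h>

    struct clk_entry { uint32_t clk; };

    struct clk_table {
        uint32_t count;
        struct clk_entry entries[8];
    };

    int main(void)
    {
        /* Hypothetical fclk dependency table, ordered lowest to highest. */
        struct clk_table fclk = {
            .count = 4,
            .entries = { {400}, {800}, {1200}, {1600} },
        };
        uint32_t index_fclk = fclk.count - 1;

        uint32_t hard_min = fclk.entries[0].clk;           /* lowest supported level */
        uint32_t soft_max = fclk.entries[index_fclk].clk;  /* highest supported level */

        printf("hard min fclk: %u, soft max fclk: %u\n",
               (unsigned)hard_min, (unsigned)soft_max);
        return 0;
    }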
@@ -232,14 +232,16 @@ static int renoir_get_profiling_clk_mask(struct smu_context *smu,
                        *sclk_mask = 0;
        } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
                if (mclk_mask)
-                       *mclk_mask = 0;
+                       /* mclk levels are in reverse order */
+                       *mclk_mask = NUM_MEMCLK_DPM_LEVELS - 1;
        } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
                if(sclk_mask)
                        /* The sclk as gfxclk and has three level about max/min/current */
                        *sclk_mask = 3 - 1;
 
                if(mclk_mask)
-                       *mclk_mask = NUM_MEMCLK_DPM_LEVELS - 1;
+                       /* mclk levels are in reverse order */
+                       *mclk_mask = 0;
 
                if(soc_mask)
                        *soc_mask = NUM_SOCCLK_DPM_LEVELS - 1;
@@ -333,7 +335,7 @@ static int renoir_get_dpm_ultimate_freq(struct smu_context *smu,
        case SMU_UCLK:
        case SMU_FCLK:
        case SMU_MCLK:
-               ret = renoir_get_dpm_clk_limited(smu, clk_type, 0, min);
+               ret = renoir_get_dpm_clk_limited(smu, clk_type, NUM_MEMCLK_DPM_LEVELS - 1, min);
                if (ret)
                        goto failed;
                break;
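Per the new comment, the memory-clock DPM table here is ordered highest to lowest, so the "minimum mclk" profile now selects the last index and the "peak" profile selects index 0; the same reversal is why renoir_get_dpm_ultimate_freq() reads the minimum from NUM_MEMCLK_DPM_LEVELS - 1 instead of 0. A small sketch of that reversed indexing; only the NUM_MEMCLK_DPM_LEVELS-style sizing comes from the diff, the frequencies are made up:

    #include <stdio.h>
    #include <stdint.h>

    #define NUM_MEMCLK_DPM_LEVELS 4

    int main(void)
    {
        /* Hypothetical mclk table stored in reverse order: highest frequency first. */
        uint32_t mclk_table[NUM_MEMCLK_DPM_LEVELS] = { 1200, 800, 600, 300 };

        uint32_t peak_mask = 0;                          /* index 0 -> highest clock */
        uint32_t min_mask  = NUM_MEMCLK_DPM_LEVELS - 1;  /* last index -> lowest clock */

        printf("peak mclk %u MHz (mask %u), min mclk %u MHz (mask %u)\n",
               (unsigned)mclk_table[peak_mask], (unsigned)peak_mask,
               (unsigned)mclk_table[min_mask], (unsigned)min_mask);
        return 0;
    }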
@@ -368,6 +368,7 @@ void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu)
 static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
                struct intel_vgpu_creation_params *param)
 {
+       struct drm_i915_private *dev_priv = gvt->gt->i915;
        struct intel_vgpu *vgpu;
        int ret;
 
@@ -436,7 +437,10 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
        if (ret)
                goto out_clean_sched_policy;
 
-       ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
+       if (IS_BROADWELL(dev_priv))
+               ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_B);
+       else
+               ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
        if (ret)
                goto out_clean_sched_policy;
 
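The vGPU's virtual EDID is now attached to PORT_B on Broadwell and to PORT_D on other platforms, so the emulated display shows up on a port that actually exists for that generation. A hypothetical stand-alone sketch of the selection logic only; the enum and the boolean flag are stand-ins, not the i915 definitions:

    #include <stdio.h>
    #include <stdbool.h>

    enum vgpu_port { PORT_B, PORT_D };

    /* Pick the port that should carry the emulated EDID for a given platform. */
    static enum vgpu_port vgpu_edid_port(bool is_broadwell)
    {
        return is_broadwell ? PORT_B : PORT_D;
    }

    int main(void)
    {
        printf("Broadwell -> PORT_%c, others -> PORT_%c\n",
               vgpu_edid_port(true) == PORT_B ? 'B' : 'D',
               vgpu_edid_port(false) == PORT_B ? 'B' : 'D');
        return 0;
    }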
@@ -118,11 +118,11 @@ static struct dev_pm_domain pm_domain = {
 
 struct drm_i915_private *mock_gem_device(void)
 {
+#if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU)
+       static struct dev_iommu fake_iommu = { .priv = (void *)-1 };
+#endif
        struct drm_i915_private *i915;
        struct pci_dev *pdev;
-#if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU)
-       struct dev_iommu iommu;
-#endif
        int err;
 
        pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
@@ -141,10 +141,8 @@ struct drm_i915_private *mock_gem_device(void)
        dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
 
 #if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU)
-       /* HACK HACK HACK to disable iommu for the fake device; force identity mapping */
-       memset(&iommu, 0, sizeof(iommu));
-       iommu.priv = (void *)-1;
-       pdev->dev.iommu = &iommu;
+       /* HACK to disable iommu for the fake device; force identity mapping */
+       pdev->dev.iommu = &fake_iommu;
 #endif
 
        pci_set_drvdata(pdev, i915);
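The old code pointed pdev->dev.iommu at a struct dev_iommu living on the stack of mock_gem_device(), which becomes a dangling pointer once the function returns; a function-scope static object fixes the lifetime. A user-space sketch of the same hazard and fix, with a stand-in device structure rather than the kernel's:

    #include <stdio.h>

    struct dev_iommu { void *priv; };
    struct device { struct dev_iommu *iommu; };

    /* Buggy pattern: the pointed-to object dies when the function returns. */
    static void attach_stack_iommu(struct device *dev)
    {
        struct dev_iommu iommu = { .priv = (void *)-1 };
        dev->iommu = &iommu;             /* dangling after return */
    }

    /* Fixed pattern: a function-scope static has program lifetime. */
    static void attach_static_iommu(struct device *dev)
    {
        static struct dev_iommu fake_iommu = { .priv = (void *)-1 };
        dev->iommu = &fake_iommu;        /* stays valid for the device's lifetime */
    }

    int main(void)
    {
        struct device dev = { 0 };
        attach_static_iommu(&dev);
        printf("iommu priv = %p\n", dev.iommu->priv);
        (void)attach_stack_iommu;        /* shown only for contrast; not called */
        return 0;
    }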
@@ -12,7 +12,7 @@ struct sun8i_mixer;
 
 /* VI channel CSC units offsets */
 #define CCSC00_OFFSET 0xAA050
-#define CCSC01_OFFSET 0xFA000
+#define CCSC01_OFFSET 0xFA050
 #define CCSC10_OFFSET 0xA0000
 #define CCSC11_OFFSET 0xF0000
 
@@ -307,7 +307,7 @@ static struct regmap_config sun8i_mixer_regmap_config = {
        .reg_bits = 32,
        .val_bits = 32,
        .reg_stride = 4,
-       .max_register = 0xbfffc, /* guessed */
+       .max_register = 0xffffc, /* guessed */
 };
 
 static int sun8i_mixer_of_get_id(struct device_node *node)
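The two sun8i hunks are presumably related: with CCSC01 at 0xFA050, the second VI-channel CSC block sits above the old 0xbfffc regmap limit, and regmap would reject accesses beyond max_register, so the limit grows to 0xffffc. A quick stand-alone check of that relationship, using the offsets from the diff and the two limit values as given:

    #include <stdio.h>

    #define CCSC01_OFFSET    0xFA050
    #define OLD_MAX_REGISTER 0xbfffc
    #define NEW_MAX_REGISTER 0xffffc

    int main(void)
    {
        printf("CCSC01 within old limit: %s\n",
               CCSC01_OFFSET <= OLD_MAX_REGISTER ? "yes" : "no");
        printf("CCSC01 within new limit: %s\n",
               CCSC01_OFFSET <= NEW_MAX_REGISTER ? "yes" : "no");
        return 0;
    }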
@@ -1117,6 +1117,7 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *hdmi)
        card->num_links = 1;
        card->name = "vc4-hdmi";
        card->dev = dev;
+       card->owner = THIS_MODULE;
 
        /*
         * Be careful, snd_soc_register_card() calls dev_set_drvdata() and
@@ -55,7 +55,7 @@ static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
 
        id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
        if (id < 0)
-               return (id != -ENOMEM ? 0 : id);
+               return id;
 
        spin_lock(&gman->lock);
 
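ida_alloc_max() returns a non-negative ID on success and a negative errno on failure; the old expression returned 0 (success) for every error other than -ENOMEM, letting the caller proceed without a valid GMR id. The fix simply propagates the negative value, which is the usual pattern, sketched below in user-space form with a stubbed allocator in place of ida_alloc_max():

    #include <errno.h>
    #include <stdio.h>

    /* Stand-in for ida_alloc_max(): >= 0 is a valid id, < 0 is a negative errno. */
    static int fake_ida_alloc_max(int fail_with)
    {
        return fail_with ? -fail_with : 42;
    }

    static int get_node(int fail_with)
    {
        int id = fake_ida_alloc_max(fail_with);

        if (id < 0)
            return id;   /* propagate the error instead of masking it as 0 */

        printf("allocated id %d\n", id);
        return 0;
    }

    int main(void)
    {
        get_node(0);                                   /* success path */
        printf("error path -> %d\n", get_node(ENOSPC)); /* propagated errno */
        return 0;
    }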
Some files were not shown because too many files have changed in this diff.