Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
commit 36302685f5
@@ -73,3 +73,12 @@ KernelVersion: 3.0
 Contact:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 Description:
 		Number of sectors written by the frontend.
+
+What:		/sys/bus/xen-backend/devices/*/state
+Date:		August 2018
+KernelVersion:	4.19
+Contact:	Joe Jin <joe.jin@oracle.com>
+Description:
+		The state of the device. One of: 'Unknown',
+		'Initialising', 'Initialised', 'Connected', 'Closing',
+		'Closed', 'Reconfiguring', 'Reconfigured'.

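Note: the new 'state' attribute is plain text readable from userspace. A
minimal sketch follows; the device name "vbd-1-51712" is a made-up example
of the vbd-<domid>-<devid> naming and not part of the patch:

	#include <stdio.h>

	int main(void)
	{
		char state[32];
		FILE *f = fopen("/sys/bus/xen-backend/devices/vbd-1-51712/state", "r");

		if (!f)
			return 1;
		if (fgets(state, sizeof(state), f))
			printf("backend state: %s", state);	/* e.g. "Connected" */
		fclose(f);
		return 0;
	}
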
@@ -15,3 +15,13 @@ Description:
 		blkback. If the frontend tries to use more than
 		max_persistent_grants, the LRU kicks in and starts
 		removing 5% of max_persistent_grants every 100ms.
+
+What:		/sys/module/xen_blkback/parameters/persistent_grant_unused_seconds
+Date:		August 2018
+KernelVersion:	4.19
+Contact:	Roger Pau Monné <roger.pau@citrix.com>
+Description:
+		How long a persistent grant is allowed to remain
+		allocated without being in use. The time is in
+		seconds, 0 means indefinitely long.
+		The default is 60 seconds.

@@ -200,7 +200,7 @@ prctl(PR_SVE_SET_VL, unsigned long arg)
     thread.
 
 * Changing the vector length causes all of P0..P15, FFR and all bits of
-  Z0..V31 except for Z0 bits [127:0] .. Z31 bits [127:0] to become
+  Z0..Z31 except for Z0 bits [127:0] .. Z31 bits [127:0] to become
   unspecified. Calling PR_SVE_SET_VL with vl equal to the thread's current
   vector length, or calling PR_SVE_SET_VL with the PR_SVE_SET_VL_ONEXEC
   flag, does not constitute a change to the vector length for this purpose.

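Note: a minimal userspace sketch of the call documented here; the requested
vector length of 32 bytes is an arbitrary assumption (the kernel clamps it
to a supported value), and the fallback #define covers older headers:

	#include <stdio.h>
	#include <sys/prctl.h>

	#ifndef PR_SVE_SET_VL
	#define PR_SVE_SET_VL 50	/* from linux/prctl.h */
	#endif

	int main(void)
	{
		long ret = prctl(PR_SVE_SET_VL, 32);	/* request a 32-byte VL */

		if (ret < 0) {
			perror("PR_SVE_SET_VL");
			return 1;
		}
		printf("prctl returned %ld\n", ret);	/* encodes the VL actually set */
		return 0;
	}
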
@@ -500,7 +500,7 @@ References
 [2] arch/arm64/include/uapi/asm/ptrace.h
     AArch64 Linux ptrace ABI definitions
 
-[3] linux/Documentation/arm64/cpu-feature-registers.txt
+[3] Documentation/arm64/cpu-feature-registers.txt
 
 [4] ARM IHI0055C
     http://infocenter.arm.com/help/topic/com.arm.doc.ihi0055c/IHI0055C_beta_aapcs64.pdf

@@ -11,7 +11,7 @@ The RISC-V supervisor ISA manual specifies three interrupt sources that are
 attached to every HLIC: software interrupts, the timer interrupt, and external
 interrupts.  Software interrupts are used to send IPIs between cores.  The
 timer interrupt comes from an architecturally mandated real-time timer that is
-controller via Supervisor Binary Interface (SBI) calls and CSR reads.  External
+controlled via Supervisor Binary Interface (SBI) calls and CSR reads.  External
 interrupts connect all other device interrupts to the HLIC, which are routed
 via the platform-level interrupt controller (PLIC).

@@ -25,7 +25,15 @@ in the system.
 
 Required properties:
 - compatible : "riscv,cpu-intc"
-- #interrupt-cells : should be <1>
+- #interrupt-cells : should be <1>.  The interrupt sources are defined by the
+  RISC-V supervisor ISA manual, with only the following three interrupts being
+  defined for supervisor mode:
+    - Source 1 is the supervisor software interrupt, which can be sent by an SBI
+      call and is reserved for use by software.
+    - Source 5 is the supervisor timer interrupt, which can be configured by
+      SBI calls and implements a one-shot timer.
+    - Source 9 is the supervisor external interrupt, which chains to all other
+      device interrupts.
 - interrupt-controller : Identifies the node as an interrupt controller
 
 Furthermore, this interrupt-controller MUST be embedded inside the cpu

@@ -38,7 +46,7 @@ An example device tree entry for a HLIC is show below.
 	...
 	cpu1-intc: interrupt-controller {
 		#interrupt-cells = <1>;
-		compatible = "riscv,cpu-intc", "sifive,fu540-c000-cpu-intc";
+		compatible = "sifive,fu540-c000-cpu-intc", "riscv,cpu-intc";
 		interrupt-controller;
 	};
 };

@@ -19,6 +19,10 @@ Required properties:
 - slaves		: Specifies number for slaves
 - active_slave		: Specifies the slave to use for time stamping,
 			  ethtool and SIOCGMIIPHY
+- cpsw-phy-sel		: Specifies the phandle to the CPSW phy mode selection
+			  device. See also cpsw-phy-sel.txt for its binding.
+			  Note that in legacy cases cpsw-phy-sel may be
+			  a child device instead of a phandle.
 
 Optional properties:
 - ti,hwmods		: Must be "cpgmac0"

@@ -75,6 +79,7 @@ Examples:
 		cpts_clock_mult = <0x80000000>;
 		cpts_clock_shift = <29>;
 		syscon = <&cm>;
+		cpsw-phy-sel = <&phy_sel>;
 		cpsw_emac0: slave@0 {
 			phy_id = <&davinci_mdio>, <0>;
 			phy-mode = "rgmii-txid";

@@ -103,6 +108,7 @@ Examples:
 		cpts_clock_mult = <0x80000000>;
 		cpts_clock_shift = <29>;
 		syscon = <&cm>;
+		cpsw-phy-sel = <&phy_sel>;
 		cpsw_emac0: slave@0 {
 			phy_id = <&davinci_mdio>, <0>;
 			phy-mode = "rgmii-txid";

@@ -16,6 +16,7 @@ Required properties:
 	      "renesas,ether-r8a7794" if the device is a part of R8A7794 SoC.
 	      "renesas,gether-r8a77980" if the device is a part of R8A77980 SoC.
 	      "renesas,ether-r7s72100" if the device is a part of R7S72100 SoC.
+	      "renesas,ether-r7s9210" if the device is a part of R7S9210 SoC.
 	      "renesas,rcar-gen1-ether" for a generic R-Car Gen1 device.
 	      "renesas,rcar-gen2-ether" for a generic R-Car Gen2 or RZ/G1
 	      device.

@@ -7,6 +7,7 @@ Required properties:
 	  Examples with soctypes are:
 	    - "renesas,r8a7743-wdt" (RZ/G1M)
 	    - "renesas,r8a7745-wdt" (RZ/G1E)
+	    - "renesas,r8a774a1-wdt" (RZ/G2M)
 	    - "renesas,r8a7790-wdt" (R-Car H2)
 	    - "renesas,r8a7791-wdt" (R-Car M2-W)
 	    - "renesas,r8a7792-wdt" (R-Car V2H)

@@ -21,8 +22,8 @@ Required properties:
 	    - "renesas,r7s72100-wdt" (RZ/A1)
 	  The generic compatible string must be:
 	    - "renesas,rza-wdt" for RZ/A
-	    - "renesas,rcar-gen2-wdt" for R-Car Gen2 and RZ/G
-	    - "renesas,rcar-gen3-wdt" for R-Car Gen3
+	    - "renesas,rcar-gen2-wdt" for R-Car Gen2 and RZ/G1
+	    - "renesas,rcar-gen3-wdt" for R-Car Gen3 and RZ/G2
 
 - reg : Should contain WDT registers location and length
 - clocks : the clock feeding the watchdog timer.

|
@ -32,7 +32,7 @@ Supported chips:
|
|||
Datasheet: Publicly available at the Texas Instruments website
|
||||
http://www.ti.com/
|
||||
|
||||
Author: Lothar Felten <l-felten@ti.com>
|
||||
Author: Lothar Felten <lothar.felten@gmail.com>
|
||||
|
||||
Description
|
||||
-----------
|
||||
|
|
|
@@ -50,10 +50,14 @@ bounce buffer. But you don't need to care about that detail, just use the
 returned buffer. If NULL is returned, the threshold was not met or a bounce
 buffer could not be allocated. Fall back to PIO in that case.
 
-In any case, a buffer obtained from above needs to be released. It ensures data
-is copied back to the message and a potentially used bounce buffer is freed::
+In any case, a buffer obtained from above needs to be released. Another helper
+function ensures a potentially used bounce buffer is freed::
 
-	i2c_release_dma_safe_msg_buf(msg, dma_buf);
+	i2c_put_dma_safe_msg_buf(dma_buf, msg, xferred);
+
+The last argument 'xferred' controls if the buffer is synced back to the
+message or not. No syncing is needed in cases setting up DMA had an error and
+there was no data transferred.
 
 The bounce buffer handling from the core is generic and simple. It will always
 allocate a new bounce buffer. If you want a more sophisticated handling (e.g.

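Note: a sketch of the driver-side pattern described above, assuming a
master_xfer-style context; example_xfer_pio() and example_xfer_dma() are
hypothetical helpers standing in for a driver's real transfer paths:

	#include <linux/i2c.h>

	static int example_xfer_one(struct i2c_adapter *adap, struct i2c_msg *msg)
	{
		u8 *dma_buf;
		int ret;

		/* NULL means below threshold (8 bytes here) or no bounce buffer */
		dma_buf = i2c_get_dma_safe_msg_buf(msg, 8);
		if (!dma_buf)
			return example_xfer_pio(adap, msg);

		ret = example_xfer_dma(adap, msg, dma_buf);

		/* xferred == true syncs a bounce buffer back into the message */
		i2c_put_dma_safe_msg_buf(dma_buf, msg, ret >= 0);
		return ret;
	}
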
Makefile

@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 19
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Merciless Moray
 
 # *DOCUMENTATION*

@@ -807,6 +807,9 @@ KBUILD_CFLAGS += $(call cc-option,-Wdeclaration-after-statement,)
 # disable pointer signed / unsigned warnings in gcc 4.0
 KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign)
 
+# disable stringop warnings in gcc 8+
+KBUILD_CFLAGS += $(call cc-disable-warning, stringop-truncation)
+
 # disable invalid "can't wrap" optimizations for signed / pointers
 KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow)

|
|
@ -469,6 +469,7 @@
|
|||
ti,hwmods = "rtc";
|
||||
clocks = <&clk_32768_ck>;
|
||||
clock-names = "int-clk";
|
||||
system-power-controller;
|
||||
status = "disabled";
|
||||
};
|
||||
|
||||
|
|
|
@ -13,6 +13,43 @@
|
|||
reg = <0x40000000 0x08000000>;
|
||||
};
|
||||
|
||||
reg_vddio_sd0: regulator-vddio-sd0 {
|
||||
compatible = "regulator-fixed";
|
||||
regulator-name = "vddio-sd0";
|
||||
regulator-min-microvolt = <3300000>;
|
||||
regulator-max-microvolt = <3300000>;
|
||||
gpio = <&gpio1 29 0>;
|
||||
};
|
||||
|
||||
reg_lcd_3v3: regulator-lcd-3v3 {
|
||||
compatible = "regulator-fixed";
|
||||
regulator-name = "lcd-3v3";
|
||||
regulator-min-microvolt = <3300000>;
|
||||
regulator-max-microvolt = <3300000>;
|
||||
gpio = <&gpio1 18 0>;
|
||||
enable-active-high;
|
||||
};
|
||||
|
||||
reg_lcd_5v: regulator-lcd-5v {
|
||||
compatible = "regulator-fixed";
|
||||
regulator-name = "lcd-5v";
|
||||
regulator-min-microvolt = <5000000>;
|
||||
regulator-max-microvolt = <5000000>;
|
||||
};
|
||||
|
||||
panel {
|
||||
compatible = "sii,43wvf1g";
|
||||
backlight = <&backlight_display>;
|
||||
dvdd-supply = <®_lcd_3v3>;
|
||||
avdd-supply = <®_lcd_5v>;
|
||||
|
||||
port {
|
||||
panel_in: endpoint {
|
||||
remote-endpoint = <&display_out>;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
apb@80000000 {
|
||||
apbh@80000000 {
|
||||
gpmi-nand@8000c000 {
|
||||
|
@ -52,31 +89,11 @@
|
|||
lcdif@80030000 {
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&lcdif_24bit_pins_a>;
|
||||
lcd-supply = <®_lcd_3v3>;
|
||||
display = <&display0>;
|
||||
status = "okay";
|
||||
|
||||
display0: display0 {
|
||||
bits-per-pixel = <32>;
|
||||
bus-width = <24>;
|
||||
|
||||
display-timings {
|
||||
native-mode = <&timing0>;
|
||||
timing0: timing0 {
|
||||
clock-frequency = <9200000>;
|
||||
hactive = <480>;
|
||||
vactive = <272>;
|
||||
hback-porch = <15>;
|
||||
hfront-porch = <8>;
|
||||
vback-porch = <12>;
|
||||
vfront-porch = <4>;
|
||||
hsync-len = <1>;
|
||||
vsync-len = <1>;
|
||||
hsync-active = <0>;
|
||||
vsync-active = <0>;
|
||||
de-active = <1>;
|
||||
pixelclk-active = <0>;
|
||||
};
|
||||
port {
|
||||
display_out: endpoint {
|
||||
remote-endpoint = <&panel_in>;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
@@ -118,32 +135,7 @@
 		};
 	};
 
-	regulators {
-		compatible = "simple-bus";
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		reg_vddio_sd0: regulator@0 {
-			compatible = "regulator-fixed";
-			reg = <0>;
-			regulator-name = "vddio-sd0";
-			regulator-min-microvolt = <3300000>;
-			regulator-max-microvolt = <3300000>;
-			gpio = <&gpio1 29 0>;
-		};
-
-		reg_lcd_3v3: regulator@1 {
-			compatible = "regulator-fixed";
-			reg = <1>;
-			regulator-name = "lcd-3v3";
-			regulator-min-microvolt = <3300000>;
-			regulator-max-microvolt = <3300000>;
-			gpio = <&gpio1 18 0>;
-			enable-active-high;
-		};
-	};
-
-	backlight {
+	backlight_display: backlight {
 		compatible = "pwm-backlight";
 		pwms = <&pwm 2 5000000>;
 		brightness-levels = <0 4 8 16 32 64 128 255>;

@@ -13,6 +13,87 @@
 		reg = <0x40000000 0x08000000>;
 	};
 
+
+	reg_3p3v: regulator-3p3v {
+		compatible = "regulator-fixed";
+		regulator-name = "3P3V";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-always-on;
+	};
+
+	reg_vddio_sd0: regulator-vddio-sd0 {
+		compatible = "regulator-fixed";
+		regulator-name = "vddio-sd0";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		gpio = <&gpio3 28 0>;
+	};
+
+	reg_fec_3v3: regulator-fec-3v3 {
+		compatible = "regulator-fixed";
+		regulator-name = "fec-3v3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		gpio = <&gpio2 15 0>;
+	};
+
+	reg_usb0_vbus: regulator-usb0-vbus {
+		compatible = "regulator-fixed";
+		regulator-name = "usb0_vbus";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		gpio = <&gpio3 9 0>;
+		enable-active-high;
+	};
+
+	reg_usb1_vbus: regulator-usb1-vbus {
+		compatible = "regulator-fixed";
+		regulator-name = "usb1_vbus";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		gpio = <&gpio3 8 0>;
+		enable-active-high;
+	};
+
+	reg_lcd_3v3: regulator-lcd-3v3 {
+		compatible = "regulator-fixed";
+		regulator-name = "lcd-3v3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		gpio = <&gpio3 30 0>;
+		enable-active-high;
+	};
+
+	reg_can_3v3: regulator-can-3v3 {
+		compatible = "regulator-fixed";
+		regulator-name = "can-3v3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		gpio = <&gpio2 13 0>;
+		enable-active-high;
+	};
+
+	reg_lcd_5v: regulator-lcd-5v {
+		compatible = "regulator-fixed";
+		regulator-name = "lcd-5v";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+	};
+
+	panel {
+		compatible = "sii,43wvf1g";
+		backlight = <&backlight_display>;
+		dvdd-supply = <&reg_lcd_3v3>;
+		avdd-supply = <&reg_lcd_5v>;
+
+		port {
+			panel_in: endpoint {
+				remote-endpoint = <&display_out>;
+			};
+		};
+	};
+
 	apb@80000000 {
 		apbh@80000000 {
 			gpmi-nand@8000c000 {
@@ -116,31 +197,11 @@
 			pinctrl-names = "default";
 			pinctrl-0 = <&lcdif_24bit_pins_a
 				     &lcdif_pins_evk>;
-			lcd-supply = <&reg_lcd_3v3>;
-			display = <&display0>;
 			status = "okay";
 
-			display0: display0 {
-				bits-per-pixel = <32>;
-				bus-width = <24>;
-
-				display-timings {
-					native-mode = <&timing0>;
-					timing0: timing0 {
-						clock-frequency = <33500000>;
-						hactive = <800>;
-						vactive = <480>;
-						hback-porch = <89>;
-						hfront-porch = <164>;
-						vback-porch = <23>;
-						vfront-porch = <10>;
-						hsync-len = <10>;
-						vsync-len = <10>;
-						hsync-active = <0>;
-						vsync-active = <0>;
-						de-active = <1>;
-						pixelclk-active = <0>;
-					};
-				};
-			};
+			port {
+				display_out: endpoint {
+					remote-endpoint = <&panel_in>;
+				};
+			};
 		};
@@ -269,80 +330,6 @@
 		};
 	};
 
-	regulators {
-		compatible = "simple-bus";
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		reg_3p3v: regulator@0 {
-			compatible = "regulator-fixed";
-			reg = <0>;
-			regulator-name = "3P3V";
-			regulator-min-microvolt = <3300000>;
-			regulator-max-microvolt = <3300000>;
-			regulator-always-on;
-		};
-
-		reg_vddio_sd0: regulator@1 {
-			compatible = "regulator-fixed";
-			reg = <1>;
-			regulator-name = "vddio-sd0";
-			regulator-min-microvolt = <3300000>;
-			regulator-max-microvolt = <3300000>;
-			gpio = <&gpio3 28 0>;
-		};
-
-		reg_fec_3v3: regulator@2 {
-			compatible = "regulator-fixed";
-			reg = <2>;
-			regulator-name = "fec-3v3";
-			regulator-min-microvolt = <3300000>;
-			regulator-max-microvolt = <3300000>;
-			gpio = <&gpio2 15 0>;
-		};
-
-		reg_usb0_vbus: regulator@3 {
-			compatible = "regulator-fixed";
-			reg = <3>;
-			regulator-name = "usb0_vbus";
-			regulator-min-microvolt = <5000000>;
-			regulator-max-microvolt = <5000000>;
-			gpio = <&gpio3 9 0>;
-			enable-active-high;
-		};
-
-		reg_usb1_vbus: regulator@4 {
-			compatible = "regulator-fixed";
-			reg = <4>;
-			regulator-name = "usb1_vbus";
-			regulator-min-microvolt = <5000000>;
-			regulator-max-microvolt = <5000000>;
-			gpio = <&gpio3 8 0>;
-			enable-active-high;
-		};
-
-		reg_lcd_3v3: regulator@5 {
-			compatible = "regulator-fixed";
-			reg = <5>;
-			regulator-name = "lcd-3v3";
-			regulator-min-microvolt = <3300000>;
-			regulator-max-microvolt = <3300000>;
-			gpio = <&gpio3 30 0>;
-			enable-active-high;
-		};
-
-		reg_can_3v3: regulator@6 {
-			compatible = "regulator-fixed";
-			reg = <6>;
-			regulator-name = "can-3v3";
-			regulator-min-microvolt = <3300000>;
-			regulator-max-microvolt = <3300000>;
-			gpio = <&gpio2 13 0>;
-			enable-active-high;
-		};
-
-	};
-
 	sound {
 		compatible = "fsl,imx28-evk-sgtl5000",
 			     "fsl,mxs-audio-sgtl5000";
@@ -363,7 +350,7 @@
 		};
 	};
 
-	backlight {
+	backlight_display: backlight {
 		compatible = "pwm-backlight";
 		pwms = <&pwm 2 5000000>;
 		brightness-levels = <0 4 8 16 32 64 128 255>;

@@ -126,10 +126,14 @@
 				interrupt-names = "msi";
 				#interrupt-cells = <1>;
 				interrupt-map-mask = <0 0 0 0x7>;
-				interrupt-map = <0 0 0 1 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
-						<0 0 0 2 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
-						<0 0 0 3 &intc GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
-						<0 0 0 4 &intc GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
+				/*
+				 * Reference manual lists pci irqs incorrectly
+				 * Real hardware ordering is same as imx6: D+MSI, C, B, A
+				 */
+				interrupt-map = <0 0 0 1 &intc GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
+						<0 0 0 2 &intc GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
+						<0 0 0 3 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+						<0 0 0 4 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clks IMX7D_PCIE_CTRL_ROOT_CLK>,
 					 <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>,
 					 <&clks IMX7D_PCIE_PHY_ROOT_CLK>;

@@ -354,7 +354,7 @@
 &mmc2 {
 	vmmc-supply = <&vsdio>;
 	bus-width = <8>;
-	non-removable;
+	ti,non-removable;
 };
 
 &mmc3 {

|
|||
OMAP4_IOPAD(0x10c, PIN_INPUT | MUX_MODE1) /* abe_mcbsp3_fsx */
|
||||
>;
|
||||
};
|
||||
};
|
||||
|
||||
&omap4_pmx_wkup {
|
||||
usb_gpio_mux_sel2: pinmux_usb_gpio_mux_sel2_pins {
|
||||
/* gpio_wk0 */
|
||||
pinctrl-single,pins = <
|
||||
OMAP4_IOPAD(0x040, PIN_OUTPUT_PULLDOWN | MUX_MODE3)
|
||||
>;
|
||||
};
|
||||
|
||||
vibrator_direction_pin: pinmux_vibrator_direction_pin {
|
||||
pinctrl-single,pins = <
|
||||
|
@ -644,6 +635,15 @@
|
|||
};
|
||||
};
|
||||
|
||||
&omap4_pmx_wkup {
|
||||
usb_gpio_mux_sel2: pinmux_usb_gpio_mux_sel2_pins {
|
||||
/* gpio_wk0 */
|
||||
pinctrl-single,pins = <
|
||||
OMAP4_IOPAD(0x040, PIN_OUTPUT_PULLDOWN | MUX_MODE3)
|
||||
>;
|
||||
};
|
||||
};
|
||||
|
||||
/*
|
||||
* As uart1 is wired to mdm6600 with rts and cts, we can use the cts pin for
|
||||
* uart1 wakeirq.
|
||||
|
|
|
@@ -257,6 +257,7 @@ CONFIG_IMX_IPUV3_CORE=y
 CONFIG_DRM=y
 CONFIG_DRM_PANEL_LVDS=y
 CONFIG_DRM_PANEL_SIMPLE=y
+CONFIG_DRM_PANEL_SEIKO_43WVF1G=y
 CONFIG_DRM_DW_HDMI_AHB_AUDIO=m
 CONFIG_DRM_DW_HDMI_CEC=y
 CONFIG_DRM_IMX=y

@@ -95,6 +95,7 @@ CONFIG_MFD_MXS_LRADC=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_DRM=y
+CONFIG_DRM_PANEL_SEIKO_43WVF1G=y
 CONFIG_DRM_MXSFB=y
 CONFIG_FB_MODE_HELPERS=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y

@@ -5,19 +5,19 @@ CONFIG_HIGH_RES_TIMERS=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
 # CONFIG_ARCH_MULTI_V7 is not set
 CONFIG_ARCH_VERSATILE=y
 CONFIG_AEABI=y
 CONFIG_OABI_COMPAT=y
+CONFIG_CMA=y
 CONFIG_ZBOOT_ROM_TEXT=0x0
 CONFIG_ZBOOT_ROM_BSS=0x0
 CONFIG_CMDLINE="root=1f03 mem=32M"
 CONFIG_FPE_NWFPE=y
 CONFIG_VFP=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_CMA=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y

@@ -59,6 +59,7 @@ CONFIG_GPIO_PL061=y
 CONFIG_DRM=y
 CONFIG_DRM_PANEL_ARM_VERSATILE=y
 CONFIG_DRM_PANEL_SIMPLE=y
+CONFIG_DRM_DUMB_VGA_DAC=y
 CONFIG_DRM_PL111=y
 CONFIG_FB_MODE_HELPERS=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y

@@ -89,9 +90,10 @@ CONFIG_NFSD=y
 CONFIG_NFSD_V3=y
 CONFIG_NLS_CODEPAGE_850=m
 CONFIG_NLS_ISO8859_1=m
-CONFIG_FONTS=y
-CONFIG_FONT_ACORN_8x8=y
 CONFIG_DEBUG_FS=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_USER=y
+CONFIG_DEBUG_LL=y
+CONFIG_FONTS=y
+CONFIG_FONT_ACORN_8x8=y

@@ -2160,6 +2160,37 @@ static int of_dev_hwmod_lookup(struct device_node *np,
 	return -ENODEV;
 }
 
+/**
+ * omap_hwmod_fix_mpu_rt_idx - fix up mpu_rt_idx register offsets
+ *
+ * @oh: struct omap_hwmod *
+ * @np: struct device_node *
+ *
+ * Fix up module register offsets for modules with mpu_rt_idx.
+ * Only needed for cpsw with interconnect target module defined
+ * in device tree while still using legacy hwmod platform data
+ * for rev, sysc and syss registers.
+ *
+ * Can be removed when all cpsw hwmod platform data has been
+ * dropped.
+ */
+static void omap_hwmod_fix_mpu_rt_idx(struct omap_hwmod *oh,
+				      struct device_node *np,
+				      struct resource *res)
+{
+	struct device_node *child = NULL;
+	int error;
+
+	child = of_get_next_child(np, child);
+	if (!child)
+		return;
+
+	error = of_address_to_resource(child, oh->mpu_rt_idx, res);
+	if (error)
+		pr_err("%s: error mapping mpu_rt_idx: %i\n",
+		       __func__, error);
+}
+
 /**
  * omap_hwmod_parse_module_range - map module IO range from device tree
  * @oh: struct omap_hwmod *

@@ -2220,7 +2251,13 @@ int omap_hwmod_parse_module_range(struct omap_hwmod *oh,
 	size = be32_to_cpup(ranges);
 
 	pr_debug("omap_hwmod: %s %s at 0x%llx size 0x%llx\n",
-		 oh->name, np->name, base, size);
+		 oh ? oh->name : "", np->name, base, size);
+
+	if (oh && oh->mpu_rt_idx) {
+		omap_hwmod_fix_mpu_rt_idx(oh, np, res);
+
+		return 0;
+	}
 
 	res->start = base;
 	res->end = base + size - 1;

@@ -763,7 +763,6 @@ config NEED_PER_CPU_EMBED_FIRST_CHUNK
 
 config HOLES_IN_ZONE
 	def_bool y
-	depends on NUMA
 
 source kernel/Kconfig.hz
 

@@ -38,6 +38,7 @@ CONFIG_ARCH_BCM_IPROC=y
 CONFIG_ARCH_BERLIN=y
 CONFIG_ARCH_BRCMSTB=y
 CONFIG_ARCH_EXYNOS=y
+CONFIG_ARCH_K3=y
 CONFIG_ARCH_LAYERSCAPE=y
 CONFIG_ARCH_LG1K=y
 CONFIG_ARCH_HISI=y

@@ -605,6 +606,8 @@ CONFIG_ARCH_TEGRA_132_SOC=y
 CONFIG_ARCH_TEGRA_210_SOC=y
 CONFIG_ARCH_TEGRA_186_SOC=y
 CONFIG_ARCH_TEGRA_194_SOC=y
+CONFIG_ARCH_K3_AM6_SOC=y
+CONFIG_SOC_TI=y
 CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
 CONFIG_EXTCON_USB_GPIO=y
 CONFIG_EXTCON_USBC_CROS_EC=y

@@ -417,7 +417,7 @@ static int gcm_encrypt(struct aead_request *req)
 		__aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds);
 		put_unaligned_be32(2, iv + GCM_IV_SIZE);
 
-		while (walk.nbytes >= AES_BLOCK_SIZE) {
+		while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
 			int blocks = walk.nbytes / AES_BLOCK_SIZE;
 			u8 *dst = walk.dst.virt.addr;
 			u8 *src = walk.src.virt.addr;

@@ -437,11 +437,18 @@ static int gcm_encrypt(struct aead_request *req)
 					    NULL);
 
 			err = skcipher_walk_done(&walk,
-						 walk.nbytes % AES_BLOCK_SIZE);
+						 walk.nbytes % (2 * AES_BLOCK_SIZE));
 		}
-		if (walk.nbytes)
+		if (walk.nbytes) {
 			__aes_arm64_encrypt(ctx->aes_key.key_enc, ks, iv,
 					    nrounds);
+			if (walk.nbytes > AES_BLOCK_SIZE) {
+				crypto_inc(iv, AES_BLOCK_SIZE);
+				__aes_arm64_encrypt(ctx->aes_key.key_enc,
+						    ks + AES_BLOCK_SIZE, iv,
+						    nrounds);
+			}
+		}
 	}
 
 	/* handle the tail */

@@ -545,7 +552,7 @@ static int gcm_decrypt(struct aead_request *req)
 		__aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds);
 		put_unaligned_be32(2, iv + GCM_IV_SIZE);
 
-		while (walk.nbytes >= AES_BLOCK_SIZE) {
+		while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
 			int blocks = walk.nbytes / AES_BLOCK_SIZE;
 			u8 *dst = walk.dst.virt.addr;
 			u8 *src = walk.src.virt.addr;

@@ -564,12 +571,22 @@ static int gcm_decrypt(struct aead_request *req)
 			} while (--blocks > 0);
 
 			err = skcipher_walk_done(&walk,
-						 walk.nbytes % AES_BLOCK_SIZE);
+						 walk.nbytes % (2 * AES_BLOCK_SIZE));
 		}
-		if (walk.nbytes)
+		if (walk.nbytes) {
+			if (walk.nbytes > AES_BLOCK_SIZE) {
+				u8 *iv2 = iv + AES_BLOCK_SIZE;
+
+				memcpy(iv2, iv, AES_BLOCK_SIZE);
+				crypto_inc(iv2, AES_BLOCK_SIZE);
+
+				__aes_arm64_encrypt(ctx->aes_key.key_enc, iv2,
+						    iv2, nrounds);
+			}
 			__aes_arm64_encrypt(ctx->aes_key.key_enc, iv, iv,
 					    nrounds);
+		}
 	}
 
 	/* handle the tail */
 	if (walk.nbytes) {

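Note: taken together, the gcm_encrypt()/gcm_decrypt() hunks above make the
bulk loop run only while at least two full blocks remain, so the tail path
can now see up to two blocks; the extra keystream block (ks + AES_BLOCK_SIZE
on encrypt, iv2 on decrypt) covers the second one.
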
@@ -69,5 +69,5 @@ static void __exit sm4_ce_mod_fini(void)
 	crypto_unregister_alg(&sm4_ce_alg);
 }
 
-module_cpu_feature_match(SM3, sm4_ce_mod_init);
+module_cpu_feature_match(SM4, sm4_ce_mod_init);
 module_exit(sm4_ce_mod_fini);

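Note: module_cpu_feature_match() ties module autoloading to a CPU feature
bit, so matching on SM3 bound the SM4 driver to the wrong capability. A
stripped-down sketch of the pattern, with placeholder names:

	#include <linux/module.h>
	#include <linux/cpufeature.h>

	static int __init sm4_example_init(void)
	{
		return 0;	/* algorithm registration elided */
	}

	static void __exit sm4_example_exit(void)
	{
	}

	/* Autoload only on CPUs whose hwcaps advertise SM4 instructions. */
	module_cpu_feature_match(SM4, sm4_example_init);
	module_exit(sm4_example_exit);
	MODULE_LICENSE("GPL");
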
@@ -98,11 +98,10 @@ static time64_t pmu_read_time(void)
 
 	if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0)
 		return 0;
-	while (!req.complete)
-		pmu_poll();
+	pmu_wait_complete(&req);
 
-	time = (u32)((req.reply[1] << 24) | (req.reply[2] << 16) |
-		     (req.reply[3] << 8) | req.reply[4]);
+	time = (u32)((req.reply[0] << 24) | (req.reply[1] << 16) |
+		     (req.reply[2] << 8) | req.reply[3]);
 
 	return time - RTC_OFFSET;
 }
@@ -116,8 +115,7 @@ static void pmu_write_time(time64_t time)
 			(data >> 24) & 0xFF, (data >> 16) & 0xFF,
 			(data >> 8) & 0xFF, data & 0xFF) < 0)
 		return;
-	while (!req.complete)
-		pmu_poll();
+	pmu_wait_complete(&req);
 }
 
 static __u8 pmu_read_pram(int offset)

@@ -3,15 +3,6 @@
 config TRACE_IRQFLAGS_SUPPORT
 	def_bool y
 
-config DEBUG_STACK_USAGE
-	bool "Enable stack utilization instrumentation"
-	depends on DEBUG_KERNEL
-	help
-	  Enables the display of the minimum amount of free stack which each
-	  task has ever had available in the sysrq-T and sysrq-P debug output.
-
-	  This option will slow down process creation somewhat.
-
 config EARLY_PRINTK
 	bool "Activate early kernel debugging"
 	default y

@@ -177,7 +177,6 @@ config PPC
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_MMAP_RND_BITS
 	select HAVE_ARCH_MMAP_RND_COMPAT_BITS	if COMPAT
-	select HAVE_ARCH_PREL32_RELOCATIONS
 	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_CBPF_JIT			if !PPC64

@@ -14,6 +14,10 @@
 #ifndef _ASM_RISCV_TLB_H
 #define _ASM_RISCV_TLB_H
 
+struct mmu_gather;
+
+static void tlb_flush(struct mmu_gather *tlb);
+
 #include <asm-generic/tlb.h>
 
 static inline void tlb_flush(struct mmu_gather *tlb)

@@ -65,24 +65,11 @@ SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
 SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start, uintptr_t, end,
 	uintptr_t, flags)
 {
-#ifdef CONFIG_SMP
-	struct mm_struct *mm = current->mm;
-	bool local = (flags & SYS_RISCV_FLUSH_ICACHE_LOCAL) != 0;
-#endif
-
 	/* Check the reserved flags. */
 	if (unlikely(flags & ~SYS_RISCV_FLUSH_ICACHE_ALL))
 		return -EINVAL;
 
-	/*
-	 * Without CONFIG_SMP flush_icache_mm is a just a flush_icache_all(),
-	 * which generates unused variable warnings all over this function.
-	 */
-#ifdef CONFIG_SMP
-	flush_icache_mm(mm, local);
-#else
-	flush_icache_all();
-#endif
+	flush_icache_mm(current->mm, flags & SYS_RISCV_FLUSH_ICACHE_LOCAL);
 
 	return 0;
 }

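Note: a userspace view of the syscall this hunk simplifies; a sketch only,
assuming riscv headers that define SYS_riscv_flush_icache, and with the
LOCAL flag value (1UL) hardcoded from the uapi definition as an assumption:

	#include <unistd.h>
	#include <sys/syscall.h>

	#define FLUSH_ICACHE_LOCAL	1UL	/* SYS_RISCV_FLUSH_ICACHE_LOCAL */

	static void flush_icache(void *start, void *end, int local_only)
	{
		/* flags == 0 flushes on all harts */
		syscall(SYS_riscv_flush_icache, start, end,
			local_only ? FLUSH_ICACHE_LOCAL : 0UL);
	}
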
|
@ -9,6 +9,7 @@
|
|||
#include <linux/irq.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <asm/leon.h>
|
||||
#include <asm/leon_amba.h>
|
||||
|
||||
|
@ -381,6 +382,9 @@ static struct platform_device * __init scan_one_device(struct device_node *dp,
|
|||
else
|
||||
dev_set_name(&op->dev, "%08x", dp->phandle);
|
||||
|
||||
op->dev.coherent_dma_mask = DMA_BIT_MASK(32);
|
||||
op->dev.dma_mask = &op->dev.coherent_dma_mask;
|
||||
|
||||
if (of_device_register(op)) {
|
||||
printk("%s: Could not register of device.\n",
|
||||
dp->full_name);
|
||||
|
|
|
@ -2,6 +2,7 @@
|
|||
#include <linux/string.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/mod_devicetable.h>
|
||||
|
@ -675,6 +676,8 @@ static struct platform_device * __init scan_one_device(struct device_node *dp,
|
|||
dev_set_name(&op->dev, "root");
|
||||
else
|
||||
dev_set_name(&op->dev, "%08x", dp->phandle);
|
||||
op->dev.coherent_dma_mask = DMA_BIT_MASK(32);
|
||||
op->dev.dma_mask = &op->dev.coherent_dma_mask;
|
||||
|
||||
if (of_device_register(op)) {
|
||||
printk("%s: Could not register of device.\n",
|
||||
|
|
|
@@ -2843,7 +2843,7 @@ config X86_SYSFB
 	  This option, if enabled, marks VGA/VBE/EFI framebuffers as generic
 	  framebuffers so the new generic system-framebuffer drivers can be
 	  used on x86. If the framebuffer is not compatible with the generic
-	  modes, it is adverticed as fallback platform framebuffer so legacy
+	  modes, it is advertised as fallback platform framebuffer so legacy
 	  drivers like efifb, vesafb and uvesafb can pick it up.
 	  If this option is not selected, all system framebuffers are always
 	  marked as fallback platform framebuffers as usual.

@@ -175,22 +175,6 @@ ifdef CONFIG_FUNCTION_GRAPH_TRACER
   endif
 endif
 
-ifndef CC_HAVE_ASM_GOTO
-  $(error Compiler lacks asm-goto support.)
-endif
-
-#
-# Jump labels need '-maccumulate-outgoing-args' for gcc < 4.5.2 to prevent a
-# GCC bug (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=46226). There's no way
-# to test for this bug at compile-time because the test case needs to execute,
-# which is a no-go for cross compilers. So check the GCC version instead.
-#
-ifdef CONFIG_JUMP_LABEL
-  ifneq ($(ACCUMULATE_OUTGOING_ARGS), 1)
-    ACCUMULATE_OUTGOING_ARGS = $(call cc-if-fullversion, -lt, 040502, 1)
-  endif
-endif
-
 ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1)
 	# This compiler flag is not supported by Clang:
 	KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args,)
@@ -312,6 +296,13 @@ PHONY += vdso_install
 vdso_install:
 	$(Q)$(MAKE) $(build)=arch/x86/entry/vdso $@
 
+archprepare: checkbin
+checkbin:
+ifndef CC_HAVE_ASM_GOTO
+	@echo Compiler lacks asm-goto support.
+	@exit 1
+endif
+
 archclean:
 	$(Q)rm -rf $(objtree)/arch/i386
 	$(Q)rm -rf $(objtree)/arch/x86_64

|
|||
pcmpeqd TWOONE(%rip), \TMP2
|
||||
pand POLY(%rip), \TMP2
|
||||
pxor \TMP2, \TMP3
|
||||
movdqa \TMP3, HashKey(%arg2)
|
||||
movdqu \TMP3, HashKey(%arg2)
|
||||
|
||||
movdqa \TMP3, \TMP5
|
||||
pshufd $78, \TMP3, \TMP1
|
||||
pxor \TMP3, \TMP1
|
||||
movdqa \TMP1, HashKey_k(%arg2)
|
||||
movdqu \TMP1, HashKey_k(%arg2)
|
||||
|
||||
GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
|
||||
# TMP5 = HashKey^2<<1 (mod poly)
|
||||
movdqa \TMP5, HashKey_2(%arg2)
|
||||
movdqu \TMP5, HashKey_2(%arg2)
|
||||
# HashKey_2 = HashKey^2<<1 (mod poly)
|
||||
pshufd $78, \TMP5, \TMP1
|
||||
pxor \TMP5, \TMP1
|
||||
movdqa \TMP1, HashKey_2_k(%arg2)
|
||||
movdqu \TMP1, HashKey_2_k(%arg2)
|
||||
|
||||
GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
|
||||
# TMP5 = HashKey^3<<1 (mod poly)
|
||||
movdqa \TMP5, HashKey_3(%arg2)
|
||||
movdqu \TMP5, HashKey_3(%arg2)
|
||||
pshufd $78, \TMP5, \TMP1
|
||||
pxor \TMP5, \TMP1
|
||||
movdqa \TMP1, HashKey_3_k(%arg2)
|
||||
movdqu \TMP1, HashKey_3_k(%arg2)
|
||||
|
||||
GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
|
||||
# TMP5 = HashKey^3<<1 (mod poly)
|
||||
movdqa \TMP5, HashKey_4(%arg2)
|
||||
movdqu \TMP5, HashKey_4(%arg2)
|
||||
pshufd $78, \TMP5, \TMP1
|
||||
pxor \TMP5, \TMP1
|
||||
movdqa \TMP1, HashKey_4_k(%arg2)
|
||||
movdqu \TMP1, HashKey_4_k(%arg2)
|
||||
.endm
|
||||
|
||||
# GCM_INIT initializes a gcm_context struct to prepare for encoding/decoding.
|
||||
|
@ -271,7 +271,7 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff
|
|||
movdqu %xmm0, CurCount(%arg2) # ctx_data.current_counter = iv
|
||||
|
||||
PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
|
||||
movdqa HashKey(%arg2), %xmm13
|
||||
movdqu HashKey(%arg2), %xmm13
|
||||
|
||||
CALC_AAD_HASH %xmm13, \AAD, \AADLEN, %xmm0, %xmm1, %xmm2, %xmm3, \
|
||||
%xmm4, %xmm5, %xmm6
|
||||
|
@ -997,7 +997,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
|
|||
pshufd $78, \XMM5, \TMP6
|
||||
pxor \XMM5, \TMP6
|
||||
paddd ONE(%rip), \XMM0 # INCR CNT
|
||||
movdqa HashKey_4(%arg2), \TMP5
|
||||
movdqu HashKey_4(%arg2), \TMP5
|
||||
PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1
|
||||
movdqa \XMM0, \XMM1
|
||||
paddd ONE(%rip), \XMM0 # INCR CNT
|
||||
|
@ -1016,7 +1016,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
|
|||
pxor (%arg1), \XMM2
|
||||
pxor (%arg1), \XMM3
|
||||
pxor (%arg1), \XMM4
|
||||
movdqa HashKey_4_k(%arg2), \TMP5
|
||||
movdqu HashKey_4_k(%arg2), \TMP5
|
||||
PCLMULQDQ 0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0)
|
||||
movaps 0x10(%arg1), \TMP1
|
||||
AESENC \TMP1, \XMM1 # Round 1
|
||||
|
@ -1031,7 +1031,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
|
|||
movdqa \XMM6, \TMP1
|
||||
pshufd $78, \XMM6, \TMP2
|
||||
pxor \XMM6, \TMP2
|
||||
movdqa HashKey_3(%arg2), \TMP5
|
||||
movdqu HashKey_3(%arg2), \TMP5
|
||||
PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1
|
||||
movaps 0x30(%arg1), \TMP3
|
||||
AESENC \TMP3, \XMM1 # Round 3
|
||||
|
@ -1044,7 +1044,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
|
|||
AESENC \TMP3, \XMM2
|
||||
AESENC \TMP3, \XMM3
|
||||
AESENC \TMP3, \XMM4
|
||||
movdqa HashKey_3_k(%arg2), \TMP5
|
||||
movdqu HashKey_3_k(%arg2), \TMP5
|
||||
PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
|
||||
movaps 0x50(%arg1), \TMP3
|
||||
AESENC \TMP3, \XMM1 # Round 5
|
||||
|
@ -1058,7 +1058,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
|
|||
movdqa \XMM7, \TMP1
|
||||
pshufd $78, \XMM7, \TMP2
|
||||
pxor \XMM7, \TMP2
|
||||
movdqa HashKey_2(%arg2), \TMP5
|
||||
movdqu HashKey_2(%arg2), \TMP5
|
||||
|
||||
# Multiply TMP5 * HashKey using karatsuba
|
||||
|
||||
|
@ -1074,7 +1074,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
|
|||
AESENC \TMP3, \XMM2
|
||||
AESENC \TMP3, \XMM3
|
||||
AESENC \TMP3, \XMM4
|
||||
movdqa HashKey_2_k(%arg2), \TMP5
|
||||
movdqu HashKey_2_k(%arg2), \TMP5
|
||||
PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
|
||||
movaps 0x80(%arg1), \TMP3
|
||||
AESENC \TMP3, \XMM1 # Round 8
|
||||
|
@ -1092,7 +1092,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
|
|||
movdqa \XMM8, \TMP1
|
||||
pshufd $78, \XMM8, \TMP2
|
||||
pxor \XMM8, \TMP2
|
||||
movdqa HashKey(%arg2), \TMP5
|
||||
movdqu HashKey(%arg2), \TMP5
|
||||
PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
|
||||
movaps 0x90(%arg1), \TMP3
|
||||
AESENC \TMP3, \XMM1 # Round 9
|
||||
|
@ -1121,7 +1121,7 @@ aes_loop_par_enc_done\@:
|
|||
AESENCLAST \TMP3, \XMM2
|
||||
AESENCLAST \TMP3, \XMM3
|
||||
AESENCLAST \TMP3, \XMM4
|
||||
movdqa HashKey_k(%arg2), \TMP5
|
||||
movdqu HashKey_k(%arg2), \TMP5
|
||||
PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
|
||||
movdqu (%arg4,%r11,1), \TMP3
|
||||
pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK
|
||||
|
@ -1205,7 +1205,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
|
|||
pshufd $78, \XMM5, \TMP6
|
||||
pxor \XMM5, \TMP6
|
||||
paddd ONE(%rip), \XMM0 # INCR CNT
|
||||
movdqa HashKey_4(%arg2), \TMP5
|
||||
movdqu HashKey_4(%arg2), \TMP5
|
||||
PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1
|
||||
movdqa \XMM0, \XMM1
|
||||
paddd ONE(%rip), \XMM0 # INCR CNT
|
||||
|
@ -1224,7 +1224,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
|
|||
pxor (%arg1), \XMM2
|
||||
pxor (%arg1), \XMM3
|
||||
pxor (%arg1), \XMM4
|
||||
movdqa HashKey_4_k(%arg2), \TMP5
|
||||
movdqu HashKey_4_k(%arg2), \TMP5
|
||||
PCLMULQDQ 0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0)
|
||||
movaps 0x10(%arg1), \TMP1
|
||||
AESENC \TMP1, \XMM1 # Round 1
|
||||
|
@ -1239,7 +1239,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
|
|||
movdqa \XMM6, \TMP1
|
||||
pshufd $78, \XMM6, \TMP2
|
||||
pxor \XMM6, \TMP2
|
||||
movdqa HashKey_3(%arg2), \TMP5
|
||||
movdqu HashKey_3(%arg2), \TMP5
|
||||
PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1
|
||||
movaps 0x30(%arg1), \TMP3
|
||||
AESENC \TMP3, \XMM1 # Round 3
|
||||
|
@ -1252,7 +1252,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
|
|||
AESENC \TMP3, \XMM2
|
||||
AESENC \TMP3, \XMM3
|
||||
AESENC \TMP3, \XMM4
|
||||
movdqa HashKey_3_k(%arg2), \TMP5
|
||||
movdqu HashKey_3_k(%arg2), \TMP5
|
||||
PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
|
||||
movaps 0x50(%arg1), \TMP3
|
||||
AESENC \TMP3, \XMM1 # Round 5
|
||||
|
@ -1266,7 +1266,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
|
|||
movdqa \XMM7, \TMP1
|
||||
pshufd $78, \XMM7, \TMP2
|
||||
pxor \XMM7, \TMP2
|
||||
movdqa HashKey_2(%arg2), \TMP5
|
||||
movdqu HashKey_2(%arg2), \TMP5
|
||||
|
||||
# Multiply TMP5 * HashKey using karatsuba
|
||||
|
||||
|
@ -1282,7 +1282,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
|
|||
AESENC \TMP3, \XMM2
|
||||
AESENC \TMP3, \XMM3
|
||||
AESENC \TMP3, \XMM4
|
||||
movdqa HashKey_2_k(%arg2), \TMP5
|
||||
movdqu HashKey_2_k(%arg2), \TMP5
|
||||
PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
|
||||
movaps 0x80(%arg1), \TMP3
|
||||
AESENC \TMP3, \XMM1 # Round 8
|
||||
|
@ -1300,7 +1300,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
|
|||
movdqa \XMM8, \TMP1
|
||||
pshufd $78, \XMM8, \TMP2
|
||||
pxor \XMM8, \TMP2
|
||||
movdqa HashKey(%arg2), \TMP5
|
||||
movdqu HashKey(%arg2), \TMP5
|
||||
PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
|
||||
movaps 0x90(%arg1), \TMP3
|
||||
AESENC \TMP3, \XMM1 # Round 9
|
||||
|
@ -1329,7 +1329,7 @@ aes_loop_par_dec_done\@:
|
|||
AESENCLAST \TMP3, \XMM2
|
||||
AESENCLAST \TMP3, \XMM3
|
||||
AESENCLAST \TMP3, \XMM4
|
||||
movdqa HashKey_k(%arg2), \TMP5
|
||||
movdqu HashKey_k(%arg2), \TMP5
|
||||
PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
|
||||
movdqu (%arg4,%r11,1), \TMP3
|
||||
pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK
|
||||
|
@ -1405,10 +1405,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
|
|||
movdqa \XMM1, \TMP6
|
||||
pshufd $78, \XMM1, \TMP2
|
||||
pxor \XMM1, \TMP2
|
||||
movdqa HashKey_4(%arg2), \TMP5
|
||||
movdqu HashKey_4(%arg2), \TMP5
|
||||
PCLMULQDQ 0x11, \TMP5, \TMP6 # TMP6 = a1*b1
|
||||
PCLMULQDQ 0x00, \TMP5, \XMM1 # XMM1 = a0*b0
|
||||
movdqa HashKey_4_k(%arg2), \TMP4
|
||||
movdqu HashKey_4_k(%arg2), \TMP4
|
||||
PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
|
||||
movdqa \XMM1, \XMMDst
|
||||
movdqa \TMP2, \XMM1 # result in TMP6, XMMDst, XMM1
|
||||
|
@ -1418,10 +1418,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
|
|||
movdqa \XMM2, \TMP1
|
||||
pshufd $78, \XMM2, \TMP2
|
||||
pxor \XMM2, \TMP2
|
||||
movdqa HashKey_3(%arg2), \TMP5
|
||||
movdqu HashKey_3(%arg2), \TMP5
|
||||
PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
|
||||
PCLMULQDQ 0x00, \TMP5, \XMM2 # XMM2 = a0*b0
|
||||
movdqa HashKey_3_k(%arg2), \TMP4
|
||||
movdqu HashKey_3_k(%arg2), \TMP4
|
||||
PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
|
||||
pxor \TMP1, \TMP6
|
||||
pxor \XMM2, \XMMDst
|
||||
|
@ -1433,10 +1433,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
|
|||
movdqa \XMM3, \TMP1
|
||||
pshufd $78, \XMM3, \TMP2
|
||||
pxor \XMM3, \TMP2
|
||||
movdqa HashKey_2(%arg2), \TMP5
|
||||
movdqu HashKey_2(%arg2), \TMP5
|
||||
PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
|
||||
PCLMULQDQ 0x00, \TMP5, \XMM3 # XMM3 = a0*b0
|
||||
movdqa HashKey_2_k(%arg2), \TMP4
|
||||
movdqu HashKey_2_k(%arg2), \TMP4
|
||||
PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
|
||||
pxor \TMP1, \TMP6
|
||||
pxor \XMM3, \XMMDst
|
||||
|
@ -1446,10 +1446,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
|
|||
movdqa \XMM4, \TMP1
|
||||
pshufd $78, \XMM4, \TMP2
|
||||
pxor \XMM4, \TMP2
|
||||
movdqa HashKey(%arg2), \TMP5
|
||||
movdqu HashKey(%arg2), \TMP5
|
||||
PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
|
||||
PCLMULQDQ 0x00, \TMP5, \XMM4 # XMM4 = a0*b0
|
||||
movdqa HashKey_k(%arg2), \TMP4
|
||||
movdqu HashKey_k(%arg2), \TMP4
|
||||
PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
|
||||
pxor \TMP1, \TMP6
|
||||
pxor \XMM4, \XMMDst
|
||||
|
|
|
@@ -2465,7 +2465,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
 
 	perf_callchain_store(entry, regs->ip);
 
-	if (!current->mm)
+	if (!nmi_uaccess_okay())
 		return;
 
 	if (perf_callchain_user32(regs, entry))

@@ -33,7 +33,8 @@ extern inline unsigned long native_save_fl(void)
 	return flags;
 }
 
-static inline void native_restore_fl(unsigned long flags)
+extern inline void native_restore_fl(unsigned long flags);
+extern inline void native_restore_fl(unsigned long flags)
 {
 	asm volatile("push %0 ; popf"
 		     : /* no output */

@@ -2,6 +2,8 @@
 #ifndef _ASM_X86_PGTABLE_3LEVEL_H
 #define _ASM_X86_PGTABLE_3LEVEL_H
 
+#include <asm/atomic64_32.h>
+
 /*
  * Intel Physical Address Extension (PAE) Mode - three-level page
  * tables on PPro+ CPUs.
@@ -150,10 +152,7 @@ static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
 {
 	pte_t res;
 
-	/* xchg acts as a barrier before the setting of the high bits */
-	res.pte_low = xchg(&ptep->pte_low, 0);
-	res.pte_high = ptep->pte_high;
-	ptep->pte_high = 0;
+	res.pte = (pteval_t)arch_atomic64_xchg((atomic64_t *)ptep, 0);
 
 	return res;
 }

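Note: the rewrite matters because the old sequence cleared pte_low and
pte_high in two separate steps, leaving a window in which a concurrent
walker could observe a half-cleared PTE; a single 64-bit
arch_atomic64_xchg() closes that window.
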
@@ -132,6 +132,8 @@ struct cpuinfo_x86 {
 	/* Index into per_cpu list: */
 	u16			cpu_index;
 	u32			microcode;
+	/* Address space bits used by the cache internally */
+	u8			x86_cache_bits;
 	unsigned		initialized : 1;
 } __randomize_layout;
 
@@ -183,7 +185,7 @@ extern void cpu_detect(struct cpuinfo_x86 *c);
 
 static inline unsigned long long l1tf_pfn_limit(void)
 {
-	return BIT_ULL(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT);
+	return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT);
 }
 
 extern void early_cpu_init(void);

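Note: as a worked example of the new formula, with x86_cache_bits = 44 and
4 KiB pages (PAGE_SHIFT = 12), l1tf_pfn_limit() yields BIT_ULL(44 - 1 - 12)
= 2^31 PFNs, i.e. a cutoff at 8 TiB, rather than a value derived from a
possibly under-reported x86_phys_bits.
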
|
@ -39,6 +39,7 @@ extern void do_signal(struct pt_regs *regs);
|
|||
|
||||
#define __ARCH_HAS_SA_RESTORER
|
||||
|
||||
#include <asm/asm.h>
|
||||
#include <uapi/asm/sigcontext.h>
|
||||
|
||||
#ifdef __i386__
|
||||
|
@ -86,9 +87,9 @@ static inline int __const_sigismember(sigset_t *set, int _sig)
|
|||
|
||||
static inline int __gen_sigismember(sigset_t *set, int _sig)
|
||||
{
|
||||
unsigned char ret;
|
||||
asm("btl %2,%1\n\tsetc %0"
|
||||
: "=qm"(ret) : "m"(*set), "Ir"(_sig-1) : "cc");
|
||||
bool ret;
|
||||
asm("btl %2,%1" CC_SET(c)
|
||||
: CC_OUT(c) (ret) : "m"(*set), "Ir"(_sig-1));
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
|
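Note: a simplified illustration of what CC_SET()/CC_OUT() buy here; on
compilers with flag-output support the carry flag is returned directly
instead of via a setc instruction (the real macros in asm.h also provide a
setc fallback, which this sketch omits):

	#include <stdbool.h>

	static inline bool bit_test(const unsigned long *addr, int nr)
	{
		bool ret;

		asm("btl %2,%1"
		    : "=@ccc" (ret)		/* what CC_OUT(c) expands to */
		    : "m" (*addr), "Ir" (nr));
		return ret;
	}
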
@@ -111,6 +111,6 @@ static inline unsigned long caller_frame_pointer(void)
 	return (unsigned long)frame;
 }
 
-void show_opcodes(u8 *rip, const char *loglvl);
+void show_opcodes(struct pt_regs *regs, const char *loglvl);
 void show_ip(struct pt_regs *regs, const char *loglvl);
 #endif /* _ASM_X86_STACKTRACE_H */

@@ -175,8 +175,16 @@ struct tlb_state {
 	 * are on.  This means that it may not match current->active_mm,
 	 * which will contain the previous user mm when we're in lazy TLB
 	 * mode even if we've already switched back to swapper_pg_dir.
+	 *
+	 * During switch_mm_irqs_off(), loaded_mm will be set to
+	 * LOADED_MM_SWITCHING during the brief interrupts-off window
+	 * when CR3 and loaded_mm would otherwise be inconsistent. This
+	 * is for nmi_uaccess_okay()'s benefit.
 	 */
 	struct mm_struct *loaded_mm;
+
+#define LOADED_MM_SWITCHING ((struct mm_struct *)1)
+
 	u16 loaded_mm_asid;
 	u16 next_asid;
 	/* last user mm's ctx id */
@@ -246,6 +254,38 @@ struct tlb_state {
 };
 DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
 
+/*
+ * Blindly accessing user memory from NMI context can be dangerous
+ * if we're in the middle of switching the current user task or
+ * switching the loaded mm.  It can also be dangerous if we
+ * interrupted some kernel code that was temporarily using a
+ * different mm.
+ */
+static inline bool nmi_uaccess_okay(void)
+{
+	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
+	struct mm_struct *current_mm = current->mm;
+
+	VM_WARN_ON_ONCE(!loaded_mm);
+
+	/*
+	 * The condition we want to check is
+	 * current_mm->pgd == __va(read_cr3_pa()).  This may be slow, though,
+	 * if we're running in a VM with shadow paging, and nmi_uaccess_okay()
+	 * is supposed to be reasonably fast.
+	 *
+	 * Instead, we check the almost equivalent but somewhat conservative
+	 * condition below, and we rely on the fact that switch_mm_irqs_off()
+	 * sets loaded_mm to LOADED_MM_SWITCHING before writing to CR3.
+	 */
+	if (loaded_mm != current_mm)
+		return false;
+
+	VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa()));
+
+	return true;
+}
+
 /* Initialize cr4 shadow for this CPU. */
 static inline void cr4_init_shadow(void)
 {

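Note: a sketch of the intended call pattern for NMI-context code that wants
to touch user memory (it mirrors the copy_from_user_nmi() hunk later in
this merge); the helper name here is made up:

	static int nmi_read_user_word(const void __user *addr, unsigned long *val)
	{
		if (!nmi_uaccess_okay())
			return -EFAULT;	/* CR3 and loaded_mm may be inconsistent */

		return __copy_from_user_inatomic(val, addr, sizeof(*val));
	}
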
@@ -93,7 +93,7 @@ static inline unsigned int __getcpu(void)
 	 *
 	 * If RDPID is available, use it.
 	 */
-	alternative_io ("lsl %[p],%[seg]",
+	alternative_io ("lsl %[seg],%[p]",
 			".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */
 			X86_FEATURE_RDPID,
 			[p] "=a" (p), [seg] "r" (__PER_CPU_SEG));

@@ -684,8 +684,6 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
  * It means the size must be writable atomically and the address must be aligned
  * in a way that permits an atomic write. It also makes sure we fit on a single
  * page.
- *
- * Note: Must be called under text_mutex.
  */
 void *text_poke(void *addr, const void *opcode, size_t len)
 {
@@ -700,6 +698,8 @@ void *text_poke(void *addr, const void *opcode, size_t len)
 	 */
 	BUG_ON(!after_bootmem);
 
+	lockdep_assert_held(&text_mutex);
+
 	if (!core_kernel_text((unsigned long)addr)) {
 		pages[0] = vmalloc_to_page(addr);
 		pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
@@ -782,8 +782,6 @@ int poke_int3_handler(struct pt_regs *regs)
  *	- replace the first byte (int3) by the first byte of
  *	  replacing opcode
  *	- sync cores
- *
- * Note: must be called under text_mutex.
  */
 void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
 {
@@ -792,6 +790,9 @@ void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
 	bp_int3_handler = handler;
 	bp_int3_addr = (u8 *)addr + sizeof(int3);
 	bp_patching_in_progress = true;
+
+	lockdep_assert_held(&text_mutex);
+
 	/*
 	 * Corresponding read barrier in int3 notifier for making sure the
 	 * in_progress and handler are correctly ordered wrt. patching.

@@ -668,6 +668,45 @@ EXPORT_SYMBOL_GPL(l1tf_mitigation);
 enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
 EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
 
+/*
+ * These CPUs all support 44bits physical address space internally in the
+ * cache but CPUID can report a smaller number of physical address bits.
+ *
+ * The L1TF mitigation uses the top most address bit for the inversion of
+ * non present PTEs. When the installed memory reaches into the top most
+ * address bit due to memory holes, which has been observed on machines
+ * which report 36bits physical address bits and have 32G RAM installed,
+ * then the mitigation range check in l1tf_select_mitigation() triggers.
+ * This is a false positive because the mitigation is still possible due to
+ * the fact that the cache uses 44bit internally. Use the cache bits
+ * instead of the reported physical bits and adjust them on the affected
+ * machines to 44bit if the reported bits are less than 44.
+ */
+static void override_cache_bits(struct cpuinfo_x86 *c)
+{
+	if (c->x86 != 6)
+		return;
+
+	switch (c->x86_model) {
+	case INTEL_FAM6_NEHALEM:
+	case INTEL_FAM6_WESTMERE:
+	case INTEL_FAM6_SANDYBRIDGE:
+	case INTEL_FAM6_IVYBRIDGE:
+	case INTEL_FAM6_HASWELL_CORE:
+	case INTEL_FAM6_HASWELL_ULT:
+	case INTEL_FAM6_HASWELL_GT3E:
+	case INTEL_FAM6_BROADWELL_CORE:
+	case INTEL_FAM6_BROADWELL_GT3E:
+	case INTEL_FAM6_SKYLAKE_MOBILE:
+	case INTEL_FAM6_SKYLAKE_DESKTOP:
+	case INTEL_FAM6_KABYLAKE_MOBILE:
+	case INTEL_FAM6_KABYLAKE_DESKTOP:
+		if (c->x86_cache_bits < 44)
+			c->x86_cache_bits = 44;
+		break;
+	}
+}
+
 static void __init l1tf_select_mitigation(void)
 {
 	u64 half_pa;
@@ -675,6 +714,8 @@ static void __init l1tf_select_mitigation(void)
 	if (!boot_cpu_has_bug(X86_BUG_L1TF))
 		return;
 
+	override_cache_bits(&boot_cpu_data);
+
 	switch (l1tf_mitigation) {
 	case L1TF_MITIGATION_OFF:
 	case L1TF_MITIGATION_FLUSH_NOWARN:
@@ -694,11 +735,6 @@ static void __init l1tf_select_mitigation(void)
 		return;
 #endif
 
-	/*
-	 * This is extremely unlikely to happen because almost all
-	 * systems have far more MAX_PA/2 than RAM can be fit into
-	 * DIMM slots.
-	 */
 	half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
 	if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
 		pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");

|
@ -919,6 +919,7 @@ void get_cpu_address_sizes(struct cpuinfo_x86 *c)
|
|||
else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
|
||||
c->x86_phys_bits = 36;
|
||||
#endif
|
||||
c->x86_cache_bits = c->x86_phys_bits;
|
||||
}
|
||||
|
||||
static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
|
||||
|
|
|
@@ -150,6 +150,9 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
 	if (cpu_has(c, X86_FEATURE_HYPERVISOR))
 		return false;
 
+	if (c->x86 != 6)
+		return false;
+
 	for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
 		if (c->x86_model == spectre_bad_microcodes[i].model &&
 		    c->x86_stepping == spectre_bad_microcodes[i].stepping)

@@ -17,6 +17,7 @@
 #include <linux/bug.h>
 #include <linux/nmi.h>
 #include <linux/sysfs.h>
+#include <linux/kasan.h>
 
 #include <asm/cpu_entry_area.h>
 #include <asm/stacktrace.h>
@@ -89,14 +90,24 @@ static void printk_stack_address(unsigned long address, int reliable,
  * Thus, the 2/3rds prologue and 64 byte OPCODE_BUFSIZE is just a random
  * guesstimate in attempt to achieve all of the above.
  */
-void show_opcodes(u8 *rip, const char *loglvl)
+void show_opcodes(struct pt_regs *regs, const char *loglvl)
 {
 #define PROLOGUE_SIZE 42
 #define EPILOGUE_SIZE 21
 #define OPCODE_BUFSIZE (PROLOGUE_SIZE + 1 + EPILOGUE_SIZE)
 	u8 opcodes[OPCODE_BUFSIZE];
+	unsigned long prologue = regs->ip - PROLOGUE_SIZE;
+	bool bad_ip;
 
-	if (probe_kernel_read(opcodes, rip - PROLOGUE_SIZE, OPCODE_BUFSIZE)) {
+	/*
+	 * Make sure userspace isn't trying to trick us into dumping kernel
+	 * memory by pointing the userspace instruction pointer at it.
+	 */
+	bad_ip = user_mode(regs) &&
+		__chk_range_not_ok(prologue, OPCODE_BUFSIZE, TASK_SIZE_MAX);
+
+	if (bad_ip || probe_kernel_read(opcodes, (u8 *)prologue,
+					OPCODE_BUFSIZE)) {
 		printk("%sCode: Bad RIP value.\n", loglvl);
 	} else {
 		printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %"
@@ -112,7 +123,7 @@ void show_ip(struct pt_regs *regs, const char *loglvl)
 #else
 	printk("%sRIP: %04x:%pS\n", loglvl, (int)regs->cs, (void *)regs->ip);
 #endif
-	show_opcodes((u8 *)regs->ip, loglvl);
+	show_opcodes(regs, loglvl);
 }
 
 void show_iret_regs(struct pt_regs *regs)
@@ -346,7 +357,10 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
 	 * We're not going to return, but we might be on an IST stack or
 	 * have very little stack space left.  Rewind the stack and kill
 	 * the task.
+	 * Before we rewind the stack, we have to tell KASAN that we're going to
+	 * reuse the task stack and that existing poisons are invalid.
 	 */
+	kasan_unpoison_task_stack(current);
 	rewind_stack_do_exit(signr);
 }
 NOKPROBE_SYMBOL(oops_end);

|
@ -7,6 +7,8 @@
|
|||
#include <linux/uaccess.h>
|
||||
#include <linux/export.h>
|
||||
|
||||
#include <asm/tlbflush.h>
|
||||
|
||||
/*
|
||||
* We rely on the nested NMI work to allow atomic faults from the NMI path; the
|
||||
* nested NMI paths are careful to preserve CR2.
|
||||
|
@ -19,6 +21,9 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
|
|||
if (__range_not_ok(from, n, TASK_SIZE))
|
||||
return n;
|
||||
|
||||
if (!nmi_uaccess_okay())
|
||||
return n;
|
||||
|
||||
/*
|
||||
* Even though this function is typically called from NMI/IRQ context
|
||||
* disable pagefaults so that its behaviour is consistent even when
|
||||
|
|
|
@@ -837,7 +837,7 @@ show_signal_msg(struct pt_regs *regs, unsigned long error_code,
 
 	printk(KERN_CONT "\n");
 
-	show_opcodes((u8 *)regs->ip, loglvl);
+	show_opcodes(regs, loglvl);
 }
 
 static void

@@ -1420,6 +1420,29 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
	return 0;
}

+/*
+ * Machine check recovery code needs to change cache mode of poisoned
+ * pages to UC to avoid speculative access logging another error. But
+ * passing the address of the 1:1 mapping to set_memory_uc() is a fine
+ * way to encourage a speculative access. So we cheat and flip the top
+ * bit of the address. This works fine for the code that updates the
+ * page tables. But at the end of the process we need to flush the cache
+ * and the non-canonical address causes a #GP fault when used by the
+ * CLFLUSH instruction.
+ *
+ * But in the common case we already have a canonical address. This code
+ * will fix the top bit if needed and is a no-op otherwise.
+ */
+static inline unsigned long make_addr_canonical_again(unsigned long addr)
+{
+#ifdef CONFIG_X86_64
+	return (long)(addr << 1) >> 1;
+#else
+	return addr;
+#endif
+}
+
static int change_page_attr_set_clr(unsigned long *addr, int numpages,
				    pgprot_t mask_set, pgprot_t mask_clr,
				    int force_split, int in_flag,

@@ -1465,7 +1488,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
		 * Save address for cache flush. *addr is modified in the call
		 * to __change_page_attr_set_clr() below.
		 */
-		baddr = *addr;
+		baddr = make_addr_canonical_again(*addr);
	}

	/* Must avoid aliasing mappings in the highmem code */
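The (long)(addr << 1) >> 1 trick in make_addr_canonical_again() is plain sign extension: the left shift discards bit 63, and the arithmetic right shift replicates bit 62 back over bit 63, so a canonical address passes through unchanged while a top-bit-flipped one is repaired. A hedged user-space illustration (the address value is an example only):

	unsigned long addr  = 0xffff888012345000UL ^ (1UL << 63);	/* top bit flipped */
	unsigned long fixed = (unsigned long)((long)(addr << 1) >> 1);
	/* fixed == 0xffff888012345000UL again: bit 62 was 1, so bit 63 is restored */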
@@ -248,7 +248,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
 *
 * Returns a pointer to a PTE on success, or NULL on failure.
 */
-static __init pte_t *pti_user_pagetable_walk_pte(unsigned long address)
+static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
{
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
	pmd_t *pmd;
@@ -305,6 +305,10 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,

		choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);

+		/* Let nmi_uaccess_okay() know that we're changing CR3. */
+		this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING);
+		barrier();
+
		if (need_flush) {
			this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
			this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);

@@ -335,6 +339,9 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
	if (next != &init_mm)
		this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);

+	/* Make sure we write CR3 before loaded_mm. */
+	barrier();
+
	this_cpu_write(cpu_tlbstate.loaded_mm, next);
	this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
}
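The LOADED_MM_SWITCHING store plus barrier() above is what NMI code pairs against. A hedged sketch of the consumer side, reconstructed from the comments here rather than quoted from the kernel source:

	/* called from NMI context before touching user memory */
	static inline bool nmi_uaccess_okay_sketch(void)
	{
		struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);

		/* sentinel set while CR3 is in flux: refuse the access */
		if (loaded_mm == LOADED_MM_SWITCHING)
			return false;

		return loaded_mm == current->mm;
	}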
@@ -85,14 +85,10 @@ pgd_t * __init efi_call_phys_prolog(void)

void __init efi_call_phys_epilog(pgd_t *save_pgd)
{
-	struct desc_ptr gdt_descr;
-
-	gdt_descr.address = (unsigned long)get_cpu_gdt_rw(0);
-	gdt_descr.size = GDT_SIZE - 1;
-	load_gdt(&gdt_descr);
-
	load_cr3(save_pgd);
	__flush_tlb_all();

+	load_fixmap_gdt(0);
}

void __init efi_runtime_update_mappings(void)
@@ -435,14 +435,13 @@ static void xen_set_pud(pud_t *ptr, pud_t val)
static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	trace_xen_mmu_set_pte_atomic(ptep, pte);
-	set_64bit((u64 *)ptep, native_pte_val(pte));
+	__xen_set_pte(ptep, pte);
}

static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	trace_xen_mmu_pte_clear(mm, addr, ptep);
-	if (!xen_batched_set_pte(ptep, native_make_pte(0)))
-		native_pte_clear(mm, addr, ptep);
+	__xen_set_pte(ptep, native_make_pte(0));
}

static void xen_pmd_clear(pmd_t *pmdp)

@@ -1570,7 +1569,7 @@ static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
	pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
		       pte_val_ma(pte));
#endif
-	native_set_pte(ptep, pte);
+	__xen_set_pte(ptep, pte);
}

/* Early in boot, while setting up the initial pagetable, assume

@@ -2061,7 +2060,6 @@ void __init xen_relocate_p2m(void)
	pud_t *pud;
	pgd_t *pgd;
	unsigned long *new_p2m;
-	int save_pud;

	size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
	n_pte = roundup(size, PAGE_SIZE) >> PAGE_SHIFT;

@@ -2091,7 +2089,6 @@ void __init xen_relocate_p2m(void)

	pgd = __va(read_cr3_pa());
	new_p2m = (unsigned long *)(2 * PGDIR_SIZE);
-	save_pud = n_pud;
	for (idx_pud = 0; idx_pud < n_pud; idx_pud++) {
		pud = early_memremap(pud_phys, PAGE_SIZE);
		clear_page(pud);
@@ -123,16 +123,11 @@ static void rwb_wake_all(struct rq_wb *rwb)
	}
}

-static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
+static void wbt_rqw_done(struct rq_wb *rwb, struct rq_wait *rqw,
+			 enum wbt_flags wb_acct)
{
-	struct rq_wb *rwb = RQWB(rqos);
-	struct rq_wait *rqw;
	int inflight, limit;

-	if (!(wb_acct & WBT_TRACKED))
-		return;
-
-	rqw = get_rq_wait(rwb, wb_acct);
	inflight = atomic_dec_return(&rqw->inflight);

	/*

@@ -166,10 +161,22 @@ static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
		int diff = limit - inflight;

		if (!inflight || diff >= rwb->wb_background / 2)
-			wake_up(&rqw->wait);
+			wake_up_all(&rqw->wait);
	}
}

+static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
+{
+	struct rq_wb *rwb = RQWB(rqos);
+	struct rq_wait *rqw;
+
+	if (!(wb_acct & WBT_TRACKED))
+		return;
+
+	rqw = get_rq_wait(rwb, wb_acct);
+	wbt_rqw_done(rwb, rqw, wb_acct);
+}
+
/*
 * Called on completion of a request. Note that it's also called when
 * a request is merged, when the request gets freed.

@@ -481,6 +488,34 @@ static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
	return limit;
}

+struct wbt_wait_data {
+	struct wait_queue_entry wq;
+	struct task_struct *task;
+	struct rq_wb *rwb;
+	struct rq_wait *rqw;
+	unsigned long rw;
+	bool got_token;
+};
+
+static int wbt_wake_function(struct wait_queue_entry *curr, unsigned int mode,
+			     int wake_flags, void *key)
+{
+	struct wbt_wait_data *data = container_of(curr, struct wbt_wait_data,
+						  wq);
+
+	/*
+	 * If we fail to get a budget, return -1 to interrupt the wake up
+	 * loop in __wake_up_common.
+	 */
+	if (!rq_wait_inc_below(data->rqw, get_limit(data->rwb, data->rw)))
+		return -1;
+
+	data->got_token = true;
+	list_del_init(&curr->entry);
+	wake_up_process(data->task);
+	return 1;
+}
+
/*
 * Block if we will exceed our limit, or if we are currently waiting for
 * the timer to kick off queuing again.

@@ -491,31 +526,52 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
	__acquires(lock)
{
	struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
-	DECLARE_WAITQUEUE(wait, current);
+	struct wbt_wait_data data = {
+		.wq = {
+			.func	= wbt_wake_function,
+			.entry	= LIST_HEAD_INIT(data.wq.entry),
+		},
+		.task = current,
+		.rwb = rwb,
+		.rqw = rqw,
+		.rw = rw,
+	};
	bool has_sleeper;

	has_sleeper = wq_has_sleeper(&rqw->wait);
	if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw)))
		return;

-	add_wait_queue_exclusive(&rqw->wait, &wait);
+	prepare_to_wait_exclusive(&rqw->wait, &data.wq, TASK_UNINTERRUPTIBLE);
	do {
-		set_current_state(TASK_UNINTERRUPTIBLE);
-
-		if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw)))
+		if (data.got_token)
			break;

+		if (!has_sleeper &&
+		    rq_wait_inc_below(rqw, get_limit(rwb, rw))) {
+			finish_wait(&rqw->wait, &data.wq);
+
+			/*
+			 * We raced with wbt_wake_function() getting a token,
+			 * which means we now have two. Put our local token
+			 * and wake anyone else potentially waiting for one.
+			 */
+			if (data.got_token)
+				wbt_rqw_done(rwb, rqw, wb_acct);
+			break;
+		}
+
		if (lock) {
			spin_unlock_irq(lock);
			io_schedule();
			spin_lock_irq(lock);
		} else
			io_schedule();

		has_sleeper = false;
	} while (1);

-	__set_current_state(TASK_RUNNING);
-	remove_wait_queue(&rqw->wait, &wait);
+	finish_wait(&rqw->wait, &data.wq);
}

static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)

@@ -580,11 +636,6 @@ static void wbt_wait(struct rq_qos *rqos, struct bio *bio, spinlock_t *lock)
		return;
	}

	if (current_is_kswapd())
		flags |= WBT_KSWAPD;
	if (bio_op(bio) == REQ_OP_DISCARD)
		flags |= WBT_DISCARD;

	__wbt_wait(rwb, flags, bio->bi_opf, lock);

	if (!blk_stat_is_active(rwb->cb))
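wbt_wake_function() shows the general pattern of a wait_queue_entry with a custom wake callback: the waker hands the waiter a token at wake-up time, and returning -1 stops __wake_up_common()'s scan so an exclusive wakeup is never wasted on a waiter that cannot make progress. A hedged minimal sketch of the same shape (try_take_token() is an assumed helper, not a kernel API):

	struct token_waiter {
		struct wait_queue_entry wq;
		struct task_struct *task;
		bool got_token;
	};

	static int token_wake_fn(struct wait_queue_entry *curr, unsigned int mode,
				 int wake_flags, void *key)
	{
		struct token_waiter *w = container_of(curr, struct token_waiter, wq);

		if (!try_take_token())		/* assumed helper */
			return -1;		/* stop the wake-up loop */

		w->got_token = true;
		list_del_init(&curr->entry);
		wake_up_process(w->task);
		return 1;
	}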
@@ -37,7 +37,7 @@ struct bsg_device {
	struct request_queue *queue;
	spinlock_t lock;
	struct hlist_node dev_list;
-	atomic_t ref_count;
+	refcount_t ref_count;
	char name[20];
	int max_queue;
};

@@ -252,7 +252,7 @@ static int bsg_put_device(struct bsg_device *bd)

	mutex_lock(&bsg_mutex);

-	if (!atomic_dec_and_test(&bd->ref_count)) {
+	if (!refcount_dec_and_test(&bd->ref_count)) {
		mutex_unlock(&bsg_mutex);
		return 0;
	}

@@ -290,7 +290,7 @@ static struct bsg_device *bsg_add_device(struct inode *inode,

	bd->queue = rq;

-	atomic_set(&bd->ref_count, 1);
+	refcount_set(&bd->ref_count, 1);
	hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));

	strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1);

@@ -308,7 +308,7 @@ static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)

	hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) {
		if (bd->queue == q) {
-			atomic_inc(&bd->ref_count);
+			refcount_inc(&bd->ref_count);
			goto found;
		}
	}
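The atomic_t to refcount_t switch above buys overflow/underflow checking: refcount_t saturates and WARNs instead of wrapping, turning a potential use-after-free into a detectable leak. A hedged sketch of the usual get/put pattern (struct my_obj is illustrative):

	#include <linux/refcount.h>
	#include <linux/slab.h>

	struct my_obj {
		refcount_t ref;
	};

	static void my_obj_get(struct my_obj *obj)
	{
		refcount_inc(&obj->ref);		/* saturates on overflow */
	}

	static void my_obj_put(struct my_obj *obj)
	{
		if (refcount_dec_and_test(&obj->ref))	/* true only for the last ref */
			kfree(obj);
	}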
@@ -895,7 +895,6 @@ int elv_register(struct elevator_type *e)
	spin_lock(&elv_list_lock);
	if (elevator_find(e->elevator_name, e->uses_mq)) {
		spin_unlock(&elv_list_lock);
-		if (e->icq_cache)
		kmem_cache_destroy(e->icq_cache);
		return -EBUSY;
	}
@@ -256,14 +256,12 @@ static struct ata_port_operations pata_ftide010_port_ops = {
	.qc_issue = ftide010_qc_issue,
};

-static struct ata_port_info ftide010_port_info[] = {
-	{
+static struct ata_port_info ftide010_port_info = {
	.flags		= ATA_FLAG_SLAVE_POSS,
	.mwdma_mask	= ATA_MWDMA2,
	.udma_mask	= ATA_UDMA6,
	.pio_mask	= ATA_PIO4,
	.port_ops	= &pata_ftide010_port_ops,
-	},
};

#if IS_ENABLED(CONFIG_SATA_GEMINI)

@@ -349,6 +347,7 @@ static int pata_ftide010_gemini_cable_detect(struct ata_port *ap)
}

static int pata_ftide010_gemini_init(struct ftide010 *ftide,
+				     struct ata_port_info *pi,
				     bool is_ata1)
{
	struct device *dev = ftide->dev;

@@ -373,7 +372,13 @@ static int pata_ftide010_gemini_init(struct ftide010 *ftide,

	/* Flag port as SATA-capable */
	if (gemini_sata_bridge_enabled(sg, is_ata1))
-		ftide010_port_info[0].flags |= ATA_FLAG_SATA;
+		pi->flags |= ATA_FLAG_SATA;

+	/* This device has broken DMA, only PIO works */
+	if (of_machine_is_compatible("itian,sq201")) {
+		pi->mwdma_mask = 0;
+		pi->udma_mask = 0;
+	}
+
	/*
	 * We assume that a simple 40-wire cable is used in the PATA mode.

@@ -435,6 +440,7 @@ static int pata_ftide010_gemini_init(struct ftide010 *ftide,
}
#else
static int pata_ftide010_gemini_init(struct ftide010 *ftide,
+				     struct ata_port_info *pi,
				     bool is_ata1)
{
	return -ENOTSUPP;

@@ -446,7 +452,7 @@ static int pata_ftide010_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
-	const struct ata_port_info pi = ftide010_port_info[0];
+	struct ata_port_info pi = ftide010_port_info;
	const struct ata_port_info *ppi[] = { &pi, NULL };
	struct ftide010 *ftide;
	struct resource *res;

@@ -490,6 +496,7 @@ static int pata_ftide010_probe(struct platform_device *pdev)
	 * are ATA0. This will also set up the cable types.
	 */
	ret = pata_ftide010_gemini_init(ftide,
+					&pi,
					(res->start == 0x63400000));
	if (ret)
		goto err_dis_clk;
@@ -185,7 +185,7 @@ EXPORT_SYMBOL_GPL(of_pm_clk_add_clk);
int of_pm_clk_add_clks(struct device *dev)
{
	struct clk **clks;
-	unsigned int i, count;
+	int i, count;
	int ret;

	if (!dev || !dev->of_node)
@@ -83,6 +83,18 @@ module_param_named(max_persistent_grants, xen_blkif_max_pgrants, int, 0644);
MODULE_PARM_DESC(max_persistent_grants,
		 "Maximum number of grants to map persistently");

+/*
+ * How long a persistent grant is allowed to remain allocated without being in
+ * use. The time is in seconds, 0 means indefinitely long.
+ */
+static unsigned int xen_blkif_pgrant_timeout = 60;
+module_param_named(persistent_grant_unused_seconds, xen_blkif_pgrant_timeout,
+		   uint, 0644);
+MODULE_PARM_DESC(persistent_grant_unused_seconds,
+		 "Time in seconds an unused persistent grant is allowed to "
+		 "remain allocated. Default is 60, 0 means unlimited.");
+
/*
 * Maximum number of rings/queues blkback supports, allow as many queues as there
 * are CPUs if user has not specified a value.
@@ -123,6 +135,13 @@ module_param(log_stats, int, 0644);
/* Number of free pages to remove on each call to gnttab_free_pages */
#define NUM_BATCH_FREE_PAGES 10

+static inline bool persistent_gnt_timeout(struct persistent_gnt *persistent_gnt)
+{
+	return xen_blkif_pgrant_timeout &&
+	       (jiffies - persistent_gnt->last_used >=
+		HZ * xen_blkif_pgrant_timeout);
+}
+
static inline int get_free_page(struct xen_blkif_ring *ring, struct page **page)
{
	unsigned long flags;
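persistent_gnt_timeout() relies on unsigned jiffies arithmetic, which stays correct across counter wraparound. The same test can be written with the time_after_eq() helper; a hedged equivalent sketch:

	#include <linux/jiffies.h>

	static inline bool grant_expired(unsigned long last_used,
					 unsigned int timeout_secs)
	{
		return timeout_secs &&
		       time_after_eq(jiffies,
				     last_used + (unsigned long)timeout_secs * HZ);
	}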
@@ -236,8 +255,7 @@ static int add_persistent_gnt(struct xen_blkif_ring *ring,
		}
	}

-	bitmap_zero(persistent_gnt->flags, PERSISTENT_GNT_FLAGS_SIZE);
-	set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
+	persistent_gnt->active = true;
	/* Add new node and rebalance tree. */
	rb_link_node(&(persistent_gnt->node), parent, new);
	rb_insert_color(&(persistent_gnt->node), &ring->persistent_gnts);

@@ -261,11 +279,11 @@ static struct persistent_gnt *get_persistent_gnt(struct xen_blkif_ring *ring,
		else if (gref > data->gnt)
			node = node->rb_right;
		else {
-			if(test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) {
+			if (data->active) {
				pr_alert_ratelimited("requesting a grant already in use\n");
				return NULL;
			}
-			set_bit(PERSISTENT_GNT_ACTIVE, data->flags);
+			data->active = true;
			atomic_inc(&ring->persistent_gnt_in_use);
			return data;
		}

@@ -276,10 +294,10 @@ static struct persistent_gnt *get_persistent_gnt(struct xen_blkif_ring *ring,
static void put_persistent_gnt(struct xen_blkif_ring *ring,
			       struct persistent_gnt *persistent_gnt)
{
-	if(!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
+	if (!persistent_gnt->active)
		pr_alert_ratelimited("freeing a grant already unused\n");
-	set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
-	clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
+	persistent_gnt->last_used = jiffies;
+	persistent_gnt->active = false;
	atomic_dec(&ring->persistent_gnt_in_use);
}

@@ -371,26 +389,26 @@ static void purge_persistent_gnt(struct xen_blkif_ring *ring)
	struct persistent_gnt *persistent_gnt;
	struct rb_node *n;
	unsigned int num_clean, total;
-	bool scan_used = false, clean_used = false;
+	bool scan_used = false;
	struct rb_root *root;

-	if (ring->persistent_gnt_c < xen_blkif_max_pgrants ||
-	    (ring->persistent_gnt_c == xen_blkif_max_pgrants &&
-	    !ring->blkif->vbd.overflow_max_grants)) {
-		goto out;
-	}

	if (work_busy(&ring->persistent_purge_work)) {
		pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n");
		goto out;
	}

+	if (ring->persistent_gnt_c < xen_blkif_max_pgrants ||
+	    (ring->persistent_gnt_c == xen_blkif_max_pgrants &&
+	    !ring->blkif->vbd.overflow_max_grants)) {
+		num_clean = 0;
+	} else {
		num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
-		num_clean = ring->persistent_gnt_c - xen_blkif_max_pgrants + num_clean;
+		num_clean = ring->persistent_gnt_c - xen_blkif_max_pgrants +
+			    num_clean;
		num_clean = min(ring->persistent_gnt_c, num_clean);
-		if ((num_clean == 0) ||
-		    (num_clean > (ring->persistent_gnt_c - atomic_read(&ring->persistent_gnt_in_use))))
-			goto out;
+		pr_debug("Going to purge at least %u persistent grants\n",
+			 num_clean);
+	}

	/*
	 * At this point, we can assure that there will be no calls

@@ -401,9 +419,7 @@ static void purge_persistent_gnt(struct xen_blkif_ring *ring)
	 * number of grants.
	 */

-	total = num_clean;
-
-	pr_debug("Going to purge %u persistent grants\n", num_clean);
+	total = 0;

	BUG_ON(!list_empty(&ring->persistent_purge_list));
	root = &ring->persistent_gnts;

@@ -412,46 +428,37 @@ purge_list:
		BUG_ON(persistent_gnt->handle ==
			BLKBACK_INVALID_HANDLE);

-		if (clean_used) {
-			clear_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
+		if (persistent_gnt->active)
			continue;
-		}

-		if (test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
+		if (!scan_used && !persistent_gnt_timeout(persistent_gnt))
			continue;
-		if (!scan_used &&
-		    (test_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags)))
+		if (scan_used && total >= num_clean)
			continue;

		rb_erase(&persistent_gnt->node, root);
		list_add(&persistent_gnt->remove_node,
			 &ring->persistent_purge_list);
-		if (--num_clean == 0)
-			goto finished;
+		total++;
	}
	/*
-	 * If we get here it means we also need to start cleaning
+	 * Check whether we also need to start cleaning
	 * grants that were used since last purge in order to cope
	 * with the requested num
	 */
-	if (!scan_used && !clean_used) {
-		pr_debug("Still missing %u purged frames\n", num_clean);
+	if (!scan_used && total < num_clean) {
+		pr_debug("Still missing %u purged frames\n", num_clean - total);
		scan_used = true;
		goto purge_list;
	}
-finished:
-	if (!clean_used) {
-		pr_debug("Finished scanning for grants to clean, removing used flag\n");
-		clean_used = true;
-		goto purge_list;
-	}

-	ring->persistent_gnt_c -= (total - num_clean);
+	if (total) {
+		ring->persistent_gnt_c -= total;
		ring->blkif->vbd.overflow_max_grants = 0;

		/* We can defer this work */
		schedule_work(&ring->persistent_purge_work);
-	pr_debug("Purged %u/%u\n", (total - num_clean), total);
+		pr_debug("Purged %u/%u\n", num_clean, total);
+	}

out:
	return;
@@ -233,16 +233,6 @@ struct xen_vbd {

struct backend_info;

-/* Number of available flags */
-#define PERSISTENT_GNT_FLAGS_SIZE	2
-/* This persistent grant is currently in use */
-#define PERSISTENT_GNT_ACTIVE		0
-/*
- * This persistent grant has been used, this flag is set when we remove the
- * PERSISTENT_GNT_ACTIVE, to know that this grant has been used recently.
- */
-#define PERSISTENT_GNT_WAS_ACTIVE	1

/* Number of requests that we can fit in a ring */
#define XEN_BLKIF_REQS_PER_PAGE 32

@@ -250,7 +240,8 @@ struct persistent_gnt {
	struct page *page;
	grant_ref_t gnt;
	grant_handle_t handle;
-	DECLARE_BITMAP(flags, PERSISTENT_GNT_FLAGS_SIZE);
+	unsigned long last_used;
+	bool active;
	struct rb_node node;
	struct list_head remove_node;
};

@@ -278,7 +269,6 @@ struct xen_blkif_ring {
	wait_queue_head_t pending_free_wq;

	/* Tree to store persistent grants. */
-	spinlock_t pers_gnts_lock;
	struct rb_root persistent_gnts;
	unsigned int persistent_gnt_c;
	atomic_t persistent_gnt_in_use;
@@ -46,6 +46,7 @@
#include <linux/scatterlist.h>
#include <linux/bitmap.h>
#include <linux/list.h>
+#include <linux/workqueue.h>

#include <xen/xen.h>
#include <xen/xenbus.h>

@@ -121,6 +122,8 @@ static inline struct blkif_req *blkif_req(struct request *rq)

static DEFINE_MUTEX(blkfront_mutex);
static const struct block_device_operations xlvbd_block_fops;
+static struct delayed_work blkfront_work;
+static LIST_HEAD(info_list);

/*
 * Maximum number of segments in indirect requests, the actual value used by

@@ -216,6 +219,7 @@ struct blkfront_info
	/* Save uncomplete reqs and bios for migration. */
	struct list_head requests;
	struct bio_list bio_list;
+	struct list_head info_list;
};

static unsigned int nr_minors;

@@ -1759,6 +1763,12 @@ abort_transaction:
	return err;
}

+static void free_info(struct blkfront_info *info)
+{
+	list_del(&info->info_list);
+	kfree(info);
+}
+
/* Common code used when first setting up, and when resuming. */
static int talk_to_blkback(struct xenbus_device *dev,
			   struct blkfront_info *info)

@@ -1880,7 +1890,10 @@ again:
 destroy_blkring:
	blkif_free(info, 0);

-	kfree(info);
+	mutex_lock(&blkfront_mutex);
+	free_info(info);
+	mutex_unlock(&blkfront_mutex);

	dev_set_drvdata(&dev->dev, NULL);

	return err;

@@ -1991,6 +2004,10 @@ static int blkfront_probe(struct xenbus_device *dev,
	info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
	dev_set_drvdata(&dev->dev, info);

+	mutex_lock(&blkfront_mutex);
+	list_add(&info->info_list, &info_list);
+	mutex_unlock(&blkfront_mutex);
+
	return 0;
}

@@ -2301,6 +2318,12 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
	if (indirect_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST)
		indirect_segments = 0;
	info->max_indirect_segments = indirect_segments;

+	if (info->feature_persistent) {
+		mutex_lock(&blkfront_mutex);
+		schedule_delayed_work(&blkfront_work, HZ * 10);
+		mutex_unlock(&blkfront_mutex);
+	}
}

/*

@@ -2482,7 +2505,9 @@ static int blkfront_remove(struct xenbus_device *xbdev)
	mutex_unlock(&info->mutex);

	if (!bdev) {
-		kfree(info);
+		mutex_lock(&blkfront_mutex);
+		free_info(info);
+		mutex_unlock(&blkfront_mutex);
		return 0;
	}

@@ -2502,7 +2527,9 @@ static int blkfront_remove(struct xenbus_device *xbdev)
	if (info && !bdev->bd_openers) {
		xlvbd_release_gendisk(info);
		disk->private_data = NULL;
-		kfree(info);
+		mutex_lock(&blkfront_mutex);
+		free_info(info);
+		mutex_unlock(&blkfront_mutex);
	}

	mutex_unlock(&bdev->bd_mutex);

@@ -2585,7 +2612,7 @@ static void blkif_release(struct gendisk *disk, fmode_t mode)
		dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
		xlvbd_release_gendisk(info);
		disk->private_data = NULL;
-		kfree(info);
+		free_info(info);
	}

out:

@@ -2618,6 +2645,61 @@ static struct xenbus_driver blkfront_driver = {
	.is_ready = blkfront_is_ready,
};

+static void purge_persistent_grants(struct blkfront_info *info)
+{
+	unsigned int i;
+	unsigned long flags;
+
+	for (i = 0; i < info->nr_rings; i++) {
+		struct blkfront_ring_info *rinfo = &info->rinfo[i];
+		struct grant *gnt_list_entry, *tmp;
+
+		spin_lock_irqsave(&rinfo->ring_lock, flags);
+
+		if (rinfo->persistent_gnts_c == 0) {
+			spin_unlock_irqrestore(&rinfo->ring_lock, flags);
+			continue;
+		}
+
+		list_for_each_entry_safe(gnt_list_entry, tmp, &rinfo->grants,
+					 node) {
+			if (gnt_list_entry->gref == GRANT_INVALID_REF ||
+			    gnttab_query_foreign_access(gnt_list_entry->gref))
+				continue;
+
+			list_del(&gnt_list_entry->node);
+			gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL);
+			rinfo->persistent_gnts_c--;
+			__free_page(gnt_list_entry->page);
+			kfree(gnt_list_entry);
+		}
+
+		spin_unlock_irqrestore(&rinfo->ring_lock, flags);
+	}
+}
+
+static void blkfront_delay_work(struct work_struct *work)
+{
+	struct blkfront_info *info;
+	bool need_schedule_work = false;
+
+	mutex_lock(&blkfront_mutex);
+
+	list_for_each_entry(info, &info_list, info_list) {
+		if (info->feature_persistent) {
+			need_schedule_work = true;
+			mutex_lock(&info->mutex);
+			purge_persistent_grants(info);
+			mutex_unlock(&info->mutex);
+		}
+	}
+
+	if (need_schedule_work)
+		schedule_delayed_work(&blkfront_work, HZ * 10);
+
+	mutex_unlock(&blkfront_mutex);
+}
+
static int __init xlblk_init(void)
{
	int ret;

@@ -2626,6 +2708,15 @@ static int __init xlblk_init(void)
	if (!xen_domain())
		return -ENODEV;

+	if (!xen_has_pv_disk_devices())
+		return -ENODEV;
+
+	if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
+		pr_warn("xen_blk: can't get major %d with name %s\n",
+			XENVBD_MAJOR, DEV_NAME);
+		return -ENODEV;
+	}
+
	if (xen_blkif_max_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST)
		xen_blkif_max_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;

@@ -2641,14 +2732,7 @@ static int __init xlblk_init(void)
		xen_blkif_max_queues = nr_cpus;
	}

-	if (!xen_has_pv_disk_devices())
-		return -ENODEV;
-
-	if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
-		printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n",
-		       XENVBD_MAJOR, DEV_NAME);
-		return -ENODEV;
-	}
+	INIT_DELAYED_WORK(&blkfront_work, blkfront_delay_work);

	ret = xenbus_register_frontend(&blkfront_driver);
	if (ret) {

@@ -2663,6 +2747,8 @@ module_init(xlblk_init);

static void __exit xlblk_exit(void)
{
+	cancel_delayed_work_sync(&blkfront_work);
+
	xenbus_unregister_driver(&blkfront_driver);
	unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
	kfree(minors);
@@ -498,32 +498,29 @@ static int sysc_check_registers(struct sysc *ddata)

/**
 * sysc_ioremap - ioremap register space for the interconnect target module
- * @ddata: deviec driver data
+ * @ddata: device driver data
 *
 * Note that the interconnect target module registers can be anywhere
- * within the first child device address space. For example, SGX has
- * them at offset 0x1fc00 in the 32MB module address space. We just
- * what we need around the interconnect target module registers.
+ * within the interconnect target module range. For example, SGX has
+ * them at offset 0x1fc00 in the 32MB module address space. And cpsw
+ * has them at offset 0x1200 in the CPSW_WR child. Usually the
+ * interconnect target module registers are at the beginning of
+ * the module range though.
 */
static int sysc_ioremap(struct sysc *ddata)
{
-	u32 size = 0;
+	int size;

-	if (ddata->offsets[SYSC_SYSSTATUS] >= 0)
-		size = ddata->offsets[SYSC_SYSSTATUS];
-	else if (ddata->offsets[SYSC_SYSCONFIG] >= 0)
-		size = ddata->offsets[SYSC_SYSCONFIG];
-	else if (ddata->offsets[SYSC_REVISION] >= 0)
-		size = ddata->offsets[SYSC_REVISION];
-	else
+	size = max3(ddata->offsets[SYSC_REVISION],
+		    ddata->offsets[SYSC_SYSCONFIG],
+		    ddata->offsets[SYSC_SYSSTATUS]);
+
+	if (size < 0 || (size + sizeof(u32)) > ddata->module_size)
		return -EINVAL;

-	size &= 0xfff00;
-	size += SZ_256;
-
	ddata->module_va = devm_ioremap(ddata->dev,
					ddata->module_pa,
-					size);
+					size + sizeof(u32));
	if (!ddata->module_va)
		return -EIO;

@@ -1224,10 +1221,10 @@ static int sysc_child_suspend_noirq(struct device *dev)
	if (!pm_runtime_status_suspended(dev)) {
		error = pm_generic_runtime_suspend(dev);
		if (error) {
-			dev_err(dev, "%s error at %i: %i\n",
+			dev_warn(dev, "%s busy at %i: %i\n",
				__func__, __LINE__, error);

-			return error;
+			return 0;
		}

		error = sysc_runtime_suspend(ddata->dev);
@@ -2546,7 +2546,7 @@ static int cdrom_ioctl_drive_status(struct cdrom_device_info *cdi,
	if (!CDROM_CAN(CDC_SELECT_DISC) ||
	    (arg == CDSL_CURRENT || arg == CDSL_NONE))
		return cdi->ops->drive_status(cdi, CDSL_CURRENT);
-	if (((int)arg >= cdi->capacity))
+	if (arg >= cdi->capacity)
		return -EINVAL;
	return cdrom_slot_status(cdi, arg);
}
@@ -558,8 +558,8 @@ static void __init npcm7xx_clk_init(struct device_node *clk_np)
	if (!clk_base)
		goto npcm7xx_init_error;

-	npcm7xx_clk_data = kzalloc(sizeof(*npcm7xx_clk_data->hws) *
-		NPCM7XX_NUM_CLOCKS + sizeof(npcm7xx_clk_data), GFP_KERNEL);
+	npcm7xx_clk_data = kzalloc(struct_size(npcm7xx_clk_data, hws,
+					       NPCM7XX_NUM_CLOCKS), GFP_KERNEL);
	if (!npcm7xx_clk_data)
		goto npcm7xx_init_np_err;
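struct_size() computes sizeof-header plus n trailing flexible-array elements with overflow checking; it also fixes the old expression's use of sizeof(npcm7xx_clk_data) — the size of a pointer — where the struct size was intended. A hedged generic sketch of the pattern:

	#include <linux/overflow.h>
	#include <linux/slab.h>

	struct table {			/* illustrative */
		unsigned int num;
		void *items[];		/* flexible array member */
	};

	static struct table *table_alloc(unsigned int n)
	{
		struct table *t;

		/* header plus n trailing elements, overflow-checked */
		t = kzalloc(struct_size(t, items, n), GFP_KERNEL);
		if (t)
			t->num = n;
		return t;
	}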
@@ -46,7 +46,7 @@ static int st_clk_probe(struct platform_device *pdev)
			clk_oscout1_parents, ARRAY_SIZE(clk_oscout1_parents),
			0, st_data->base + CLKDRVSTR2, OSCOUT1CLK25MHZ, 3, 0, NULL);

-	clk_set_parent(hws[ST_CLK_MUX]->clk, hws[ST_CLK_25M]->clk);
+	clk_set_parent(hws[ST_CLK_MUX]->clk, hws[ST_CLK_48M]->clk);

	hws[ST_CLK_GATE] = clk_hw_register_gate(NULL, "oscout1", "oscout1_mux",
			0, st_data->base + MISCCLKCNTL1, OSCCLKENB,
@@ -379,9 +379,20 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
		if (idx == -1)
			idx = i; /* first enabled state */
		if (s->target_residency > data->predicted_us) {
-			if (!tick_nohz_tick_stopped())
+			if (data->predicted_us < TICK_USEC)
				break;

+			if (!tick_nohz_tick_stopped()) {
+				/*
+				 * If the state selected so far is shallow,
+				 * waking up early won't hurt, so retain the
+				 * tick in that case and let the governor run
+				 * again in the next iteration of the loop.
+				 */
+				expected_interval = drv->states[idx].target_residency;
+				break;
+			}
+
			/*
			 * If the state selected so far is shallow and this
			 * state's target residency matches the time till the
@@ -679,10 +679,8 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
	int ret = 0;

	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
-		crypto_ablkcipher_set_flags(ablkcipher,
-					    CRYPTO_TFM_RES_BAD_KEY_LEN);
		dev_err(jrdev, "key size mismatch\n");
-		return -EINVAL;
+		goto badkey;
	}

	ctx->cdata.keylen = keylen;

@@ -715,7 +713,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
	return ret;
 badkey:
	crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
-	return 0;
+	return -EINVAL;
}

/*
@@ -71,8 +71,8 @@ static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
-	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
-	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
+	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
+	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,

@@ -90,8 +90,8 @@ static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
-	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
-	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
+	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
+	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

/* RSA Job Completion handler */

@@ -417,13 +417,13 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
		goto unmap_p;
	}

-	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
+	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_q;
	}

-	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
+	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;

@@ -451,7 +451,7 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
	return 0;

 unmap_tmp1:
-	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
+	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
 unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
 unmap_p:

@@ -504,13 +504,13 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
		goto unmap_dq;
	}

-	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
+	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_qinv;
	}

-	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
+	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;

@@ -538,7 +538,7 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
	return 0;

 unmap_tmp1:
-	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
+	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
 unmap_qinv:
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
 unmap_dq:
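The direction change above matters because tmp1/tmp2 are scratch buffers the accelerator both reads and writes; mapping them DMA_TO_DEVICE can leave the CPU looking at stale data on non-coherent platforms. Map and unmap must also agree on direction. A hedged sketch of the round trip:

	static int scratch_roundtrip(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t handle;

		/* the device both reads and writes the scratch area */
		handle = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, handle))
			return -ENOMEM;

		/* ... hand 'handle' to the device, wait for completion ... */

		/* unmap with the same direction used at map time */
		dma_unmap_single(dev, handle, len, DMA_BIDIRECTIONAL);
		return 0;
	}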
@@ -190,7 +190,8 @@ static void caam_jr_dequeue(unsigned long devarg)
		BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0);

		/* Unmap just-run descriptor so we can post-process */
-		dma_unmap_single(dev, jrp->outring[hw_idx].desc,
+		dma_unmap_single(dev,
+				 caam_dma_to_cpu(jrp->outring[hw_idx].desc),
				 jrp->entinfo[sw_idx].desc_size,
				 DMA_TO_DEVICE);
@@ -35,6 +35,7 @@ struct nitrox_cmdq {
	/* requests in backlog queues */
	atomic_t backlog_count;

+	int write_idx;
	/* command size 32B/64B */
	u8 instr_size;
	u8 qno;

@@ -87,7 +88,7 @@ struct nitrox_bh {
	struct bh_data *slc;
};

-/* NITROX-5 driver state */
+/* NITROX-V driver state */
#define NITROX_UCODE_LOADED	0
#define NITROX_READY		1
@@ -36,6 +36,7 @@ static int cmdq_common_init(struct nitrox_cmdq *cmdq)
	cmdq->head = PTR_ALIGN(cmdq->head_unaligned, PKT_IN_ALIGN);
	cmdq->dma = PTR_ALIGN(cmdq->dma_unaligned, PKT_IN_ALIGN);
	cmdq->qsize = (qsize + PKT_IN_ALIGN);
+	cmdq->write_idx = 0;

	spin_lock_init(&cmdq->response_lock);
	spin_lock_init(&cmdq->cmdq_lock);
@@ -42,6 +42,16 @@
 * Invalid flag options in AES-CCM IV.
 */

+static inline int incr_index(int index, int count, int max)
+{
+	if ((index + count) >= max)
+		index = index + count - max;
+	else
+		index += count;
+
+	return index;
+}
+
/**
 * dma_free_sglist - unmap and free the sg lists.
 * @ndev: N5 device
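incr_index() advances a ring-buffer slot without a modulo; because count is bounded by the queue length, a single conditional subtraction is enough to wrap back into [0, max). A hedged usage sketch (the queue length and loop are illustrative):

	int idx = 0, qlen = 8;
	int i;

	/* post three commands into consecutive slots, wrapping at qlen */
	for (i = 0; i < 3; i++) {
		/* ... write a command at slot 'idx' ... */
		idx = incr_index(idx, 1, qlen);
	}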
@@ -426,30 +436,29 @@ static void post_se_instr(struct nitrox_softreq *sr,
			  struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = sr->ndev;
-	union nps_pkt_in_instr_baoff_dbell pkt_in_baoff_dbell;
-	u64 offset;
+	int idx;
	u8 *ent;

	spin_lock_bh(&cmdq->cmdq_lock);

-	/* get the next write offset */
-	offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(cmdq->qno);
-	pkt_in_baoff_dbell.value = nitrox_read_csr(ndev, offset);
+	idx = cmdq->write_idx;
	/* copy the instruction */
-	ent = cmdq->head + pkt_in_baoff_dbell.s.aoff;
+	ent = cmdq->head + (idx * cmdq->instr_size);
	memcpy(ent, &sr->instr, cmdq->instr_size);
-	/* flush the command queue updates */
-	dma_wmb();

-	sr->tstamp = jiffies;
	atomic_set(&sr->status, REQ_POSTED);
	response_list_add(sr, cmdq);
+	sr->tstamp = jiffies;
+	/* flush the command queue updates */
+	dma_wmb();

	/* Ring doorbell with count 1 */
	writeq(1, cmdq->dbell_csr_addr);
	/* orders the doorbell rings */
	mmiowb();

+	cmdq->write_idx = incr_index(idx, 1, ndev->qlen);
+
	spin_unlock_bh(&cmdq->cmdq_lock);
}
@@ -459,6 +468,9 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
	struct nitrox_softreq *sr, *tmp;
	int ret = 0;

+	if (!atomic_read(&cmdq->backlog_count))
+		return 0;
+
	spin_lock_bh(&cmdq->backlog_lock);

	list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
@@ -466,7 +478,7 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq)

		/* submit until space available */
		if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
-			ret = -EBUSY;
+			ret = -ENOSPC;
			break;
		}
		/* delete from backlog list */
@@ -491,23 +503,20 @@ static int nitrox_enqueue_request(struct nitrox_softreq *sr)
{
	struct nitrox_cmdq *cmdq = sr->cmdq;
	struct nitrox_device *ndev = sr->ndev;
-	int ret = -EBUSY;

+	/* try to post backlog requests */
+	post_backlog_cmds(cmdq);
+
	if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
		if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
-			return -EAGAIN;
+			return -ENOSPC;
		/* add to backlog list */
		backlog_list_add(sr, cmdq);
-	} else {
-		ret = post_backlog_cmds(cmdq);
-		if (ret) {
-			backlog_list_add(sr, cmdq);
-			return ret;
+		return -EBUSY;
	}
-		post_se_instr(sr, cmdq);
-		ret = -EINPROGRESS;
-	}
-	return ret;
+	post_se_instr(sr, cmdq);

+	return -EINPROGRESS;
}

/**
@@ -624,11 +633,9 @@ int nitrox_process_se_request(struct nitrox_device *ndev,
	 */
	sr->instr.fdata[0] = *((u64 *)&req->gph);
	sr->instr.fdata[1] = 0;
-	/* flush the soft_req changes before posting the cmd */
-	wmb();

	ret = nitrox_enqueue_request(sr);
-	if (ret == -EAGAIN)
+	if (ret == -ENOSPC)
		goto send_fail;

	return ret;
@@ -96,6 +96,10 @@ enum csk_flags {
	CSK_CONN_INLINE,	/* Connection on HW */
};

+enum chtls_cdev_state {
+	CHTLS_CDEV_STATE_UP = 1
+};
+
struct listen_ctx {
	struct sock *lsk;
	struct chtls_dev *cdev;
@@ -146,6 +150,7 @@ struct chtls_dev {
	unsigned int send_page_order;
	int max_host_sndbuf;
	struct key_map kmap;
+	unsigned int cdev_state;
};

struct chtls_hws {
@@ -160,6 +160,7 @@ static void chtls_register_dev(struct chtls_dev *cdev)
	tlsdev->hash = chtls_create_hash;
	tlsdev->unhash = chtls_destroy_hash;
	tls_register_device(&cdev->tlsdev);
+	cdev->cdev_state = CHTLS_CDEV_STATE_UP;
}

static void chtls_unregister_dev(struct chtls_dev *cdev)
@@ -281,8 +282,10 @@ static void chtls_free_all_uld(void)
	struct chtls_dev *cdev, *tmp;

	mutex_lock(&cdev_mutex);
-	list_for_each_entry_safe(cdev, tmp, &cdev_list, list)
+	list_for_each_entry_safe(cdev, tmp, &cdev_list, list) {
+		if (cdev->cdev_state == CHTLS_CDEV_STATE_UP)
			chtls_free_uld(cdev);
+	}
	mutex_unlock(&cdev_mutex);
}
@@ -107,24 +107,23 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
		ret = crypto_skcipher_encrypt(req);
		skcipher_request_zero(req);
	} else {
-		preempt_disable();
-		pagefault_disable();
-		enable_kernel_vsx();
-
		blkcipher_walk_init(&walk, dst, src, nbytes);
		ret = blkcipher_walk_virt(desc, &walk);
		while ((nbytes = walk.nbytes)) {
+			preempt_disable();
+			pagefault_disable();
+			enable_kernel_vsx();
			aes_p8_cbc_encrypt(walk.src.virt.addr,
					   walk.dst.virt.addr,
					   nbytes & AES_BLOCK_MASK,
					   &ctx->enc_key, walk.iv, 1);
-			nbytes &= AES_BLOCK_SIZE - 1;
-			ret = blkcipher_walk_done(desc, &walk, nbytes);
-		}
-
-		disable_kernel_vsx();
-		pagefault_enable();
-		preempt_enable();
+			disable_kernel_vsx();
+			pagefault_enable();
+			preempt_enable();
+
+			nbytes &= AES_BLOCK_SIZE - 1;
+			ret = blkcipher_walk_done(desc, &walk, nbytes);
+		}
	}

	return ret;
@@ -147,24 +146,23 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
		ret = crypto_skcipher_decrypt(req);
		skcipher_request_zero(req);
	} else {
-		preempt_disable();
-		pagefault_disable();
-		enable_kernel_vsx();
-
		blkcipher_walk_init(&walk, dst, src, nbytes);
		ret = blkcipher_walk_virt(desc, &walk);
		while ((nbytes = walk.nbytes)) {
+			preempt_disable();
+			pagefault_disable();
+			enable_kernel_vsx();
			aes_p8_cbc_encrypt(walk.src.virt.addr,
					   walk.dst.virt.addr,
					   nbytes & AES_BLOCK_MASK,
					   &ctx->dec_key, walk.iv, 0);
-			nbytes &= AES_BLOCK_SIZE - 1;
-			ret = blkcipher_walk_done(desc, &walk, nbytes);
-		}
-
-		disable_kernel_vsx();
-		pagefault_enable();
-		preempt_enable();
+			disable_kernel_vsx();
+			pagefault_enable();
+			preempt_enable();
+
+			nbytes &= AES_BLOCK_SIZE - 1;
+			ret = blkcipher_walk_done(desc, &walk, nbytes);
+		}
	}

	return ret;
@@ -116,32 +116,39 @@ static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
		ret = enc? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req);
		skcipher_request_zero(req);
	} else {
+		blkcipher_walk_init(&walk, dst, src, nbytes);
+
+		ret = blkcipher_walk_virt(desc, &walk);
+
		preempt_disable();
		pagefault_disable();
		enable_kernel_vsx();

-		blkcipher_walk_init(&walk, dst, src, nbytes);
-
-		ret = blkcipher_walk_virt(desc, &walk);
		iv = walk.iv;
		memset(tweak, 0, AES_BLOCK_SIZE);
		aes_p8_encrypt(iv, tweak, &ctx->tweak_key);

+		disable_kernel_vsx();
+		pagefault_enable();
+		preempt_enable();
+
		while ((nbytes = walk.nbytes)) {
+			preempt_disable();
+			pagefault_disable();
+			enable_kernel_vsx();
			if (enc)
				aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
						   nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak);
			else
				aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
						   nbytes & AES_BLOCK_MASK, &ctx->dec_key, NULL, tweak);
+			disable_kernel_vsx();
+			pagefault_enable();
+			preempt_enable();
+
			nbytes &= AES_BLOCK_SIZE - 1;
			ret = blkcipher_walk_done(desc, &walk, nbytes);
		}
-
-		disable_kernel_vsx();
-		pagefault_enable();
-		preempt_enable();
	}
	return ret;
}
@@ -1012,13 +1012,9 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
		if (r)
			return r;

-		if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE) {
-			parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;
-			if (!parser->ctx->preamble_presented) {
-				parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
-				parser->ctx->preamble_presented = true;
-			}
-		}
+		if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
+			parser->job->preamble_status |=
+				AMDGPU_PREAMBLE_IB_PRESENT;

		if (parser->ring && parser->ring != ring)
			return -EINVAL;
@@ -1207,26 +1203,24 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,

	int r;

+	job = p->job;
+	p->job = NULL;
+
+	r = drm_sched_job_init(&job->base, entity, p->filp);
+	if (r)
+		goto error_unlock;
+
	/* No memory allocation is allowed while holding the mn lock */
	amdgpu_mn_lock(p->mn);
	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		struct amdgpu_bo *bo = e->robj;

		if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
-			amdgpu_mn_unlock(p->mn);
-			return -ERESTARTSYS;
+			r = -ERESTARTSYS;
+			goto error_abort;
		}
	}

-	job = p->job;
-	p->job = NULL;
-
-	r = drm_sched_job_init(&job->base, entity, p->filp);
-	if (r) {
-		amdgpu_job_free(job);
-		amdgpu_mn_unlock(p->mn);
-		return r;
-	}
-
	job->owner = p->filp;
	p->fence = dma_fence_get(&job->base.s_fence->finished);
@@ -1241,6 +1235,12 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,

	amdgpu_cs_post_dependencies(p);

+	if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
+	    !p->ctx->preamble_presented) {
+		job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
+		p->ctx->preamble_presented = true;
+	}
+
	cs->out.handle = seq;
	job->uf_sequence = seq;
@@ -1258,6 +1258,15 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
	amdgpu_mn_unlock(p->mn);

	return 0;
+
+error_abort:
+	dma_fence_put(&job->base.s_fence->finished);
+	job->base.s_fence = NULL;
+
+error_unlock:
+	amdgpu_job_free(job);
+	amdgpu_mn_unlock(p->mn);
+	return r;
}

int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
@@ -164,8 +164,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
		return r;
	}

+	need_ctx_switch = ring->current_ctx != fence_ctx;
	if (ring->funcs->emit_pipeline_sync && job &&
	    ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) ||
+	     (amdgpu_sriov_vf(adev) && need_ctx_switch) ||
	     amdgpu_vm_need_pipeline_sync(ring, job))) {
		need_pipe_sync = true;
		dma_fence_put(tmp);
@@ -196,7 +198,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
	}

	skip_preamble = ring->current_ctx == fence_ctx;
-	need_ctx_switch = ring->current_ctx != fence_ctx;
	if (job && ring->funcs->emit_cntxcntl) {
		if (need_ctx_switch)
			status |= AMDGPU_HAVE_CTX_SWITCH;
@@ -1932,14 +1932,6 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
		amdgpu_fence_wait_empty(ring);
	}

-	mutex_lock(&adev->pm.mutex);
-	/* update battery/ac status */
-	if (power_supply_is_system_supplied() > 0)
-		adev->pm.ac_power = true;
-	else
-		adev->pm.ac_power = false;
-	mutex_unlock(&adev->pm.mutex);
-
	if (adev->powerplay.pp_funcs->dispatch_tasks) {
		if (!amdgpu_device_has_dc_support(adev)) {
			mutex_lock(&adev->pm.mutex);
@@ -172,6 +172,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
	 * is validated on next vm use to avoid fault.
	 * */
	list_move_tail(&base->vm_status, &vm->evicted);
+	base->moved = true;
}

/**
@@ -369,7 +370,6 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
	uint64_t addr;
	int r;

-	addr = amdgpu_bo_gpu_offset(bo);
	entries = amdgpu_bo_size(bo) / 8;

	if (pte_support_ats) {
@@ -401,6 +401,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
	if (r)
		goto error;

+	addr = amdgpu_bo_gpu_offset(bo);
	if (ats_entries) {
		uint64_t ats_value;
@@ -2483,28 +2484,52 @@ static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
 * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
 *
 * @adev: amdgpu_device pointer
- * @vm_size: the default vm size if it's set auto
+ * @min_vm_size: the minimum vm size in GB if it's set auto
 * @fragment_size_default: Default PTE fragment size
 * @max_level: max VMPT level
 * @max_bits: max address space size in bits
 *
 */
-void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
+void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
			   uint32_t fragment_size_default, unsigned max_level,
			   unsigned max_bits)
{
+	unsigned int max_size = 1 << (max_bits - 30);
+	unsigned int vm_size;
	uint64_t tmp;

	/* adjust vm size first */
	if (amdgpu_vm_size != -1) {
-		unsigned max_size = 1 << (max_bits - 30);
-
		vm_size = amdgpu_vm_size;
		if (vm_size > max_size) {
			dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
				 amdgpu_vm_size, max_size);
			vm_size = max_size;
		}
+	} else {
+		struct sysinfo si;
+		unsigned int phys_ram_gb;
+
+		/* Optimal VM size depends on the amount of physical
+		 * RAM available. Underlying requirements and
+		 * assumptions:
+		 *
+		 *  - Need to map system memory and VRAM from all GPUs
+		 *     - VRAM from other GPUs not known here
+		 *     - Assume VRAM <= system memory
+		 *  - On GFX8 and older, VM space can be segmented for
+		 *    different MTYPEs
+		 *  - Need to allow room for fragmentation, guard pages etc.
+		 *
+		 * This adds up to a rough guess of system memory x3.
+		 * Round up to power of two to maximize the available
+		 * VM size with the given page table size.
+		 */
+		si_meminfo(&si);
+		phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
+			       (1 << 30) - 1) >> 30;
+		vm_size = roundup_pow_of_two(
+			min(max(phys_ram_gb * 3, min_vm_size), max_size));
	}

	adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
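Worked example of the sizing heuristic above, under assumed values: with 12 GB of system RAM, min_vm_size of 8 GB, and a 48-bit address space (max_size = 2^18 GB), the rough guess is 12 * 3 = 36 GB, clamped into [8, 2^18] and rounded up to the next power of two:

	vm_size = roundup_pow_of_two(min(max(12 * 3, 8), 1 << 18))
	        = roundup_pow_of_two(36)
	        = 64	/* GB */
	max_pfn = (uint64_t)64 << 18 = 16777216	/* 64 GB of 4 KiB pages */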
@@ -321,7 +321,7 @@ struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va);
-void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
+void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
			   uint32_t fragment_size_default, unsigned max_level,
			   unsigned max_bits);
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
@@ -5664,6 +5664,11 @@ static int gfx_v8_0_set_powergating_state(void *handle,
	if (amdgpu_sriov_vf(adev))
		return 0;

+	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
+				AMD_PG_SUPPORT_RLC_SMU_HS |
+				AMD_PG_SUPPORT_CP |
+				AMD_PG_SUPPORT_GFX_DMG))
+		adev->gfx.rlc.funcs->enter_safe_mode(adev);
	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
@@ -5713,7 +5718,11 @@ static int gfx_v8_0_set_powergating_state(void *handle,
	default:
		break;
	}

+	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
+				AMD_PG_SUPPORT_RLC_SMU_HS |
+				AMD_PG_SUPPORT_CP |
+				AMD_PG_SUPPORT_GFX_DMG))
+		adev->gfx.rlc.funcs->exit_safe_mode(adev);
	return 0;
}
@@ -632,12 +632,6 @@ static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
	amdgpu_gart_table_vram_unpin(adev);
}

-static void gmc_v6_0_gart_fini(struct amdgpu_device *adev)
-{
-	amdgpu_gart_table_vram_free(adev);
-	amdgpu_gart_fini(adev);
-}
-
static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
				     u32 status, u32 addr, u32 mc_client)
{
@@ -935,8 +929,9 @@ static int gmc_v6_0_sw_fini(void *handle)

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);
-	gmc_v6_0_gart_fini(adev);
+	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
+	amdgpu_gart_fini(adev);
	release_firmware(adev->gmc.fw);
	adev->gmc.fw = NULL;
@@ -746,19 +746,6 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
	amdgpu_gart_table_vram_unpin(adev);
}

-/**
- * gmc_v7_0_gart_fini - vm fini callback
- *
- * @adev: amdgpu_device pointer
- *
- * Tears down the driver GART/VM setup (CIK).
- */
-static void gmc_v7_0_gart_fini(struct amdgpu_device *adev)
-{
-	amdgpu_gart_table_vram_free(adev);
-	amdgpu_gart_fini(adev);
-}
-
/**
 * gmc_v7_0_vm_decode_fault - print human readable fault info
 *
@@ -1095,8 +1082,9 @@ static int gmc_v7_0_sw_fini(void *handle)
	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);
	kfree(adev->gmc.vm_fault_info);
-	gmc_v7_0_gart_fini(adev);
+	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
+	amdgpu_gart_fini(adev);
	release_firmware(adev->gmc.fw);
	adev->gmc.fw = NULL;
@@ -968,19 +968,6 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
	amdgpu_gart_table_vram_unpin(adev);
}

-/**
- * gmc_v8_0_gart_fini - vm fini callback
- *
- * @adev: amdgpu_device pointer
- *
- * Tears down the driver GART/VM setup (CIK).
- */
-static void gmc_v8_0_gart_fini(struct amdgpu_device *adev)
-{
-	amdgpu_gart_table_vram_free(adev);
-	amdgpu_gart_fini(adev);
-}
-
/**
 * gmc_v8_0_vm_decode_fault - print human readable fault info
 *
@@ -1199,8 +1186,9 @@ static int gmc_v8_0_sw_fini(void *handle)
	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);
	kfree(adev->gmc.vm_fault_info);
-	gmc_v8_0_gart_fini(adev);
+	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
+	amdgpu_gart_fini(adev);
	release_firmware(adev->gmc.fw);
	adev->gmc.fw = NULL;
@@ -942,26 +942,12 @@ static int gmc_v9_0_sw_init(void *handle)
	return 0;
}

-/**
- * gmc_v9_0_gart_fini - vm fini callback
- *
- * @adev: amdgpu_device pointer
- *
- * Tears down the driver GART/VM setup (CIK).
- */
-static void gmc_v9_0_gart_fini(struct amdgpu_device *adev)
-{
-	amdgpu_gart_table_vram_free(adev);
-	amdgpu_gart_fini(adev);
-}
-
static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);
-	gmc_v9_0_gart_fini(adev);

	/*
	 * TODO:
@ -974,7 +960,9 @@ static int gmc_v9_0_sw_fini(void *handle)
|
|||
*/
|
||||
amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
|
||||
|
||||
amdgpu_gart_table_vram_free(adev);
|
||||
amdgpu_bo_fini(adev);
|
||||
amdgpu_gart_fini(adev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
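All four gmc_v6/v7/v8/v9 sections above apply the same teardown fix: the per-generation gart_fini() helper, which freed the GART table buffer object and the GART bookkeeping in one go, is deleted, and sw_fini() now frees the GART table BO before amdgpu_bo_fini() while running the remaining GART cleanup after it. A standalone sketch of the ordering constraint, names hypothetical:

    #include <stdio.h>

    /* Models the teardown ordering the gmc hunks converge on: anything
     * that frees a buffer object must run while the BO manager is still
     * alive; bookkeeping that touches no BOs may run after it. */
    static void gart_table_vram_free(void) { puts("free GART table BO"); }
    static void bo_fini(void)              { puts("tear down BO manager"); }
    static void gart_fini(void)            { puts("free GART bookkeeping"); }

    static void sw_fini(void)
    {
        gart_table_vram_free(); /* BO freed while the manager is alive */
        bo_fini();              /* safe: no BOs left behind */
        gart_fini();            /* plain memory cleanup, order-insensitive */
    }

    int main(void) { sw_fini(); return 0; }
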
@@ -65,8 +65,6 @@ static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
 					    int min_temp, int max_temp);
 static int kv_init_fps_limits(struct amdgpu_device *adev);
 
-static void kv_dpm_powergate_uvd(void *handle, bool gate);
-static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate);
 static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate);
 static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate);
 

@@ -1354,8 +1352,6 @@ static int kv_dpm_enable(struct amdgpu_device *adev)
 		return ret;
 	}
 
-	kv_update_current_ps(adev, adev->pm.dpm.boot_ps);
-
 	if (adev->irq.installed &&
 	    amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
 		ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX);

@@ -1374,6 +1370,8 @@
 
 static void kv_dpm_disable(struct amdgpu_device *adev)
 {
+	struct kv_power_info *pi = kv_get_pi(adev);
+
 	amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
 		       AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
 	amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,

@@ -1387,8 +1385,10 @@ static void kv_dpm_disable(struct amdgpu_device *adev)
 	/* powerup blocks */
 	kv_dpm_powergate_acp(adev, false);
 	kv_dpm_powergate_samu(adev, false);
-	kv_dpm_powergate_vce(adev, false);
-	kv_dpm_powergate_uvd(adev, false);
+	if (pi->caps_vce_pg) /* power on the VCE block */
+		amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
+	if (pi->caps_uvd_pg) /* power on the UVD block */
+		amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON);
 
 	kv_enable_smc_cac(adev, false);
 	kv_enable_didt(adev, false);

@@ -1551,7 +1551,6 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev,
 	int ret;
 
 	if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) {
-		kv_dpm_powergate_vce(adev, false);
 		if (pi->caps_stable_p_state)
 			pi->vce_boot_level = table->count - 1;
 		else

@@ -1573,7 +1572,6 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev,
 		kv_enable_vce_dpm(adev, true);
 	} else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) {
 		kv_enable_vce_dpm(adev, false);
-		kv_dpm_powergate_vce(adev, true);
 	}
 
 	return 0;

@@ -1702,24 +1700,32 @@ static void kv_dpm_powergate_uvd(void *handle, bool gate)
 	}
 }
 
-static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
+static void kv_dpm_powergate_vce(void *handle, bool gate)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct kv_power_info *pi = kv_get_pi(adev);
-
-	if (pi->vce_power_gated == gate)
-		return;
+	int ret;
 
 	pi->vce_power_gated = gate;
 
-	if (!pi->caps_vce_pg)
-		return;
-
-	if (gate)
-		amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF);
-	else
-		amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
+	if (gate) {
+		/* stop the VCE block */
+		ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+							     AMD_PG_STATE_GATE);
+		kv_enable_vce_dpm(adev, false);
+		if (pi->caps_vce_pg) /* power off the VCE block */
+			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF);
+	} else {
+		if (pi->caps_vce_pg) /* power on the VCE block */
+			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
+		kv_enable_vce_dpm(adev, true);
+		/* re-init the VCE block */
+		ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+							     AMD_PG_STATE_UNGATE);
+	}
 }
 
 static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate)
 {
 	struct kv_power_info *pi = kv_get_pi(adev);
 

@@ -3061,7 +3067,7 @@ static int kv_dpm_hw_init(void *handle)
 	else
 		adev->pm.dpm_enabled = true;
 	mutex_unlock(&adev->pm.mutex);
-
+	amdgpu_pm_compute_clocks(adev);
 	return ret;
 }
 

@@ -3313,6 +3319,9 @@ static int kv_set_powergating_by_smu(void *handle,
 	case AMD_IP_BLOCK_TYPE_UVD:
 		kv_dpm_powergate_uvd(handle, gate);
 		break;
+	case AMD_IP_BLOCK_TYPE_VCE:
+		kv_dpm_powergate_vce(handle, gate);
+		break;
 	default:
 		break;
 	}

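Taken together, the kv_dpm hunks reroute VCE powergating on Kaveri through kv_set_powergating_by_smu(): the direct kv_dpm_powergate_vce() calls and the now-stale forward declarations are dropped, the function itself takes the opaque handle and gates the block through amdgpu_device_ip_set_powergating_state(), and kv_dpm_hw_init() finishes with amdgpu_pm_compute_clocks(). The gate path stops the block before cutting its power, and the ungate path restores power before reprogramming it. A standalone sketch of that ordering, names hypothetical:

    #include <stdbool.h>
    #include <stdio.h>

    /* Models the gate/ungate ordering in kv_dpm_powergate_vce() above:
     * quiesce before power-off, power-on before re-init. */
    static void ip_block_stop(void)   { puts("IP block stopped"); }
    static void ip_block_reinit(void) { puts("IP block re-initialized"); }
    static void dpm_enable(bool on)   { printf("DPM %s\n", on ? "on" : "off"); }
    static void smu_power(bool on)    { printf("SMU power %s\n", on ? "on" : "off"); }

    static void powergate(bool gate)
    {
        if (gate) {
            ip_block_stop();   /* stop the block first */
            dpm_enable(false);
            smu_power(false);  /* only then cut power */
        } else {
            smu_power(true);   /* restore power first */
            dpm_enable(true);
            ip_block_reinit(); /* only then re-init the block */
        }
    }

    int main(void)
    {
        powergate(true);
        powergate(false);
        return 0;
    }
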
@@ -6887,7 +6887,6 @@ static int si_dpm_enable(struct amdgpu_device *adev)
 
 	si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
 	si_thermal_start_thermal_controller(adev);
-	ni_update_current_ps(adev, boot_ps);
 
 	return 0;
 }

@@ -7763,7 +7762,7 @@ static int si_dpm_hw_init(void *handle)
 	else
 		adev->pm.dpm_enabled = true;
 	mutex_unlock(&adev->pm.mutex);
-
+	amdgpu_pm_compute_clocks(adev);
 	return ret;
 }
 

@@ -480,12 +480,20 @@ void pp_rv_set_display_requirement(struct pp_smu *pp,
 {
 	struct dc_context *ctx = pp->ctx;
 	struct amdgpu_device *adev = ctx->driver_context;
+	void *pp_handle = adev->powerplay.pp_handle;
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+	struct pp_display_clock_request clock = {0};
 
-	if (!pp_funcs || !pp_funcs->display_configuration_changed)
+	if (!pp_funcs || !pp_funcs->display_clock_voltage_request)
 		return;
 
-	amdgpu_dpm_display_configuration_changed(adev);
+	clock.clock_type = amd_pp_dcf_clock;
+	clock.clock_freq_in_khz = req->hard_min_dcefclk_khz;
+	pp_funcs->display_clock_voltage_request(pp_handle, &clock);
+
+	clock.clock_type = amd_pp_f_clock;
+	clock.clock_freq_in_khz = req->hard_min_fclk_khz;
+	pp_funcs->display_clock_voltage_request(pp_handle, &clock);
 }
 
 void pp_rv_set_wm_ranges(struct pp_smu *pp,

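Instead of the generic display-configuration-changed notification, the rewritten pp_rv_set_display_requirement() above translates the request into two explicit clock/voltage requests, one for the hard minimum DCEF clock and one for the hard minimum fabric clock, reusing a single request struct. A minimal sketch of that shape, names hypothetical:

    #include <stdio.h>

    /* Models the rewrite above: one request struct, refilled per clock
     * type, submitted through a single callback. */
    enum clock_type { CLK_DCEF, CLK_F };

    struct clock_request {
        enum clock_type type;
        unsigned int freq_khz;
    };

    static void clock_voltage_request(const struct clock_request *req)
    {
        printf("clock %d -> hard minimum %u kHz\n", req->type, req->freq_khz);
    }

    static void set_display_requirement(unsigned int min_dcef_khz,
                                        unsigned int min_f_khz)
    {
        struct clock_request clock = {0};

        clock.type = CLK_DCEF;
        clock.freq_khz = min_dcef_khz;
        clock_voltage_request(&clock);

        clock.type = CLK_F;
        clock.freq_khz = min_f_khz;
        clock_voltage_request(&clock);
    }

    int main(void)
    {
        set_display_requirement(600000, 800000);
        return 0;
    }
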
@@ -754,8 +754,12 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
 			 * fail-safe mode
 			 */
 			if (dc_is_hdmi_signal(link->connector_signal) ||
-			    dc_is_dvi_signal(link->connector_signal))
+			    dc_is_dvi_signal(link->connector_signal)) {
+				if (prev_sink != NULL)
+					dc_sink_release(prev_sink);
+
 				return false;
+			}
 		default:
 			break;
 	}

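In the dc_link_detect() hunk above, the HDMI/DVI fail-safe path used to return early without the dc_sink_release(prev_sink) that the normal path performs, leaking a sink reference; the fix releases the reference before bailing out. A standalone model of dropping a held reference on every exit path, names hypothetical:

    #include <stdlib.h>

    /* Models the fix above: a reference held earlier in the function
     * must be dropped on the early return too, or the object leaks. */
    struct sink { int refcount; };

    static void sink_release(struct sink *s)
    {
        if (s && --s->refcount == 0)
            free(s);
    }

    static int detect(struct sink *prev_sink, int fail_safe)
    {
        if (fail_safe) {
            if (prev_sink != NULL)
                sink_release(prev_sink); /* leaked before the fix */
            return 0;
        }
        /* ... normal detection also releases prev_sink when done ... */
        sink_release(prev_sink);
        return 1;
    }

    int main(void)
    {
        struct sink *s = calloc(1, sizeof(*s));

        if (!s)
            return 1;
        s->refcount = 1;
        return detect(s, 1);
    }
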
@@ -199,7 +199,6 @@ vma_create(struct drm_i915_gem_object *obj,
 		vma->flags |= I915_VMA_GGTT;
 		list_add(&vma->obj_link, &obj->vma_list);
 	} else {
-		i915_ppgtt_get(i915_vm_to_ppgtt(vm));
 		list_add_tail(&vma->obj_link, &obj->vma_list);
 	}
 

@@ -807,9 +806,6 @@ static void __i915_vma_destroy(struct i915_vma *vma)
 	if (vma->obj)
 		rb_erase(&vma->obj_node, &vma->obj->vma_tree);
 
-	if (!i915_vma_is_ggtt(vma))
-		i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
-
 	rbtree_postorder_for_each_entry_safe(iter, n, &vma->active, node) {
 		GEM_BUG_ON(i915_gem_active_isset(&iter->base));
 		kfree(iter);

@@ -962,9 +962,6 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv)
 {
 	int ret;
 
-	if (INTEL_INFO(dev_priv)->num_pipes == 0)
-		return;
-
 	ret = component_add(dev_priv->drm.dev, &i915_audio_component_bind_ops);
 	if (ret < 0) {
 		DRM_ERROR("failed to add audio component (%d)\n", ret);

@@ -2988,6 +2988,7 @@ static int skl_check_main_surface(const struct intel_crtc_state *crtc_state,
 	int w = drm_rect_width(&plane_state->base.src) >> 16;
 	int h = drm_rect_height(&plane_state->base.src) >> 16;
 	int dst_x = plane_state->base.dst.x1;
+	int dst_w = drm_rect_width(&plane_state->base.dst);
 	int pipe_src_w = crtc_state->pipe_src_w;
 	int max_width = skl_max_plane_width(fb, 0, rotation);
 	int max_height = 4096;

@@ -3009,10 +3010,10 @@ static int skl_check_main_surface(const struct intel_crtc_state *crtc_state,
 	 * screen may cause FIFO underflow and display corruption.
 	 */
 	if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
-	    (dst_x + w < 4 || dst_x > pipe_src_w - 4)) {
+	    (dst_x + dst_w < 4 || dst_x > pipe_src_w - 4)) {
 		DRM_DEBUG_KMS("requested plane X %s position %d invalid (valid range %d-%d)\n",
-			      dst_x + w < 4 ? "end" : "start",
-			      dst_x + w < 4 ? dst_x + w : dst_x,
+			      dst_x + dst_w < 4 ? "end" : "start",
+			      dst_x + dst_w < 4 ? dst_x + dst_w : dst_x,
 			      4, pipe_src_w - 4);
 		return -ERANGE;
 	}

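The two skl_check_main_surface() hunks fix a mixed-coordinate-space check: the GLK/CNL edge restriction is about where the plane lands on screen, so it must use the destination rectangle's width (the new dst_w) rather than the clipped source width w, and the two differ whenever the plane is scaled. A user-space sketch of the corrected check:

    #include <stdio.h>

    /* Models the fix above: validate the plane's on-screen extent with
     * the destination width, not the source width. */
    static int check_plane(int dst_x, int dst_w, int pipe_src_w)
    {
        if (dst_x + dst_w < 4 || dst_x > pipe_src_w - 4) {
            fprintf(stderr, "plane X %s %d invalid (valid range %d-%d)\n",
                    dst_x + dst_w < 4 ? "end" : "start",
                    dst_x + dst_w < 4 ? dst_x + dst_w : dst_x,
                    4, pipe_src_w - 4);
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        check_plane(-40, 32, 1920);  /* ends at -8: rejected */
        check_plane(100, 256, 1920); /* fully on screen: accepted */
        return 0;
    }
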
@@ -943,8 +943,12 @@ static int intel_hdmi_hdcp_write(struct intel_digital_port *intel_dig_port,
 
 	ret = i2c_transfer(adapter, &msg, 1);
 	if (ret == 1)
-		return 0;
-	return ret >= 0 ? -EIO : ret;
+		ret = 0;
+	else if (ret >= 0)
+		ret = -EIO;
+
+	kfree(write_buf);
+	return ret;
 }
 
 static

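intel_hdmi_hdcp_write() allocates write_buf earlier in the function, but both return statements in the old code above skipped the kfree(); the fix folds the i2c_transfer() result into ret and leaves through a single exit point that frees the buffer. A standalone sketch of the pattern, names hypothetical:

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    /* Models the leak fix above: fold the transfer status into ret and
     * funnel every path through one exit that frees the buffer. */
    static int transfer(const void *buf, size_t len)
    {
        (void)buf; (void)len;
        return 1; /* one message transferred */
    }

    static int hdcp_write(const void *data, size_t len)
    {
        int ret;
        void *write_buf = malloc(len);

        if (!write_buf)
            return -ENOMEM;
        memcpy(write_buf, data, len);

        ret = transfer(write_buf, len);
        if (ret == 1)       /* full transfer: success */
            ret = 0;
        else if (ret >= 0)  /* short transfer: map to an error code */
            ret = -EIO;

        free(write_buf);    /* reached on every path after allocation */
        return ret;
    }

    int main(void) { return hdcp_write("ksv", 3); }
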
@@ -74,7 +74,7 @@ static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon,
 	DRM_DEBUG_KMS("Waiting for LSPCON mode %s to settle\n",
 		      lspcon_mode_name(mode));
 
-	wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 100);
+	wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 400);
 	if (current_mode != mode)
 		DRM_ERROR("LSPCON mode hasn't settled\n");
 

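The lspcon_wait_mode() change simply raises the bounded poll from 100 ms to 400 ms, presumably because some LSPCON adapters need longer than 100 ms to settle into the requested mode. A user-space sketch of a wait_for()-style bounded poll, names hypothetical:

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    /* Models the bounded poll above: keep sampling until the condition
     * holds or the timeout, tuned to the device's settle time, expires. */
    static int get_current_mode(void) { return 1; }

    static bool wait_for_mode(int mode, long timeout_ms)
    {
        struct timespec start, now;

        clock_gettime(CLOCK_MONOTONIC, &start);
        do {
            if (get_current_mode() == mode)
                return true;
            clock_gettime(CLOCK_MONOTONIC, &now);
        } while ((now.tv_sec - start.tv_sec) * 1000L +
                 (now.tv_nsec - start.tv_nsec) / 1000000L < timeout_ms);

        return false;
    }

    int main(void)
    {
        if (!wait_for_mode(1, 400))
            fprintf(stderr, "mode hasn't settled\n");
        return 0;
    }
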
@@ -132,6 +132,11 @@ static void mtk_ovl_config(struct mtk_ddp_comp *comp, unsigned int w,
 	writel(0x0, comp->regs + DISP_REG_OVL_RST);
 }
 
+static unsigned int mtk_ovl_layer_nr(struct mtk_ddp_comp *comp)
+{
+	return 4;
+}
+
 static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx)
 {
 	unsigned int reg;

@@ -157,6 +162,11 @@ static void mtk_ovl_layer_off(struct mtk_ddp_comp *comp, unsigned int idx)
 
 static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt)
 {
+	/* The return value in switch "MEM_MODE_INPUT_FORMAT_XXX"
+	 * is defined in mediatek HW data sheet.
+	 * The alphabet order in XXX is no relation to data
+	 * arrangement in memory.
+	 */
 	switch (fmt) {
 	default:
 	case DRM_FORMAT_RGB565:

@@ -221,6 +231,7 @@ static const struct mtk_ddp_comp_funcs mtk_disp_ovl_funcs = {
 	.stop = mtk_ovl_stop,
 	.enable_vblank = mtk_ovl_enable_vblank,
 	.disable_vblank = mtk_ovl_disable_vblank,
+	.layer_nr = mtk_ovl_layer_nr,
 	.layer_on = mtk_ovl_layer_on,
 	.layer_off = mtk_ovl_layer_off,
 	.layer_config = mtk_ovl_layer_config,

@@ -31,14 +31,31 @@
 #define RDMA_REG_UPDATE_INT			BIT(0)
 #define DISP_REG_RDMA_GLOBAL_CON		0x0010
 #define RDMA_ENGINE_EN				BIT(0)
+#define RDMA_MODE_MEMORY			BIT(1)
 #define DISP_REG_RDMA_SIZE_CON_0		0x0014
+#define RDMA_MATRIX_ENABLE			BIT(17)
+#define RDMA_MATRIX_INT_MTX_SEL			GENMASK(23, 20)
+#define RDMA_MATRIX_INT_MTX_BT601_to_RGB	(6 << 20)
 #define DISP_REG_RDMA_SIZE_CON_1		0x0018
 #define DISP_REG_RDMA_TARGET_LINE		0x001c
+#define DISP_RDMA_MEM_CON			0x0024
+#define MEM_MODE_INPUT_FORMAT_RGB565		(0x000 << 4)
+#define MEM_MODE_INPUT_FORMAT_RGB888		(0x001 << 4)
+#define MEM_MODE_INPUT_FORMAT_RGBA8888		(0x002 << 4)
+#define MEM_MODE_INPUT_FORMAT_ARGB8888		(0x003 << 4)
+#define MEM_MODE_INPUT_FORMAT_UYVY		(0x004 << 4)
+#define MEM_MODE_INPUT_FORMAT_YUYV		(0x005 << 4)
+#define MEM_MODE_INPUT_SWAP			BIT(8)
+#define DISP_RDMA_MEM_SRC_PITCH			0x002c
+#define DISP_RDMA_MEM_GMC_SETTING_0		0x0030
 #define DISP_REG_RDMA_FIFO_CON			0x0040
 #define RDMA_FIFO_UNDERFLOW_EN			BIT(31)
 #define RDMA_FIFO_PSEUDO_SIZE(bytes)		(((bytes) / 16) << 16)
 #define RDMA_OUTPUT_VALID_FIFO_THRESHOLD(bytes)	((bytes) / 16)
 #define RDMA_FIFO_SIZE(rdma)			((rdma)->data->fifo_size)
+#define DISP_RDMA_MEM_START_ADDR		0x0f00
+
+#define RDMA_MEM_GMC				0x40402020
 
 struct mtk_disp_rdma_data {
 	unsigned int fifo_size;

@@ -138,12 +155,87 @@ static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width,
 	writel(reg, comp->regs + DISP_REG_RDMA_FIFO_CON);
 }
 
+static unsigned int rdma_fmt_convert(struct mtk_disp_rdma *rdma,
+				     unsigned int fmt)
+{
+	/* The return value in switch "MEM_MODE_INPUT_FORMAT_XXX"
+	 * is defined in mediatek HW data sheet.
+	 * The alphabet order in XXX is no relation to data
+	 * arrangement in memory.
+	 */
+	switch (fmt) {
+	default:
+	case DRM_FORMAT_RGB565:
+		return MEM_MODE_INPUT_FORMAT_RGB565;
+	case DRM_FORMAT_BGR565:
+		return MEM_MODE_INPUT_FORMAT_RGB565 | MEM_MODE_INPUT_SWAP;
+	case DRM_FORMAT_RGB888:
+		return MEM_MODE_INPUT_FORMAT_RGB888;
+	case DRM_FORMAT_BGR888:
+		return MEM_MODE_INPUT_FORMAT_RGB888 | MEM_MODE_INPUT_SWAP;
+	case DRM_FORMAT_RGBX8888:
+	case DRM_FORMAT_RGBA8888:
+		return MEM_MODE_INPUT_FORMAT_ARGB8888;
+	case DRM_FORMAT_BGRX8888:
+	case DRM_FORMAT_BGRA8888:
+		return MEM_MODE_INPUT_FORMAT_ARGB8888 | MEM_MODE_INPUT_SWAP;
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_ARGB8888:
+		return MEM_MODE_INPUT_FORMAT_RGBA8888;
+	case DRM_FORMAT_XBGR8888:
+	case DRM_FORMAT_ABGR8888:
+		return MEM_MODE_INPUT_FORMAT_RGBA8888 | MEM_MODE_INPUT_SWAP;
+	case DRM_FORMAT_UYVY:
+		return MEM_MODE_INPUT_FORMAT_UYVY;
+	case DRM_FORMAT_YUYV:
+		return MEM_MODE_INPUT_FORMAT_YUYV;
+	}
+}
+
+static unsigned int mtk_rdma_layer_nr(struct mtk_ddp_comp *comp)
+{
+	return 1;
+}
+
+static void mtk_rdma_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
+				  struct mtk_plane_state *state)
+{
+	struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
+	struct mtk_plane_pending_state *pending = &state->pending;
+	unsigned int addr = pending->addr;
+	unsigned int pitch = pending->pitch & 0xffff;
+	unsigned int fmt = pending->format;
+	unsigned int con;
+
+	con = rdma_fmt_convert(rdma, fmt);
+	writel_relaxed(con, comp->regs + DISP_RDMA_MEM_CON);
+
+	if (fmt == DRM_FORMAT_UYVY || fmt == DRM_FORMAT_YUYV) {
+		rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0,
+				 RDMA_MATRIX_ENABLE, RDMA_MATRIX_ENABLE);
+		rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0,
+				 RDMA_MATRIX_INT_MTX_SEL,
+				 RDMA_MATRIX_INT_MTX_BT601_to_RGB);
+	} else {
+		rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0,
+				 RDMA_MATRIX_ENABLE, 0);
+	}
+
+	writel_relaxed(addr, comp->regs + DISP_RDMA_MEM_START_ADDR);
+	writel_relaxed(pitch, comp->regs + DISP_RDMA_MEM_SRC_PITCH);
+	writel(RDMA_MEM_GMC, comp->regs + DISP_RDMA_MEM_GMC_SETTING_0);
+	rdma_update_bits(comp, DISP_REG_RDMA_GLOBAL_CON,
+			 RDMA_MODE_MEMORY, RDMA_MODE_MEMORY);
+}
+
 static const struct mtk_ddp_comp_funcs mtk_disp_rdma_funcs = {
 	.config = mtk_rdma_config,
 	.start = mtk_rdma_start,
 	.stop = mtk_rdma_stop,
 	.enable_vblank = mtk_rdma_enable_vblank,
 	.disable_vblank = mtk_rdma_disable_vblank,
+	.layer_nr = mtk_rdma_layer_nr,
+	.layer_config = mtk_rdma_layer_config,
 };
 
 static int mtk_disp_rdma_bind(struct device *dev, struct device *master,

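The mtk_disp_rdma sections teach the RDMA engine to scan out directly from memory: new register and bitfield definitions, a DRM fourcc to hardware format translation, a layer_nr callback reporting a single layer, and a layer_config that programs format, start address and pitch, enabling the BT.601-to-RGB matrix only for the packed-YUV formats. The rdma_update_bits() helper used throughout is, by all appearances, a masked read-modify-write; a standalone model:

    #include <stdint.h>
    #include <stdio.h>

    /* Models the rdma_update_bits() pattern: a masked read-modify-write,
     * so bits outside the mask survive the update. */
    static uint32_t regs[64];

    static void update_bits(unsigned int reg, uint32_t mask, uint32_t val)
    {
        uint32_t tmp = regs[reg];

        tmp = (tmp & ~mask) | (val & mask);
        regs[reg] = tmp;
    }

    int main(void)
    {
        update_bits(0, 1u << 17, 1u << 17);   /* e.g. enable the YUV matrix */
        update_bits(0, 0xfu << 20, 6u << 20); /* select BT.601 -> RGB */
        printf("reg0 = 0x%08x\n", (unsigned int)regs[0]);
        return 0;
    }
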
@@ -45,7 +45,8 @@ struct mtk_drm_crtc {
 	bool				pending_needs_vblank;
 	struct drm_pending_vblank_event	*event;
 
-	struct drm_plane		planes[OVL_LAYER_NR];
+	struct drm_plane		*planes;
+	unsigned int			layer_nr;
 	bool				pending_planes;
 
 	void __iomem			*config_regs;

@@ -171,9 +172,9 @@ static void mtk_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
 static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc)
 {
 	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
-	struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
+	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
 
-	mtk_ddp_comp_enable_vblank(ovl, &mtk_crtc->base);
+	mtk_ddp_comp_enable_vblank(comp, &mtk_crtc->base);
 
 	return 0;
 }

@@ -181,9 +182,9 @@ static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc)
 static void mtk_drm_crtc_disable_vblank(struct drm_crtc *crtc)
 {
 	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
-	struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
+	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
 
-	mtk_ddp_comp_disable_vblank(ovl);
+	mtk_ddp_comp_disable_vblank(comp);
 }
 
 static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc)

@@ -286,7 +287,7 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
 	}
 
 	/* Initially configure all planes */
-	for (i = 0; i < OVL_LAYER_NR; i++) {
+	for (i = 0; i < mtk_crtc->layer_nr; i++) {
 		struct drm_plane *plane = &mtk_crtc->planes[i];
 		struct mtk_plane_state *plane_state;
 

@@ -334,7 +335,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
 {
 	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
 	struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state);
-	struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
+	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
 	unsigned int i;
 
 	/*

@@ -343,7 +344,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
 	 * queue update module registers on vblank.
 	 */
 	if (state->pending_config) {
-		mtk_ddp_comp_config(ovl, state->pending_width,
+		mtk_ddp_comp_config(comp, state->pending_width,
 				    state->pending_height,
 				    state->pending_vrefresh, 0);
 

@@ -351,14 +352,14 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
 	}
 
 	if (mtk_crtc->pending_planes) {
-		for (i = 0; i < OVL_LAYER_NR; i++) {
+		for (i = 0; i < mtk_crtc->layer_nr; i++) {
 			struct drm_plane *plane = &mtk_crtc->planes[i];
 			struct mtk_plane_state *plane_state;
 
 			plane_state = to_mtk_plane_state(plane->state);
 
 			if (plane_state->pending.config) {
-				mtk_ddp_comp_layer_config(ovl, i, plane_state);
+				mtk_ddp_comp_layer_config(comp, i, plane_state);
 				plane_state->pending.config = false;
 			}
 		}

@@ -370,12 +371,12 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
 				       struct drm_crtc_state *old_state)
 {
 	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
-	struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
+	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
 	int ret;
 
 	DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);
 
-	ret = mtk_smi_larb_get(ovl->larb_dev);
+	ret = mtk_smi_larb_get(comp->larb_dev);
 	if (ret) {
 		DRM_ERROR("Failed to get larb: %d\n", ret);
 		return;

@@ -383,7 +384,7 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
 
 	ret = mtk_crtc_ddp_hw_init(mtk_crtc);
 	if (ret) {
-		mtk_smi_larb_put(ovl->larb_dev);
+		mtk_smi_larb_put(comp->larb_dev);
 		return;
 	}
 

@@ -395,7 +396,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
 					struct drm_crtc_state *old_state)
 {
 	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
-	struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
+	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
 	int i;
 
 	DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);

@@ -403,7 +404,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
 		return;
 
 	/* Set all pending plane state to disabled */
-	for (i = 0; i < OVL_LAYER_NR; i++) {
+	for (i = 0; i < mtk_crtc->layer_nr; i++) {
 		struct drm_plane *plane = &mtk_crtc->planes[i];
 		struct mtk_plane_state *plane_state;
 

@@ -418,7 +419,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
 
 	drm_crtc_vblank_off(crtc);
 	mtk_crtc_ddp_hw_fini(mtk_crtc);
-	mtk_smi_larb_put(ovl->larb_dev);
+	mtk_smi_larb_put(comp->larb_dev);
 
 	mtk_crtc->enabled = false;
 }

@@ -450,7 +451,7 @@ static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc,
 
 	if (mtk_crtc->event)
 		mtk_crtc->pending_needs_vblank = true;
-	for (i = 0; i < OVL_LAYER_NR; i++) {
+	for (i = 0; i < mtk_crtc->layer_nr; i++) {
 		struct drm_plane *plane = &mtk_crtc->planes[i];
 		struct mtk_plane_state *plane_state;
 

@@ -516,7 +517,7 @@ err_cleanup_crtc:
 	return ret;
 }
 
-void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl)
+void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp)
 {
 	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
 	struct mtk_drm_private *priv = crtc->dev->dev_private;

@@ -598,7 +599,12 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
 		mtk_crtc->ddp_comp[i] = comp;
 	}
 
-	for (zpos = 0; zpos < OVL_LAYER_NR; zpos++) {
+	mtk_crtc->layer_nr = mtk_ddp_comp_layer_nr(mtk_crtc->ddp_comp[0]);
+	mtk_crtc->planes = devm_kzalloc(dev, mtk_crtc->layer_nr *
+					sizeof(struct drm_plane),
+					GFP_KERNEL);
+
+	for (zpos = 0; zpos < mtk_crtc->layer_nr; zpos++) {
 		type = (zpos == 0) ? DRM_PLANE_TYPE_PRIMARY :
 		       (zpos == 1) ? DRM_PLANE_TYPE_CURSOR :
 				     DRM_PLANE_TYPE_OVERLAY;

@@ -609,7 +615,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
 	}
 
 	ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, &mtk_crtc->planes[0],
-				&mtk_crtc->planes[1], pipe);
+				mtk_crtc->layer_nr > 1 ? &mtk_crtc->planes[1] :
+				NULL, pipe);
 	if (ret < 0)
 		goto unprepare;
 	drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE);

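The crtc hunks replace the fixed drm_plane planes[OVL_LAYER_NR] array with a pointer plus a layer_nr count, filled in at create time from the first component's layer_nr() callback, and rename the generic ddp_comp[0] variable from ovl to comp, since that component can now be an RDMA as well as an OVL. The cursor plane is only passed to mtk_drm_crtc_init() when more than one layer exists. A standalone sketch of the idea, names hypothetical:

    #include <stdio.h>
    #include <stdlib.h>

    /* Models the dynamic plane allocation above: the layer count comes
     * from the first component in the pipeline, and a cursor plane only
     * exists when there is a second layer to put it on. */
    struct comp { unsigned int (*layer_nr)(void); };

    static unsigned int ovl_layer_nr(void)  { return 4; }
    static unsigned int rdma_layer_nr(void) { return 1; }

    static void crtc_create(const struct comp *first)
    {
        unsigned int n = first->layer_nr();
        void *planes = calloc(n, sizeof(int)); /* stands in for drm_plane[] */

        printf("%u plane(s), cursor: %s\n", n, n > 1 ? "yes" : "no");
        free(planes);
    }

    int main(void)
    {
        struct comp ovl = { ovl_layer_nr }, rdma = { rdma_layer_nr };

        crtc_create(&ovl);
        crtc_create(&rdma);
        return 0;
    }
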
@@ -18,13 +18,12 @@
 #include "mtk_drm_ddp_comp.h"
 #include "mtk_drm_plane.h"
 
-#define OVL_LAYER_NR	4
 #define MTK_LUT_SIZE	512
 #define MTK_MAX_BPC	10
 #define MTK_MIN_BPC	3
 
 void mtk_drm_crtc_commit(struct drm_crtc *crtc);
-void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl);
+void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp);
 int mtk_drm_crtc_create(struct drm_device *drm_dev,
 			const enum mtk_ddp_comp_id *path,
 			unsigned int path_len);
Some files were not shown because too many files have changed in this diff.