Linux 4.11-rc4

-----BEGIN PGP SIGNATURE-----
 
 iQEcBAABAgAGBQJY2C9qAAoJEHm+PkMAQRiGaBQIAIGzdlZ6ImiP6zoukrRv7qUr
 44ITm0lsBiL85QGedhQQL+Y9UqwUmlqgFqnH0Gr8YHNbLJWXzdjGbl5aVo4KjASq
 104NLUDXtPww/xZdH4wJMzhuwucYwZOUyDOjOr0ak3cGxOE2xjNjHMZXxWUf20GO
 EpRr6WhV1DUAvAdjdNa9KlcOjMluNpMLLyL1CFLjrkkArrWAyqOURKHAb6ZLghfv
 iZV1qJTVPyYGpnlI3kuEgu2GuDjxqpoNLSr3wHyEHm/pBPEl7MX6zPbzcegBV8TY
 cRRlXo4notdsuknmSNcj0hHuTQvw1kl7BhieLKVsnCyCIM6jjX4TSQZFutmbzwM=
 =5iRl
 -----END PGP SIGNATURE-----

Backmerge tag 'v4.11-rc4' into drm-next

Linux 4.11-rc4

The i915 GVT team need the rc4 code to base some more code on.
Dave Airlie, 2017-03-28 17:34:19 +10:00
commit e5c1ff1475
379 changed files with 3470 additions and 2578 deletions

View File

@@ -45,7 +45,7 @@ The following clocks are available:
 - 1 15	SATA
 - 1 16	SATA USB
 - 1 17	Main
-- 1 18	SD/MMC
+- 1 18	SD/MMC/GOP
 - 1 21	Slow IO (SPI, NOR, BootROM, I2C, UART)
 - 1 22	USB3H0
 - 1 23	USB3H1
@@ -65,7 +65,7 @@ Required properties:
 	"cpm-audio", "cpm-communit", "cpm-nand", "cpm-ppv2", "cpm-sdio",
 	"cpm-mg-domain", "cpm-mg-core", "cpm-xor1", "cpm-xor0", "cpm-gop-dp", "none",
 	"cpm-pcie_x10", "cpm-pcie_x11", "cpm-pcie_x4", "cpm-pcie-xor", "cpm-sata",
-	"cpm-sata-usb", "cpm-main", "cpm-sd-mmc", "none", "none", "cpm-slow-io",
+	"cpm-sata-usb", "cpm-main", "cpm-sd-mmc-gop", "none", "none", "cpm-slow-io",
 	"cpm-usb3h0", "cpm-usb3h1", "cpm-usb3dev", "cpm-eip150", "cpm-eip197";

 Example:
@@ -78,6 +78,6 @@ Example:
 	gate-clock-output-names = "cpm-audio", "cpm-communit", "cpm-nand", "cpm-ppv2", "cpm-sdio",
 		"cpm-mg-domain", "cpm-mg-core", "cpm-xor1", "cpm-xor0", "cpm-gop-dp", "none",
 		"cpm-pcie_x10", "cpm-pcie_x11", "cpm-pcie_x4", "cpm-pcie-xor", "cpm-sata",
-		"cpm-sata-usb", "cpm-main", "cpm-sd-mmc", "none", "none", "cpm-slow-io",
+		"cpm-sata-usb", "cpm-main", "cpm-sd-mmc-gop", "none", "none", "cpm-slow-io",
 		"cpm-usb3h0", "cpm-usb3h1", "cpm-usb3dev", "cpm-eip150", "cpm-eip197";
 };

View File

@@ -4,7 +4,6 @@ Required properties:
 - compatible: value should be one of the following
 	"samsung,exynos3250-mipi-dsi" /* for Exynos3250/3472 SoCs */
 	"samsung,exynos4210-mipi-dsi" /* for Exynos4 SoCs */
-	"samsung,exynos4415-mipi-dsi" /* for Exynos4415 SoC */
 	"samsung,exynos5410-mipi-dsi" /* for Exynos5410/5420/5440 SoCs */
 	"samsung,exynos5422-mipi-dsi" /* for Exynos5422/5800 SoCs */
 	"samsung,exynos5433-mipi-dsi" /* for Exynos5433 SoCs */

View File

@@ -11,7 +11,6 @@ Required properties:
 	"samsung,s5pv210-fimd"; /* for S5PV210 SoC */
 	"samsung,exynos3250-fimd"; /* for Exynos3250/3472 SoCs */
 	"samsung,exynos4210-fimd"; /* for Exynos4 SoCs */
-	"samsung,exynos4415-fimd"; /* for Exynos4415 SoC */
 	"samsung,exynos5250-fimd"; /* for Exynos5250 SoCs */
 	"samsung,exynos5420-fimd"; /* for Exynos5420/5422/5800 SoCs */

View File

@@ -13,7 +13,7 @@ Required Properties:
 	- "rockchip,rk2928-dw-mshc": for Rockchip RK2928 and following,
 		before RK3288
 	- "rockchip,rk3288-dw-mshc": for Rockchip RK3288
-	- "rockchip,rk1108-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK1108
+	- "rockchip,rv1108-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RV1108
 	- "rockchip,rk3036-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3036
 	- "rockchip,rk3368-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3368
 	- "rockchip,rk3399-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3399

View File

@ -1,39 +0,0 @@
Broadcom USB3 phy binding for northstar plus SoC
The USB3 phy is internal to the SoC and is accessed using mdio interface.
Required mdio bus properties:
- reg: Should be 0x0 for SoC internal USB3 phy
- #address-cells: must be 1
- #size-cells: must be 0
Required USB3 PHY properties:
- compatible: should be "brcm,nsp-usb3-phy"
- reg: USB3 Phy address on SoC internal MDIO bus and it should be 0x10.
- usb3-ctrl-syscon: handler of syscon node defining physical address
of usb3 control register.
- #phy-cells: must be 0
Required usb3 control properties:
- compatible: should be "brcm,nsp-usb3-ctrl"
- reg: offset and length of the control registers
Example:
mdio@0 {
reg = <0x0>;
#address-cells = <1>;
#size-cells = <0>;
usb3_phy: usb-phy@10 {
compatible = "brcm,nsp-usb3-phy";
reg = <0x10>;
usb3-ctrl-syscon = <&usb3_ctrl>;
#phy-cells = <0>;
status = "disabled";
};
};
usb3_ctrl: syscon@104408 {
compatible = "brcm,nsp-usb3-ctrl", "syscon";
reg = <0x104408 0x3fc>;
};

View File

@@ -20,3 +20,8 @@ Index 1: The output gpio for enabling Vbus output from the device to the otg
 Index 2: The output gpio for muxing of the data pins between the USB host and
 	the USB peripheral controller, write 1 to mux to the peripheral
 	controller
+
+There is a mapping between indices and GPIO connection IDs as follows
+	id	index 0
+	vbus	index 1
+	mux	index 2

View File

@@ -18,8 +18,8 @@ because gcc versions 4.5 and 4.6 are compiled by a C compiler,
 gcc-4.7 can be compiled by a C or a C++ compiler,
 and versions 4.8+ can only be compiled by a C++ compiler.

-Currently the GCC plugin infrastructure supports only the x86, arm and arm64
-architectures.
+Currently the GCC plugin infrastructure supports only the x86, arm, arm64 and
+powerpc architectures.

 This infrastructure was ported from grsecurity [6] and PaX [7].

View File

@@ -3216,7 +3216,6 @@ F:	drivers/platform/chrome/
 CISCO VIC ETHERNET NIC DRIVER
 M:	Christian Benvenuti <benve@cisco.com>
-M:	Sujith Sankar <ssujith@cisco.com>
 M:	Govindarajulu Varadarajan <_govind@gmx.com>
 M:	Neel Patel <neepatel@cisco.com>
 S:	Supported
@@ -7780,13 +7779,6 @@ F:	include/net/mac80211.h
 F:	net/mac80211/
 F:	drivers/net/wireless/mac80211_hwsim.[ch]

-MACVLAN DRIVER
-M:	Patrick McHardy <kaber@trash.net>
-L:	netdev@vger.kernel.org
-S:	Maintained
-F:	drivers/net/macvlan.c
-F:	include/linux/if_macvlan.h
-
 MAILBOX API
 M:	Jassi Brar <jassisinghbrar@gmail.com>
 L:	linux-kernel@vger.kernel.org
@@ -7859,6 +7851,8 @@ F:	drivers/net/ethernet/marvell/mvneta.*
 MARVELL MWIFIEX WIRELESS DRIVER
 M:	Amitkumar Karwar <akarwar@marvell.com>
 M:	Nishant Sarmukadam <nishants@marvell.com>
+M:	Ganapathi Bhat <gbhat@marvell.com>
+M:	Xinming Hu <huxm@marvell.com>
 L:	linux-wireless@vger.kernel.org
 S:	Maintained
 F:	drivers/net/wireless/marvell/mwifiex/
@@ -13397,14 +13391,6 @@ W:	https://linuxtv.org
 S:	Maintained
 F:	drivers/media/platform/vivid/*

-VLAN (802.1Q)
-M:	Patrick McHardy <kaber@trash.net>
-L:	netdev@vger.kernel.org
-S:	Maintained
-F:	drivers/net/macvlan.c
-F:	include/linux/if_*vlan.h
-F:	net/8021q/
-
 VLYNQ BUS
 M:	Florian Fainelli <f.fainelli@gmail.com>
 L:	openwrt-devel@lists.openwrt.org (subscribers-only)

View File

@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 11
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Fearless Coyote

 # *DOCUMENTATION*

View File

@@ -63,14 +63,14 @@
 			label = "home";
 			linux,code = <KEY_HOME>;
 			gpios = <&gpio3 7 GPIO_ACTIVE_HIGH>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};

 		button@1 {
 			label = "menu";
 			linux,code = <KEY_MENU>;
 			gpios = <&gpio3 8 GPIO_ACTIVE_HIGH>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};

View File

@@ -315,6 +315,13 @@
 			/* ID & VBUS GPIOs provided in board dts */
 		};
 	};
+
+	tpic2810: tpic2810@60 {
+		compatible = "ti,tpic2810";
+		reg = <0x60>;
+		gpio-controller;
+		#gpio-cells = <2>;
+	};
 };

 &mcspi3 {
@@ -330,13 +337,6 @@
 		spi-max-frequency = <1000000>;
 		spi-cpol;
 	};
-
-	tpic2810: tpic2810@60 {
-		compatible = "ti,tpic2810";
-		reg = <0x60>;
-		gpio-controller;
-		#gpio-cells = <2>;
-	};
 };

 &uart3 {

View File

@@ -66,14 +66,14 @@
 	timer@20200 {
 		compatible = "arm,cortex-a9-global-timer";
 		reg = <0x20200 0x100>;
-		interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
 		clocks = <&periph_clk>;
 	};

 	local-timer@20600 {
 		compatible = "arm,cortex-a9-twd-timer";
 		reg = <0x20600 0x100>;
-		interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_PPI 13 IRQ_TYPE_EDGE_RISING>;
 		clocks = <&periph_clk>;
 	};

View File

@@ -48,15 +48,14 @@
 	};

 	memory {
-		reg = <0x00000000 0x10000000>;
+		reg = <0x80000000 0x10000000>;
 	};
 };

 &uart0 {
-	clock-frequency = <62499840>;
+	status = "okay";
 };

 &uart1 {
-	clock-frequency = <62499840>;
 	status = "okay";
 };

View File

@@ -55,6 +55,7 @@
 	gpio-restart {
 		compatible = "gpio-restart";
 		gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+		open-source;
 		priority = <200>;
 	};
 };

View File

@@ -55,6 +55,7 @@
 	gpio-restart {
 		compatible = "gpio-restart";
 		gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+		open-source;
 		priority = <200>;
 	};
 };

View File

@@ -55,6 +55,7 @@
 	gpio-restart {
 		compatible = "gpio-restart";
 		gpios = <&gpioa 31 GPIO_ACTIVE_LOW>;
+		open-source;
 		priority = <200>;
 	};
 };

View File

@@ -55,6 +55,7 @@
 	gpio-restart {
 		compatible = "gpio-restart";
 		gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+		open-source;
 		priority = <200>;
 	};
 };

View File

@@ -55,6 +55,7 @@
 	gpio-restart {
 		compatible = "gpio-restart";
 		gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+		open-source;
 		priority = <200>;
 	};
 };

View File

@@ -55,6 +55,7 @@
 	gpio-restart {
 		compatible = "gpio-restart";
 		gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+		open-source;
 		priority = <200>;
 	};
 };

View File

@@ -55,6 +55,7 @@
 	gpio-restart {
 		compatible = "gpio-restart";
 		gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+		open-source;
 		priority = <200>;
 	};
 };

View File

@@ -121,11 +121,6 @@
 	};
 };

-&cpu0 {
-	arm-supply = <&sw1a_reg>;
-	soc-supply = <&sw1c_reg>;
-};
-
 &fec1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_enet1>;

View File

@@ -266,7 +266,7 @@
 	};

 	usb1: ohci@00400000 {
-		compatible = "atmel,sama5d2-ohci", "usb-ohci";
+		compatible = "atmel,at91rm9200-ohci", "usb-ohci";
 		reg = <0x00400000 0x100000>;
 		interrupts = <41 IRQ_TYPE_LEVEL_HIGH 2>;
 		clocks = <&uhphs_clk>, <&uhphs_clk>, <&uhpck>;

View File

@@ -14,6 +14,7 @@
 #include <dt-bindings/mfd/dbx500-prcmu.h>
 #include <dt-bindings/arm/ux500_pm_domains.h>
 #include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/clock/ste-ab8500.h>
 #include "skeleton.dtsi"

 / {
@@ -603,6 +604,11 @@
 			interrupt-controller;
 			#interrupt-cells = <2>;

+			ab8500_clock: clock-controller {
+				compatible = "stericsson,ab8500-clk";
+				#clock-cells = <1>;
+			};
+
 			ab8500_gpio: ab8500-gpio {
 				compatible = "stericsson,ab8500-gpio";
 				gpio-controller;
@@ -686,6 +692,8 @@
 			ab8500-pwm {
 				compatible = "stericsson,ab8500-pwm";
+				clocks = <&ab8500_clock AB8500_SYSCLK_INT>;
+				clock-names = "intclk";
 			};

 			ab8500-debugfs {
@@ -700,6 +708,9 @@
 				V-AMIC2-supply = <&ab8500_ldo_anamic2_reg>;
 				V-DMIC-supply = <&ab8500_ldo_dmic_reg>;

+				clocks = <&ab8500_clock AB8500_SYSCLK_AUDIO>;
+				clock-names = "audioclk";
+
 				stericsson,earpeice-cmv = <950>; /* Units in mV. */
 			};

@@ -1095,6 +1106,14 @@
 			status = "disabled";
 		};

+		sound {
+			compatible = "stericsson,snd-soc-mop500";
+			stericsson,cpu-dai = <&msp1 &msp3>;
+			stericsson,audio-codec = <&codec>;
+			clocks = <&prcmu_clk PRCMU_SYSCLK>, <&ab8500_clock AB8500_SYSCLK_ULP>, <&ab8500_clock AB8500_SYSCLK_INT>;
+			clock-names = "sysclk", "ulpclk", "intclk";
+		};
+
 		msp0: msp@80123000 {
 			compatible = "stericsson,ux500-msp-i2s";
 			reg = <0x80123000 0x1000>;

View File

@@ -186,15 +186,6 @@
 		status = "okay";
 	};

-	sound {
-		compatible = "stericsson,snd-soc-mop500";
-		stericsson,cpu-dai = <&msp1 &msp3>;
-		stericsson,audio-codec = <&codec>;
-		clocks = <&prcmu_clk PRCMU_SYSCLK>;
-		clock-names = "sysclk";
-	};
-
 	msp0: msp@80123000 {
 		pinctrl-names = "default";
 		pinctrl-0 = <&msp0_default_mode>;

View File

@@ -159,15 +159,6 @@
 			"", "", "", "", "", "", "", "";
 	};

-	sound {
-		compatible = "stericsson,snd-soc-mop500";
-		stericsson,cpu-dai = <&msp1 &msp3>;
-		stericsson,audio-codec = <&codec>;
-		clocks = <&prcmu_clk PRCMU_SYSCLK>;
-		clock-names = "sysclk";
-	};
-
 	msp0: msp@80123000 {
 		pinctrl-names = "default";
 		pinctrl-0 = <&msp0_default_mode>;

View File

@@ -167,7 +167,7 @@
 		reg = <8>;
 		label = "cpu";
 		ethernet = <&gmac>;
-		phy-mode = "rgmii";
+		phy-mode = "rgmii-txid";
 		fixed-link {
 			speed = <1000>;
 			full-duplex;

View File

@@ -495,7 +495,7 @@
 		resets = <&ccu RST_BUS_GPU>;

 		assigned-clocks = <&ccu CLK_GPU>;
-		assigned-clock-rates = <408000000>;
+		assigned-clock-rates = <384000000>;
 	};

 	gic: interrupt-controller@01c81000 {

View File

@@ -50,8 +50,6 @@
 	backlight: backlight {
 		compatible = "pwm-backlight";
-		pinctrl-names = "default";
-		pinctrl-0 = <&bl_en_pin>;
 		pwms = <&pwm 0 50000 PWM_POLARITY_INVERTED>;
 		brightness-levels = <0 10 20 30 40 50 60 70 80 90 100>;
 		default-brightness-level = <8>;
@@ -93,11 +91,6 @@
 };

 &pio {
-	bl_en_pin: bl_en_pin@0 {
-		pins = "PH6";
-		function = "gpio_in";
-	};
-
 	mmc0_cd_pin: mmc0_cd_pin@0 {
 		pins = "PB4";
 		function = "gpio_in";

View File

@@ -188,6 +188,7 @@ CONFIG_WL12XX=m
 CONFIG_WL18XX=m
 CONFIG_WLCORE_SPI=m
 CONFIG_WLCORE_SDIO=m
+CONFIG_INPUT_MOUSEDEV=m
CONFIG_INPUT_JOYDEV=m
 CONFIG_INPUT_EVDEV=m
 CONFIG_KEYBOARD_ATKBD=m

View File

@@ -289,6 +289,22 @@ static void at91_ddr_standby(void)
 	at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1);
 }

+static void sama5d3_ddr_standby(void)
+{
+	u32 lpr0;
+	u32 saved_lpr0;
+
+	saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR);
+	lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB;
+	lpr0 |= AT91_DDRSDRC_LPCB_POWER_DOWN;
+
+	at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0);
+
+	cpu_do_idle();
+
+	at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0);
+}
+
 /* We manage both DDRAM/SDRAM controllers, we need more than one value to
  * remember.
  */
@@ -323,7 +339,7 @@ static const struct of_device_id const ramc_ids[] __initconst = {
 	{ .compatible = "atmel,at91rm9200-sdramc", .data = at91rm9200_standby },
 	{ .compatible = "atmel,at91sam9260-sdramc", .data = at91sam9_sdram_standby },
 	{ .compatible = "atmel,at91sam9g45-ddramc", .data = at91_ddr_standby },
-	{ .compatible = "atmel,sama5d3-ddramc", .data = at91_ddr_standby },
+	{ .compatible = "atmel,sama5d3-ddramc", .data = sama5d3_ddr_standby },
 	{ /*sentinel*/ }
 };

View File

@ -241,6 +241,3 @@ obj-$(CONFIG_MACH_OMAP2_TUSB6010) += usb-tusb6010.o
onenand-$(CONFIG_MTD_ONENAND_OMAP2) := gpmc-onenand.o onenand-$(CONFIG_MTD_ONENAND_OMAP2) := gpmc-onenand.o
obj-y += $(onenand-m) $(onenand-y) obj-y += $(onenand-m) $(onenand-y)
nand-$(CONFIG_MTD_NAND_OMAP2) := gpmc-nand.o
obj-y += $(nand-m) $(nand-y)

View File

@@ -1,154 +0,0 @@
-/*
- * gpmc-nand.c
- *
- * Copyright (C) 2009 Texas Instruments
- * Vimal Singh <vimalsingh@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/omap-gpmc.h>
-#include <linux/mtd/nand.h>
-#include <linux/platform_data/mtd-nand-omap2.h>
-
-#include <asm/mach/flash.h>
-
-#include "soc.h"
-
-/* minimum size for IO mapping */
-#define	NAND_IO_SIZE	4
-
-static bool gpmc_hwecc_bch_capable(enum omap_ecc ecc_opt)
-{
-	/* platforms which support all ECC schemes */
-	if (soc_is_am33xx() || soc_is_am43xx() || cpu_is_omap44xx() ||
-		 soc_is_omap54xx() || soc_is_dra7xx())
-		return 1;
-
-	if (ecc_opt == OMAP_ECC_BCH4_CODE_HW_DETECTION_SW ||
-		 ecc_opt == OMAP_ECC_BCH8_CODE_HW_DETECTION_SW) {
-		if (cpu_is_omap24xx())
-			return 0;
-		else if (cpu_is_omap3630() && (GET_OMAP_REVISION() == 0))
-			return 0;
-		else
-			return 1;
-	}
-
-	/* OMAP3xxx do not have ELM engine, so cannot support ECC schemes
-	 * which require H/W based ECC error detection */
-	if ((cpu_is_omap34xx() || cpu_is_omap3630()) &&
-	    ((ecc_opt == OMAP_ECC_BCH4_CODE_HW) ||
-		 (ecc_opt == OMAP_ECC_BCH8_CODE_HW)))
-		return 0;
-
-	/* legacy platforms support only HAM1 (1-bit Hamming) ECC scheme */
-	if (ecc_opt == OMAP_ECC_HAM1_CODE_HW ||
-	    ecc_opt == OMAP_ECC_HAM1_CODE_SW)
-		return 1;
-	else
-		return 0;
-}
-
-/* This function will go away once the device-tree convertion is complete */
-static void gpmc_set_legacy(struct omap_nand_platform_data *gpmc_nand_data,
-			    struct gpmc_settings *s)
-{
-	/* Enable RD PIN Monitoring Reg */
-	if (gpmc_nand_data->dev_ready) {
-		s->wait_on_read = true;
-		s->wait_on_write = true;
-	}
-
-	if (gpmc_nand_data->devsize == NAND_BUSWIDTH_16)
-		s->device_width = GPMC_DEVWIDTH_16BIT;
-	else
-		s->device_width = GPMC_DEVWIDTH_8BIT;
-}
-
-int gpmc_nand_init(struct omap_nand_platform_data *gpmc_nand_data,
-		   struct gpmc_timings *gpmc_t)
-{
-	int err = 0;
-	struct gpmc_settings s;
-	struct platform_device *pdev;
-	struct resource gpmc_nand_res[] = {
-		{ .flags = IORESOURCE_MEM, },
-		{ .flags = IORESOURCE_IRQ, },
-		{ .flags = IORESOURCE_IRQ, },
-	};
-
-	BUG_ON(gpmc_nand_data->cs >= GPMC_CS_NUM);
-
-	err = gpmc_cs_request(gpmc_nand_data->cs, NAND_IO_SIZE,
-			      (unsigned long *)&gpmc_nand_res[0].start);
-	if (err < 0) {
-		pr_err("omap2-gpmc: Cannot request GPMC CS %d, error %d\n",
-		       gpmc_nand_data->cs, err);
-		return err;
-	}
-	gpmc_nand_res[0].end = gpmc_nand_res[0].start + NAND_IO_SIZE - 1;
-	gpmc_nand_res[1].start = gpmc_get_client_irq(GPMC_IRQ_FIFOEVENTENABLE);
-	gpmc_nand_res[2].start = gpmc_get_client_irq(GPMC_IRQ_COUNT_EVENT);
-
-	memset(&s, 0, sizeof(struct gpmc_settings));
-	gpmc_set_legacy(gpmc_nand_data, &s);
-
-	s.device_nand = true;
-
-	if (gpmc_t) {
-		err = gpmc_cs_set_timings(gpmc_nand_data->cs, gpmc_t, &s);
-		if (err < 0) {
-			pr_err("omap2-gpmc: Unable to set gpmc timings: %d\n",
-			       err);
-			return err;
-		}
-	}
-
-	err = gpmc_cs_program_settings(gpmc_nand_data->cs, &s);
-	if (err < 0)
-		goto out_free_cs;
-
-	err = gpmc_configure(GPMC_CONFIG_WP, 0);
-	if (err < 0)
-		goto out_free_cs;
-
-	if (!gpmc_hwecc_bch_capable(gpmc_nand_data->ecc_opt)) {
-		pr_err("omap2-nand: Unsupported NAND ECC scheme selected\n");
-		err = -EINVAL;
-		goto out_free_cs;
-	}
-
-	pdev = platform_device_alloc("omap2-nand", gpmc_nand_data->cs);
-	if (pdev) {
-		err = platform_device_add_resources(pdev, gpmc_nand_res,
-						    ARRAY_SIZE(gpmc_nand_res));
-		if (!err)
-			pdev->dev.platform_data = gpmc_nand_data;
-	} else {
-		err = -ENOMEM;
-	}
-
-	if (err)
-		goto out_free_pdev;
-
-	err = platform_device_add(pdev);
-	if (err) {
-		dev_err(&pdev->dev, "Unable to register NAND device\n");
-		goto out_free_pdev;
-	}
-
-	return 0;
-
-out_free_pdev:
-	platform_device_put(pdev);
-out_free_cs:
-	gpmc_cs_free(gpmc_nand_data->cs);
-
-	return err;
-}

View File

@@ -367,7 +367,7 @@ static int gpmc_onenand_setup(void __iomem *onenand_base, int *freq_ptr)
 	return ret;
 }

-void gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
+int gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
 {
 	int err;
 	struct device *dev = &gpmc_onenand_device.dev;
@@ -393,15 +393,17 @@ void gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
 	if (err < 0) {
 		dev_err(dev, "Cannot request GPMC CS %d, error %d\n",
 			gpmc_onenand_data->cs, err);
-		return;
+		return err;
 	}

 	gpmc_onenand_resource.end = gpmc_onenand_resource.start +
 						ONENAND_IO_SIZE - 1;

-	if (platform_device_register(&gpmc_onenand_device) < 0) {
+	err = platform_device_register(&gpmc_onenand_device);
+	if (err) {
 		dev_err(dev, "Unable to register OneNAND device\n");
 		gpmc_cs_free(gpmc_onenand_data->cs);
-		return;
 	}
+
+	return err;
 }

View File

@@ -17,6 +17,7 @@
 #include <linux/linkage.h>
 #include <linux/init.h>
+#include <asm/assembler.h>

 #include "omap44xx.h"

@@ -66,7 +67,7 @@ wait_2:	ldr	r2, =AUX_CORE_BOOT0_PA	@ read from AuxCoreBoot0
 	cmp	r0, r4
 	bne	wait_2
 	ldr	r12, =API_HYP_ENTRY
-	adr	r0, hyp_boot
+	badr	r0, hyp_boot
 	smc	#0
hyp_boot:
 	b	omap_secondary_startup

View File

@@ -2112,11 +2112,20 @@ static struct omap_hwmod_ocp_if omap3_l4_core__i2c3 = {
 };

 /* L4 CORE -> SR1 interface */
+static struct omap_hwmod_addr_space omap3_sr1_addr_space[] = {
+	{
+		.pa_start	= OMAP34XX_SR1_BASE,
+		.pa_end		= OMAP34XX_SR1_BASE + SZ_1K - 1,
+		.flags		= ADDR_TYPE_RT,
+	},
+	{ },
+};
+
 static struct omap_hwmod_ocp_if omap34xx_l4_core__sr1 = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap34xx_sr1_hwmod,
 	.clk		= "sr_l4_ick",
+	.addr		= omap3_sr1_addr_space,
 	.user		= OCP_USER_MPU,
 };

@@ -2124,15 +2133,25 @@ static struct omap_hwmod_ocp_if omap36xx_l4_core__sr1 = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap36xx_sr1_hwmod,
 	.clk		= "sr_l4_ick",
+	.addr		= omap3_sr1_addr_space,
 	.user		= OCP_USER_MPU,
 };

 /* L4 CORE -> SR1 interface */
+static struct omap_hwmod_addr_space omap3_sr2_addr_space[] = {
+	{
+		.pa_start	= OMAP34XX_SR2_BASE,
+		.pa_end		= OMAP34XX_SR2_BASE + SZ_1K - 1,
+		.flags		= ADDR_TYPE_RT,
+	},
+	{ },
+};
+
 static struct omap_hwmod_ocp_if omap34xx_l4_core__sr2 = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap34xx_sr2_hwmod,
 	.clk		= "sr_l4_ick",
+	.addr		= omap3_sr2_addr_space,
 	.user		= OCP_USER_MPU,
 };

@@ -2140,6 +2159,7 @@ static struct omap_hwmod_ocp_if omap36xx_l4_core__sr2 = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap36xx_sr2_hwmod,
 	.clk		= "sr_l4_ick",
+	.addr		= omap3_sr2_addr_space,
 	.user		= OCP_USER_MPU,
 };

@@ -3111,16 +3131,20 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_hwmod_ocp_ifs[] __initdata = {
  * Return: 0 if device named @dev_name is not likely to be accessible,
  * or 1 if it is likely to be accessible.
  */
-static int __init omap3xxx_hwmod_is_hs_ip_block_usable(struct device_node *bus,
-						       const char *dev_name)
+static bool __init omap3xxx_hwmod_is_hs_ip_block_usable(struct device_node *bus,
+							const char *dev_name)
 {
+	struct device_node *node;
+	bool available;
+
 	if (!bus)
-		return (omap_type() == OMAP2_DEVICE_TYPE_GP) ? 1 : 0;
+		return omap_type() == OMAP2_DEVICE_TYPE_GP;

-	if (of_device_is_available(of_find_node_by_name(bus, dev_name)))
-		return 1;
+	node = of_get_child_by_name(bus, dev_name);
+	available = of_device_is_available(node);
+	of_node_put(node);

-	return 0;
+	return available;
 }

 int __init omap3xxx_hwmod_init(void)
@@ -3189,15 +3213,20 @@ int __init omap3xxx_hwmod_init(void)

 	if (h_sham && omap3xxx_hwmod_is_hs_ip_block_usable(bus, "sham")) {
 		r = omap_hwmod_register_links(h_sham);
-		if (r < 0)
+		if (r < 0) {
+			of_node_put(bus);
 			return r;
+		}
 	}

 	if (h_aes && omap3xxx_hwmod_is_hs_ip_block_usable(bus, "aes")) {
 		r = omap_hwmod_register_links(h_aes);
-		if (r < 0)
+		if (r < 0) {
+			of_node_put(bus);
 			return r;
+		}
 	}

+	of_node_put(bus);
+
 	/*
 	 * Register hwmod links specific to certain ES levels of a

View File

@@ -114,6 +114,7 @@
 	pcie0: pcie@20020000 {
 		compatible = "brcm,iproc-pcie";
 		reg = <0 0x20020000 0 0x1000>;
+		dma-coherent;

 		#interrupt-cells = <1>;
 		interrupt-map-mask = <0 0 0 0>;
@@ -144,6 +145,7 @@
 	pcie4: pcie@50020000 {
 		compatible = "brcm,iproc-pcie";
 		reg = <0 0x50020000 0 0x1000>;
+		dma-coherent;

 		#interrupt-cells = <1>;
 		interrupt-map-mask = <0 0 0 0>;
@@ -174,6 +176,7 @@
 	pcie8: pcie@60c00000 {
 		compatible = "brcm,iproc-pcie-paxc";
 		reg = <0 0x60c00000 0 0x1000>;
+		dma-coherent;

 		linux,pci-domain = <8>;
 		bus-range = <0x0 0x1>;
@@ -203,6 +206,7 @@
 			<0x61030000 0x100>;
 		reg-names = "amac_base", "idm_base", "nicpm_base";
 		interrupts = <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>;
+		dma-coherent;
 		phy-handle = <&gphy0>;
 		phy-mode = "rgmii";
 		status = "disabled";
@@ -213,6 +217,7 @@
 		reg = <0x612c0000 0x445>;	/* PDC FS0 regs */
 		interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
 		#mbox-cells = <1>;
+		dma-coherent;
 		brcm,rx-status-len = <32>;
 		brcm,use-bcm-hdr;
 	};
@@ -222,6 +227,7 @@
 		reg = <0x612e0000 0x445>;	/* PDC FS1 regs */
 		interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
 		#mbox-cells = <1>;
+		dma-coherent;
 		brcm,rx-status-len = <32>;
 		brcm,use-bcm-hdr;
 	};
@@ -231,6 +237,7 @@
 		reg = <0x61300000 0x445>;	/* PDC FS2 regs */
 		interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>;
 		#mbox-cells = <1>;
+		dma-coherent;
 		brcm,rx-status-len = <32>;
 		brcm,use-bcm-hdr;
 	};
@@ -240,6 +247,7 @@
 		reg = <0x61320000 0x445>;	/* PDC FS3 regs */
 		interrupts = <GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>;
 		#mbox-cells = <1>;
+		dma-coherent;
 		brcm,rx-status-len = <32>;
 		brcm,use-bcm-hdr;
 	};
@@ -644,6 +652,7 @@
 	sata: ahci@663f2000 {
 		compatible = "brcm,iproc-ahci", "generic-ahci";
 		reg = <0x663f2000 0x1000>;
+		dma-coherent;
 		reg-names = "ahci";
 		interrupts = <GIC_SPI 438 IRQ_TYPE_LEVEL_HIGH>;
 		#address-cells = <1>;
@@ -667,6 +676,7 @@
 		compatible = "brcm,sdhci-iproc-cygnus";
 		reg = <0x66420000 0x100>;
 		interrupts = <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>;
+		dma-coherent;
 		bus-width = <8>;
 		clocks = <&genpll_sw BCM_NS2_GENPLL_SW_SDIO_CLK>;
 		status = "disabled";
@@ -676,6 +686,7 @@
 		compatible = "brcm,sdhci-iproc-cygnus";
 		reg = <0x66430000 0x100>;
 		interrupts = <GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH>;
+		dma-coherent;
 		bus-width = <8>;
 		clocks = <&genpll_sw BCM_NS2_GENPLL_SW_SDIO_CLK>;
 		status = "disabled";

View File

@@ -44,7 +44,7 @@
 #define __ARM_NR_compat_cacheflush	(__ARM_NR_COMPAT_BASE+2)
 #define __ARM_NR_compat_set_tls	(__ARM_NR_COMPAT_BASE+5)

-#define __NR_compat_syscalls		394
+#define __NR_compat_syscalls		398
 #endif

 #define __ARCH_WANT_SYS_CLONE

View File

@@ -809,6 +809,14 @@ __SYSCALL(__NR_copy_file_range, sys_copy_file_range)
 __SYSCALL(__NR_preadv2, compat_sys_preadv2)
 #define __NR_pwritev2 393
 __SYSCALL(__NR_pwritev2, compat_sys_pwritev2)
+#define __NR_pkey_mprotect 394
+__SYSCALL(__NR_pkey_mprotect, sys_pkey_mprotect)
+#define __NR_pkey_alloc 395
+__SYSCALL(__NR_pkey_alloc, sys_pkey_alloc)
+#define __NR_pkey_free 396
+__SYSCALL(__NR_pkey_free, sys_pkey_free)
+#define __NR_statx 397
+__SYSCALL(__NR_statx, sys_statx)

 /*
  * Please add new compat syscalls above this comment and update

View File

@@ -131,11 +131,15 @@ u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset)
 	/*
 	 * The kernel Image should not extend across a 1GB/32MB/512MB alignment
 	 * boundary (for 4KB/16KB/64KB granule kernels, respectively). If this
-	 * happens, increase the KASLR offset by the size of the kernel image.
+	 * happens, increase the KASLR offset by the size of the kernel image
+	 * rounded up by SWAPPER_BLOCK_SIZE.
 	 */
 	if ((((u64)_text + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT) !=
-	    (((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT))
-		offset = (offset + (u64)(_end - _text)) & mask;
+	    (((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT)) {
+		u64 kimg_sz = _end - _text;
+		offset = (offset + round_up(kimg_sz, SWAPPER_BLOCK_SIZE))
+			& mask;
+	}

 	if (IS_ENABLED(CONFIG_KASAN))
 		/*

View File

@@ -449,9 +449,23 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
_GLOBAL(pnv_wakeup_tb_loss)
 	ld	r1,PACAR1(r13)
 	/*
-	 * Before entering any idle state, the NVGPRs are saved in the stack
-	 * and they are restored before switching to the process context. Hence
-	 * until they are restored, they are free to be used.
+	 * Before entering any idle state, the NVGPRs are saved in the stack.
+	 * If there was a state loss, or PACA_NAPSTATELOST was set, then the
+	 * NVGPRs are restored. If we are here, it is likely that state is lost,
+	 * but not guaranteed -- neither ISA207 nor ISA300 tests to reach
+	 * here are the same as the test to restore NVGPRS:
+	 * PACA_THREAD_IDLE_STATE test for ISA207, PSSCR test for ISA300,
+	 * and SRR1 test for restoring NVGPRs.
+	 *
+	 * We are about to clobber NVGPRs now, so set NAPSTATELOST to
+	 * guarantee they will always be restored. This might be tightened
+	 * with careful reading of specs (particularly for ISA300) but this
+	 * is already a slow wakeup path and it's simpler to be safe.
+	 */
+	li	r0,1
+	stb	r0,PACA_NAPSTATELOST(r13)
+
+	/*
 	 *
 	 * Save SRR1 and LR in NVGPRs as they might be clobbered in
 	 * opal_call() (called in CHECK_HMI_INTERRUPT). SRR1 is required

View File

@@ -397,8 +397,7 @@ static void early_check_vec5(void)
 void __init mmu_early_init_devtree(void)
 {
 	/* Disable radix mode based on kernel command line. */
-	/* We don't yet have the machinery to do radix as a guest. */
-	if (disable_radix || !(mfmsr() & MSR_HV))
+	if (disable_radix)
 		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;

 	/*

View File

@@ -697,17 +697,8 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
 {
 	struct blk_mq_timeout_data *data = priv;

-	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
-		/*
-		 * If a request wasn't started before the queue was
-		 * marked dying, kill it here or it'll go unnoticed.
-		 */
-		if (unlikely(blk_queue_dying(rq->q))) {
-			rq->errors = -EIO;
-			blk_mq_end_request(rq, rq->errors);
-		}
+	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
 		return;
-	}

 	if (time_after_eq(jiffies, rq->deadline)) {
 		if (!blk_mark_rq_complete(rq))

View File

@@ -30,11 +30,11 @@ static void blk_stat_flush_batch(struct blk_rq_stat *stat)
 static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
 {
+	blk_stat_flush_batch(src);
+
 	if (!src->nr_samples)
 		return;

-	blk_stat_flush_batch(src);
-
 	dst->min = min(dst->min, src->min);
 	dst->max = max(dst->max, src->max);

View File

@@ -30,7 +30,7 @@ static bool qdf2400_erratum_44_present(struct acpi_table_header *h)
 		return true;

 	if (!memcmp(h->oem_table_id, "QDF2400 ", ACPI_OEM_TABLE_ID_SIZE) &&
-			h->oem_revision == 0)
+			h->oem_revision == 1)
 		return true;

 	return false;

View File

@@ -218,6 +218,7 @@ static const struct of_device_id img_ascii_lcd_matches[] = {
 	{ .compatible = "img,boston-lcd", .data = &boston_config },
 	{ .compatible = "mti,malta-lcd", .data = &malta_config },
 	{ .compatible = "mti,sead3-lcd", .data = &sead3_config },
+	{ /* sentinel */ }
 };

 /**
/** /**

View File

@@ -344,7 +344,8 @@ config BT_WILINK
 config BT_QCOMSMD
 	tristate "Qualcomm SMD based HCI support"
-	depends on (QCOM_SMD && QCOM_WCNSS_CTRL) || COMPILE_TEST
+	depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n)
+	depends on QCOM_WCNSS_CTRL || (COMPILE_TEST && QCOM_WCNSS_CTRL=n)
 	select BT_QCA
 	help
 	  Qualcomm SMD based HCI driver.

View File

@@ -55,6 +55,7 @@ MODULE_DEVICE_TABLE(pci, pci_tbl);
 struct amd768_priv {
 	void __iomem *iobase;
 	struct pci_dev *pcidev;
+	u32 pmbase;
 };

 static int amd_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
@@ -148,33 +149,58 @@ found:
 	if (pmbase == 0)
 		return -EIO;

-	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;

-	if (!devm_request_region(&pdev->dev, pmbase + PMBASE_OFFSET,
-				PMBASE_SIZE, DRV_NAME)) {
+	if (!request_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE, DRV_NAME)) {
 		dev_err(&pdev->dev, DRV_NAME " region 0x%x already in use!\n",
 			pmbase + 0xF0);
-		return -EBUSY;
+		err = -EBUSY;
+		goto out;
 	}

-	priv->iobase = devm_ioport_map(&pdev->dev, pmbase + PMBASE_OFFSET,
-			PMBASE_SIZE);
+	priv->iobase = ioport_map(pmbase + PMBASE_OFFSET, PMBASE_SIZE);
 	if (!priv->iobase) {
 		pr_err(DRV_NAME "Cannot map ioport\n");
-		return -ENOMEM;
+		err = -EINVAL;
+		goto err_iomap;
 	}

 	amd_rng.priv = (unsigned long)priv;
+	priv->pmbase = pmbase;
 	priv->pcidev = pdev;

 	pr_info(DRV_NAME " detected\n");
-	return devm_hwrng_register(&pdev->dev, &amd_rng);
+	err = hwrng_register(&amd_rng);
+	if (err) {
+		pr_err(DRV_NAME " registering failed (%d)\n", err);
+		goto err_hwrng;
+	}
+	return 0;
+
+err_hwrng:
+	ioport_unmap(priv->iobase);
+err_iomap:
+	release_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE);
+out:
+	kfree(priv);
+	return err;
 }

 static void __exit mod_exit(void)
 {
+	struct amd768_priv *priv;
+
+	priv = (struct amd768_priv *)amd_rng.priv;
+
+	hwrng_unregister(&amd_rng);
+
+	ioport_unmap(priv->iobase);
+
+	release_region(priv->pmbase + PMBASE_OFFSET, PMBASE_SIZE);
+
+	kfree(priv);
 }

 module_init(mod_init);

View File

@@ -31,6 +31,9 @@
 #include <linux/module.h>
 #include <linux/pci.h>

+
+#define PFX	KBUILD_MODNAME ": "
+
 #define GEODE_RNG_DATA_REG   0x50
 #define GEODE_RNG_STATUS_REG 0x54

@@ -82,6 +85,7 @@ static struct hwrng geode_rng = {

 static int __init mod_init(void)
 {
+	int err = -ENODEV;
 	struct pci_dev *pdev = NULL;
 	const struct pci_device_id *ent;
 	void __iomem *mem;
@@ -89,27 +93,43 @@ static int __init mod_init(void)

 	for_each_pci_dev(pdev) {
 		ent = pci_match_id(pci_tbl, pdev);
-		if (ent) {
-			rng_base = pci_resource_start(pdev, 0);
-			if (rng_base == 0)
-				return -ENODEV;
-
-			mem = devm_ioremap(&pdev->dev, rng_base, 0x58);
-			if (!mem)
-				return -ENOMEM;
-			geode_rng.priv = (unsigned long)mem;
-
-			pr_info("AMD Geode RNG detected\n");
-			return devm_hwrng_register(&pdev->dev, &geode_rng);
-		}
+		if (ent)
+			goto found;
 	}
-
 	/* Device not found. */
-	return -ENODEV;
+	goto out;
+
+found:
+	rng_base = pci_resource_start(pdev, 0);
+	if (rng_base == 0)
+		goto out;
+	err = -ENOMEM;
+	mem = ioremap(rng_base, 0x58);
+	if (!mem)
+		goto out;
+	geode_rng.priv = (unsigned long)mem;
+
+	pr_info("AMD Geode RNG detected\n");
+	err = hwrng_register(&geode_rng);
+	if (err) {
+		pr_err(PFX "RNG registering failed (%d)\n",
+		       err);
+		goto err_unmap;
+	}
+out:
+	return err;
+
+err_unmap:
+	iounmap(mem);
+	goto out;
 }

 static void __exit mod_exit(void)
 {
+	void __iomem *mem = (void __iomem *)geode_rng.priv;
+
 	hwrng_unregister(&geode_rng);
+	iounmap(mem);
 }

 module_init(mod_init);

View File

@@ -84,11 +84,14 @@ struct pp_struct {
 	struct ieee1284_info state;
 	struct ieee1284_info saved_state;
 	long default_inactivity;
+	int index;
 };

 /* should we use PARDEVICE_MAX here? */
 static struct device *devices[PARPORT_MAX];

+static DEFINE_IDA(ida_index);
+
 /* pp_struct.flags bitfields */
 #define PP_CLAIMED    (1<<0)
 #define PP_EXCL       (1<<1)
@@ -290,7 +293,7 @@ static int register_device(int minor, struct pp_struct *pp)
 	struct pardevice *pdev = NULL;
 	char *name;
 	struct pardev_cb ppdev_cb;
-	int rc = 0;
+	int rc = 0, index;

 	name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor);
 	if (name == NULL)
@@ -303,20 +306,23 @@ static int register_device(int minor, struct pp_struct *pp)
 		goto err;
 	}

+	index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL);
 	memset(&ppdev_cb, 0, sizeof(ppdev_cb));
 	ppdev_cb.irq_func = pp_irq;
 	ppdev_cb.flags = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0;
 	ppdev_cb.private = pp;
-	pdev = parport_register_dev_model(port, name, &ppdev_cb, minor);
+	pdev = parport_register_dev_model(port, name, &ppdev_cb, index);
 	parport_put_port(port);

 	if (!pdev) {
 		pr_warn("%s: failed to register device!\n", name);
 		rc = -ENXIO;
+		ida_simple_remove(&ida_index, index);
 		goto err;
 	}

 	pp->pdev = pdev;
+	pp->index = index;
 	dev_dbg(&pdev->dev, "registered pardevice\n");
 err:
 	kfree(name);
@@ -755,6 +761,7 @@ static int pp_release(struct inode *inode, struct file *file)

 	if (pp->pdev) {
 		parport_unregister_device(pp->pdev);
+		ida_simple_remove(&ida_index, pp->index);
 		pp->pdev = NULL;
 		pr_debug(CHRDEV "%x: unregistered pardevice\n", minor);
 	}

View File

@@ -2502,7 +2502,7 @@ struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,

 	clk->core = hw->core;
 	clk->dev_id = dev_id;
-	clk->con_id = con_id;
+	clk->con_id = kstrdup_const(con_id, GFP_KERNEL);
 	clk->max_rate = ULONG_MAX;

 	clk_prepare_lock();
@@ -2518,6 +2518,7 @@ void __clk_free_clk(struct clk *clk)
 	hlist_del(&clk->clks_node);
 	clk_prepare_unlock();

+	kfree_const(clk->con_id);
 	kfree(clk);
 }

View File

@ -127,7 +127,7 @@ PNAME(mux_ddrphy_p) = { "dpll_ddr", "gpll_ddr" };
PNAME(mux_pll_src_3plls_p) = { "apll", "dpll", "gpll" }; PNAME(mux_pll_src_3plls_p) = { "apll", "dpll", "gpll" };
PNAME(mux_timer_p) = { "xin24m", "pclk_peri_src" }; PNAME(mux_timer_p) = { "xin24m", "pclk_peri_src" };
PNAME(mux_pll_src_apll_dpll_gpll_usb480m_p) = { "apll", "dpll", "gpll" "usb480m" }; PNAME(mux_pll_src_apll_dpll_gpll_usb480m_p) = { "apll", "dpll", "gpll", "usb480m" };
PNAME(mux_mmc_src_p) = { "apll", "dpll", "gpll", "xin24m" }; PNAME(mux_mmc_src_p) = { "apll", "dpll", "gpll", "xin24m" };
PNAME(mux_i2s_pre_p) = { "i2s_src", "i2s_frac", "ext_i2s", "xin12m" }; PNAME(mux_i2s_pre_p) = { "i2s_src", "i2s_frac", "ext_i2s", "xin12m" };
@ -450,6 +450,13 @@ static void __init rk3036_clk_init(struct device_node *np)
return; return;
} }
/*
* Make uart_pll_clk a child of the gpll, as all other sources are
* not that usable / stable.
*/
writel_relaxed(HIWORD_UPDATE(0x2, 0x3, 10),
reg_base + RK2928_CLKSEL_CON(13));
ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS); ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
if (IS_ERR(ctx)) { if (IS_ERR(ctx)) {
pr_err("%s: rockchip clk init failed\n", __func__); pr_err("%s: rockchip clk init failed\n", __func__);

View File

@@ -80,6 +80,7 @@ config SUN6I_A31_CCU
 	select SUNXI_CCU_DIV
 	select SUNXI_CCU_NK
 	select SUNXI_CCU_NKM
+	select SUNXI_CCU_NKMP
 	select SUNXI_CCU_NM
 	select SUNXI_CCU_MP
 	select SUNXI_CCU_PHASE

View File

@ -566,7 +566,7 @@ static SUNXI_CCU_M_WITH_GATE(gpu_clk, "gpu", "pll-gpu",
0x1a0, 0, 3, BIT(31), CLK_SET_RATE_PARENT); 0x1a0, 0, 3, BIT(31), CLK_SET_RATE_PARENT);
/* Fixed Factor clocks */ /* Fixed Factor clocks */
static CLK_FIXED_FACTOR(osc12M_clk, "osc12M", "osc24M", 1, 2, 0); static CLK_FIXED_FACTOR(osc12M_clk, "osc12M", "osc24M", 2, 1, 0);
/* We hardcode the divider to 4 for now */ /* We hardcode the divider to 4 for now */
static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio", static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio",

View File

@ -608,7 +608,7 @@ static SUNXI_CCU_M_WITH_MUX_GATE(hdmi_clk, "hdmi", lcd_ch1_parents,
0x150, 0, 4, 24, 2, BIT(31), 0x150, 0, 4, 24, 2, BIT(31),
CLK_SET_RATE_PARENT); CLK_SET_RATE_PARENT);
static SUNXI_CCU_GATE(hdmi_ddc_clk, "hdmi-ddc", "osc24M", 0x150, BIT(31), 0); static SUNXI_CCU_GATE(hdmi_ddc_clk, "hdmi-ddc", "osc24M", 0x150, BIT(30), 0);
static SUNXI_CCU_GATE(ps_clk, "ps", "lcd1-ch1", 0x140, BIT(31), 0); static SUNXI_CCU_GATE(ps_clk, "ps", "lcd1-ch1", 0x140, BIT(31), 0);

View File

@@ -85,6 +85,10 @@ static unsigned long ccu_mp_recalc_rate(struct clk_hw *hw,
 	unsigned int m, p;
 	u32 reg;

+	/* Adjust parent_rate according to pre-dividers */
+	ccu_mux_helper_adjust_parent_for_prediv(&cmp->common, &cmp->mux,
+						-1, &parent_rate);
+
 	reg = readl(cmp->common.base + cmp->common.reg);

 	m = reg >> cmp->m.shift;
@@ -117,6 +121,10 @@ static int ccu_mp_set_rate(struct clk_hw *hw, unsigned long rate,
 	unsigned int m, p;
 	u32 reg;

+	/* Adjust parent_rate according to pre-dividers */
+	ccu_mux_helper_adjust_parent_for_prediv(&cmp->common, &cmp->mux,
+						-1, &parent_rate);
+
 	max_m = cmp->m.max ?: 1 << cmp->m.width;
 	max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1);

View File

@@ -107,7 +107,7 @@ static unsigned long ccu_nkmp_recalc_rate(struct clk_hw *hw,
 	p = reg >> nkmp->p.shift;
 	p &= (1 << nkmp->p.width) - 1;

-	return parent_rate * n * k >> p / m;
+	return (parent_rate * n * k >> p) / m;
 }

 static long ccu_nkmp_round_rate(struct clk_hw *hw, unsigned long rate,

View File

@@ -1184,6 +1184,9 @@ static int cpufreq_online(unsigned int cpu)
 		for_each_cpu(j, policy->related_cpus)
 			per_cpu(cpufreq_cpu_data, j) = policy;
 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+	} else {
+		policy->min = policy->user_policy.min;
+		policy->max = policy->user_policy.max;
 	}

 	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {

View File

@ -364,9 +364,7 @@ static bool driver_registered __read_mostly;
static bool acpi_ppc; static bool acpi_ppc;
#endif #endif
static struct perf_limits performance_limits; static struct perf_limits global;
static struct perf_limits powersave_limits;
static struct perf_limits *limits;
static void intel_pstate_init_limits(struct perf_limits *limits) static void intel_pstate_init_limits(struct perf_limits *limits)
{ {
@ -377,14 +375,6 @@ static void intel_pstate_init_limits(struct perf_limits *limits)
limits->max_sysfs_pct = 100; limits->max_sysfs_pct = 100;
} }
static void intel_pstate_set_performance_limits(struct perf_limits *limits)
{
intel_pstate_init_limits(limits);
limits->min_perf_pct = 100;
limits->min_perf = int_ext_tofp(1);
limits->min_sysfs_pct = 100;
}
static DEFINE_MUTEX(intel_pstate_driver_lock); static DEFINE_MUTEX(intel_pstate_driver_lock);
static DEFINE_MUTEX(intel_pstate_limits_lock); static DEFINE_MUTEX(intel_pstate_limits_lock);
@ -507,7 +497,7 @@ static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
* correct max turbo frequency based on the turbo state. * correct max turbo frequency based on the turbo state.
* Also need to convert to MHz as _PSS freq is in MHz. * Also need to convert to MHz as _PSS freq is in MHz.
*/ */
if (!limits->turbo_disabled) if (!global.turbo_disabled)
cpu->acpi_perf_data.states[0].core_frequency = cpu->acpi_perf_data.states[0].core_frequency =
policy->cpuinfo.max_freq / 1000; policy->cpuinfo.max_freq / 1000;
cpu->valid_pss_table = true; cpu->valid_pss_table = true;
@ -626,7 +616,7 @@ static inline void update_turbo_state(void)
cpu = all_cpu_data[0]; cpu = all_cpu_data[0];
rdmsrl(MSR_IA32_MISC_ENABLE, misc_en); rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
limits->turbo_disabled = global.turbo_disabled =
(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE || (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
cpu->pstate.max_pstate == cpu->pstate.turbo_pstate); cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
} }
@ -851,7 +841,7 @@ static struct freq_attr *hwp_cpufreq_attrs[] = {
static void intel_pstate_hwp_set(struct cpufreq_policy *policy) static void intel_pstate_hwp_set(struct cpufreq_policy *policy)
{ {
int min, hw_min, max, hw_max, cpu; int min, hw_min, max, hw_max, cpu;
struct perf_limits *perf_limits = limits; struct perf_limits *perf_limits = &global;
u64 value, cap; u64 value, cap;
for_each_cpu(cpu, policy->cpus) { for_each_cpu(cpu, policy->cpus) {
@ -863,19 +853,22 @@ static void intel_pstate_hwp_set(struct cpufreq_policy *policy)
rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap); rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
hw_min = HWP_LOWEST_PERF(cap); hw_min = HWP_LOWEST_PERF(cap);
if (limits->no_turbo) if (global.no_turbo)
hw_max = HWP_GUARANTEED_PERF(cap); hw_max = HWP_GUARANTEED_PERF(cap);
else else
hw_max = HWP_HIGHEST_PERF(cap); hw_max = HWP_HIGHEST_PERF(cap);
min = fp_ext_toint(hw_max * perf_limits->min_perf); max = fp_ext_toint(hw_max * perf_limits->max_perf);
if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
min = max;
else
min = fp_ext_toint(hw_max * perf_limits->min_perf);
rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value); rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
value &= ~HWP_MIN_PERF(~0L); value &= ~HWP_MIN_PERF(~0L);
value |= HWP_MIN_PERF(min); value |= HWP_MIN_PERF(min);
max = fp_ext_toint(hw_max * perf_limits->max_perf);
value &= ~HWP_MAX_PERF(~0L); value &= ~HWP_MAX_PERF(~0L);
value |= HWP_MAX_PERF(max); value |= HWP_MAX_PERF(max);
@ -968,20 +961,11 @@ static int intel_pstate_resume(struct cpufreq_policy *policy)
} }
static void intel_pstate_update_policies(void) static void intel_pstate_update_policies(void)
__releases(&intel_pstate_limits_lock)
__acquires(&intel_pstate_limits_lock)
{ {
struct perf_limits *saved_limits = limits;
int cpu; int cpu;
mutex_unlock(&intel_pstate_limits_lock);
for_each_possible_cpu(cpu) for_each_possible_cpu(cpu)
cpufreq_update_policy(cpu); cpufreq_update_policy(cpu);
mutex_lock(&intel_pstate_limits_lock);
limits = saved_limits;
} }
/************************** debugfs begin ************************/ /************************** debugfs begin ************************/
@ -1060,7 +1044,7 @@ static void intel_pstate_debug_hide_params(void)
static ssize_t show_##file_name \ static ssize_t show_##file_name \
(struct kobject *kobj, struct attribute *attr, char *buf) \ (struct kobject *kobj, struct attribute *attr, char *buf) \
{ \ { \
return sprintf(buf, "%u\n", limits->object); \ return sprintf(buf, "%u\n", global.object); \
} }
static ssize_t intel_pstate_show_status(char *buf); static ssize_t intel_pstate_show_status(char *buf);
@ -1151,10 +1135,10 @@ static ssize_t show_no_turbo(struct kobject *kobj,
} }
update_turbo_state(); update_turbo_state();
if (limits->turbo_disabled) if (global.turbo_disabled)
ret = sprintf(buf, "%u\n", limits->turbo_disabled); ret = sprintf(buf, "%u\n", global.turbo_disabled);
else else
ret = sprintf(buf, "%u\n", limits->no_turbo); ret = sprintf(buf, "%u\n", global.no_turbo);
mutex_unlock(&intel_pstate_driver_lock); mutex_unlock(&intel_pstate_driver_lock);
@ -1181,19 +1165,19 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
mutex_lock(&intel_pstate_limits_lock); mutex_lock(&intel_pstate_limits_lock);
update_turbo_state(); update_turbo_state();
if (limits->turbo_disabled) { if (global.turbo_disabled) {
pr_warn("Turbo disabled by BIOS or unavailable on processor\n"); pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
mutex_unlock(&intel_pstate_limits_lock); mutex_unlock(&intel_pstate_limits_lock);
mutex_unlock(&intel_pstate_driver_lock); mutex_unlock(&intel_pstate_driver_lock);
return -EPERM; return -EPERM;
} }
limits->no_turbo = clamp_t(int, input, 0, 1); global.no_turbo = clamp_t(int, input, 0, 1);
intel_pstate_update_policies();
mutex_unlock(&intel_pstate_limits_lock); mutex_unlock(&intel_pstate_limits_lock);
intel_pstate_update_policies();
mutex_unlock(&intel_pstate_driver_lock); mutex_unlock(&intel_pstate_driver_lock);
return count; return count;
@@ -1218,19 +1202,16 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
 	mutex_lock(&intel_pstate_limits_lock);
 
-	limits->max_sysfs_pct = clamp_t(int, input, 0 , 100);
-	limits->max_perf_pct = min(limits->max_policy_pct,
-				   limits->max_sysfs_pct);
-	limits->max_perf_pct = max(limits->min_policy_pct,
-				   limits->max_perf_pct);
-	limits->max_perf_pct = max(limits->min_perf_pct,
-				   limits->max_perf_pct);
-	limits->max_perf = percent_ext_fp(limits->max_perf_pct);
-
-	intel_pstate_update_policies();
+	global.max_sysfs_pct = clamp_t(int, input, 0 , 100);
+	global.max_perf_pct = min(global.max_policy_pct, global.max_sysfs_pct);
+	global.max_perf_pct = max(global.min_policy_pct, global.max_perf_pct);
+	global.max_perf_pct = max(global.min_perf_pct, global.max_perf_pct);
+	global.max_perf = percent_ext_fp(global.max_perf_pct);
 
 	mutex_unlock(&intel_pstate_limits_lock);
 
+	intel_pstate_update_policies();
+
 	mutex_unlock(&intel_pstate_driver_lock);
 
 	return count;
@@ -1255,19 +1236,16 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
 	mutex_lock(&intel_pstate_limits_lock);
 
-	limits->min_sysfs_pct = clamp_t(int, input, 0 , 100);
-	limits->min_perf_pct = max(limits->min_policy_pct,
-				   limits->min_sysfs_pct);
-	limits->min_perf_pct = min(limits->max_policy_pct,
-				   limits->min_perf_pct);
-	limits->min_perf_pct = min(limits->max_perf_pct,
-				   limits->min_perf_pct);
-	limits->min_perf = percent_ext_fp(limits->min_perf_pct);
-
-	intel_pstate_update_policies();
+	global.min_sysfs_pct = clamp_t(int, input, 0 , 100);
+	global.min_perf_pct = max(global.min_policy_pct, global.min_sysfs_pct);
+	global.min_perf_pct = min(global.max_policy_pct, global.min_perf_pct);
+	global.min_perf_pct = min(global.max_perf_pct, global.min_perf_pct);
+	global.min_perf = percent_ext_fp(global.min_perf_pct);
 
 	mutex_unlock(&intel_pstate_limits_lock);
 
+	intel_pstate_update_policies();
+
 	mutex_unlock(&intel_pstate_driver_lock);
 
 	return count;
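
Both stores clamp the user value into [0, 100] and then order it against the policy limits and the opposite bound; the net effect is easiest to see in isolation. An illustrative helper mirroring the min-side clamping above:

    static int effective_min_pct(int sysfs_pct, int min_policy_pct,
    				 int max_policy_pct, int max_perf_pct)
    {
    	int pct = clamp_t(int, sysfs_pct, 0, 100);

    	pct = max(min_policy_pct, pct);	/* not below the policy floor */
    	pct = min(max_policy_pct, pct);	/* not above the policy ceiling */
    	return min(max_perf_pct, pct);	/* never cross the max limit */
    }
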
@@ -1387,7 +1365,7 @@ static u64 atom_get_val(struct cpudata *cpudata, int pstate)
 	u32 vid;
 
 	val = (u64)pstate << 8;
-	if (limits->no_turbo && !limits->turbo_disabled)
+	if (global.no_turbo && !global.turbo_disabled)
 		val |= (u64)1 << 32;
 
 	vid_fp = cpudata->vid.min + mul_fp(
@@ -1557,7 +1535,7 @@ static u64 core_get_val(struct cpudata *cpudata, int pstate)
 	u64 val;
 
 	val = (u64)pstate << 8;
-	if (limits->no_turbo && !limits->turbo_disabled)
+	if (global.no_turbo && !global.turbo_disabled)
 		val |= (u64)1 << 32;
 
 	return val;
@@ -1683,9 +1661,9 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
 	int max_perf = cpu->pstate.turbo_pstate;
 	int max_perf_adj;
 	int min_perf;
-	struct perf_limits *perf_limits = limits;
+	struct perf_limits *perf_limits = &global;
 
-	if (limits->no_turbo || limits->turbo_disabled)
+	if (global.no_turbo || global.turbo_disabled)
 		max_perf = cpu->pstate.max_pstate;
 
 	if (per_cpu_limits)
@@ -1820,7 +1798,7 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
 	sample->busy_scaled = busy_frac * 100;
 
-	target = limits->no_turbo || limits->turbo_disabled ?
+	target = global.no_turbo || global.turbo_disabled ?
 		cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
 	target += target >> 2;
 	target = mul_fp(target, busy_frac);
@@ -2116,7 +2094,7 @@ static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
 static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 {
 	struct cpudata *cpu;
-	struct perf_limits *perf_limits = NULL;
+	struct perf_limits *perf_limits = &global;
 
 	if (!policy->cpuinfo.max_freq)
 		return -ENODEV;
@@ -2139,21 +2117,6 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 	mutex_lock(&intel_pstate_limits_lock);
 
-	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
-		pr_debug("set performance\n");
-		if (!perf_limits) {
-			limits = &performance_limits;
-			perf_limits = limits;
-		}
-	} else {
-		pr_debug("set powersave\n");
-		if (!perf_limits) {
-			limits = &powersave_limits;
-			perf_limits = limits;
-		}
-	}
-
 	intel_pstate_update_perf_limits(policy, perf_limits);
 
 	if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
@@ -2177,16 +2140,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
 {
 	struct cpudata *cpu = all_cpu_data[policy->cpu];
-	struct perf_limits *perf_limits;
-
-	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
-		perf_limits = &performance_limits;
-	else
-		perf_limits = &powersave_limits;
 
 	update_turbo_state();
-	policy->cpuinfo.max_freq = perf_limits->turbo_disabled ||
-					perf_limits->no_turbo ?
+	policy->cpuinfo.max_freq = global.turbo_disabled || global.no_turbo ?
 			cpu->pstate.max_freq :
 			cpu->pstate.turbo_freq;
@@ -2201,9 +2157,9 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
 		unsigned int max_freq, min_freq;
 
 		max_freq = policy->cpuinfo.max_freq *
-						perf_limits->max_sysfs_pct / 100;
+						global.max_sysfs_pct / 100;
 		min_freq = policy->cpuinfo.max_freq *
-						perf_limits->min_sysfs_pct / 100;
+						global.min_sysfs_pct / 100;
 		cpufreq_verify_within_limits(policy, min_freq, max_freq);
 	}
@@ -2255,7 +2211,7 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
 	/* cpuinfo and default policy values */
 	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
 	update_turbo_state();
-	policy->cpuinfo.max_freq = limits->turbo_disabled ?
+	policy->cpuinfo.max_freq = global.turbo_disabled ?
 			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
 	policy->cpuinfo.max_freq *= cpu->pstate.scaling;
@@ -2275,7 +2231,7 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
 		return ret;
 
 	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
-	if (limits->min_perf_pct == 100 && limits->max_perf_pct == 100)
+	if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE))
 		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
 	else
 		policy->policy = CPUFREQ_POLICY_POWERSAVE;
@@ -2301,7 +2257,7 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
 	struct cpudata *cpu = all_cpu_data[policy->cpu];
 
 	update_turbo_state();
-	policy->cpuinfo.max_freq = limits->turbo_disabled ?
+	policy->cpuinfo.max_freq = global.no_turbo || global.turbo_disabled ?
 			cpu->pstate.max_freq : cpu->pstate.turbo_freq;
 
 	cpufreq_verify_within_cpu_limits(policy);
@@ -2309,26 +2265,6 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
 	return 0;
 }
 
-static unsigned int intel_cpufreq_turbo_update(struct cpudata *cpu,
-					       struct cpufreq_policy *policy,
-					       unsigned int target_freq)
-{
-	unsigned int max_freq;
-
-	update_turbo_state();
-
-	max_freq = limits->no_turbo || limits->turbo_disabled ?
-			cpu->pstate.max_freq : cpu->pstate.turbo_freq;
-	policy->cpuinfo.max_freq = max_freq;
-	if (policy->max > max_freq)
-		policy->max = max_freq;
-
-	if (target_freq > max_freq)
-		target_freq = max_freq;
-
-	return target_freq;
-}
-
 static int intel_cpufreq_target(struct cpufreq_policy *policy,
 				unsigned int target_freq,
 				unsigned int relation)
@@ -2337,8 +2273,10 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy,
 	struct cpufreq_freqs freqs;
 	int target_pstate;
 
+	update_turbo_state();
+
 	freqs.old = policy->cur;
-	freqs.new = intel_cpufreq_turbo_update(cpu, policy, target_freq);
+	freqs.new = target_freq;
 
 	cpufreq_freq_transition_begin(policy, &freqs);
 	switch (relation) {
@@ -2370,7 +2308,8 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
 	struct cpudata *cpu = all_cpu_data[policy->cpu];
 	int target_pstate;
 
-	target_freq = intel_cpufreq_turbo_update(cpu, policy, target_freq);
+	update_turbo_state();
+
 	target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
 	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
 	intel_pstate_update_pstate(cpu, target_pstate);
@@ -2425,13 +2364,7 @@ static int intel_pstate_register_driver(void)
 {
 	int ret;
 
-	intel_pstate_init_limits(&powersave_limits);
-	intel_pstate_set_performance_limits(&performance_limits);
-
-	if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE) &&
-	    intel_pstate_driver == &intel_pstate)
-		limits = &performance_limits;
-	else
-		limits = &powersave_limits;
+	intel_pstate_init_limits(&global);
 
 	ret = cpufreq_register_driver(intel_pstate_driver);
 	if (ret) {


@@ -615,6 +615,18 @@ int cpuidle_add_sysfs(struct cpuidle_device *dev)
 	struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
 	int error;
 
+	/*
+	 * Return if cpu_device is not setup for this CPU.
+	 *
+	 * This could happen if the arch did not set up cpu_device
+	 * since this CPU is not in cpu_present mask and the
+	 * driver did not send a correct CPU mask during registration.
+	 * Without this check we would end up passing bogus
+	 * value for &cpu_dev->kobj in kobject_init_and_add()
+	 */
+	if (!cpu_dev)
+		return -ENODEV;
+
 	kdev = kzalloc(sizeof(*kdev), GFP_KERNEL);
 	if (!kdev)
 		return -ENOMEM;


@@ -283,11 +283,14 @@ EXPORT_SYMBOL_GPL(ccp_version);
  */
 int ccp_enqueue_cmd(struct ccp_cmd *cmd)
 {
-	struct ccp_device *ccp = ccp_get_device();
+	struct ccp_device *ccp;
 	unsigned long flags;
 	unsigned int i;
 	int ret;
 
+	/* Some commands might need to be sent to a specific device */
+	ccp = cmd->ccp ? cmd->ccp : ccp_get_device();
+
 	if (!ccp)
 		return -ENODEV;
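
A submitter that has to stay on one engine — as the dmaengine channel code in the next hunk now does — sets cmd->ccp before enqueueing; leaving it NULL keeps the old round-robin selection via ccp_get_device(). A hedged usage sketch (submit_to() is a hypothetical caller, not part of this patch):

    static int submit_to(struct ccp_device *dev, struct ccp_cmd *cmd)
    {
    	cmd->ccp = dev;		/* NULL would mean "any available device" */
    	return ccp_enqueue_cmd(cmd);
    }
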


@@ -390,6 +390,7 @@ static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan,
 			goto err;
 
 		ccp_cmd = &cmd->ccp_cmd;
+		ccp_cmd->ccp = chan->ccp;
 		ccp_pt = &ccp_cmd->u.passthru_nomap;
 		ccp_cmd->flags = CCP_CMD_MAY_BACKLOG;
 		ccp_cmd->flags |= CCP_CMD_PASSTHRU_NO_DMA_MAP;


@@ -44,7 +44,7 @@ config EXTCON_GPIO
 config EXTCON_INTEL_INT3496
 	tristate "Intel INT3496 ACPI device extcon driver"
-	depends on GPIOLIB && ACPI
+	depends on GPIOLIB && ACPI && (X86 || COMPILE_TEST)
 	help
 	  Say Y here to enable extcon support for USB OTG ports controlled by
 	  an Intel INT3496 ACPI device.


@@ -45,6 +45,17 @@ static const unsigned int int3496_cable[] = {
 	EXTCON_NONE,
 };
 
+static const struct acpi_gpio_params id_gpios = { INT3496_GPIO_USB_ID, 0, false };
+static const struct acpi_gpio_params vbus_gpios = { INT3496_GPIO_VBUS_EN, 0, false };
+static const struct acpi_gpio_params mux_gpios = { INT3496_GPIO_USB_MUX, 0, false };
+
+static const struct acpi_gpio_mapping acpi_int3496_default_gpios[] = {
+	{ "id-gpios", &id_gpios, 1 },
+	{ "vbus-gpios", &vbus_gpios, 1 },
+	{ "mux-gpios", &mux_gpios, 1 },
+	{ },
+};
+
 static void int3496_do_usb_id(struct work_struct *work)
 {
 	struct int3496_data *data =
@@ -83,6 +94,13 @@ static int int3496_probe(struct platform_device *pdev)
 	struct int3496_data *data;
 	int ret;
 
+	ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(dev),
+					acpi_int3496_default_gpios);
+	if (ret) {
+		dev_err(dev, "can't add GPIO ACPI mapping\n");
+		return ret;
+	}
+
 	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
@@ -90,30 +108,27 @@ static int int3496_probe(struct platform_device *pdev)
 	data->dev = dev;
 	INIT_DELAYED_WORK(&data->work, int3496_do_usb_id);
 
-	data->gpio_usb_id = devm_gpiod_get_index(dev, "id",
-						 INT3496_GPIO_USB_ID,
-						 GPIOD_IN);
+	data->gpio_usb_id = devm_gpiod_get(dev, "id", GPIOD_IN);
 	if (IS_ERR(data->gpio_usb_id)) {
 		ret = PTR_ERR(data->gpio_usb_id);
 		dev_err(dev, "can't request USB ID GPIO: %d\n", ret);
 		return ret;
+	} else if (gpiod_get_direction(data->gpio_usb_id) != GPIOF_DIR_IN) {
+		dev_warn(dev, FW_BUG "USB ID GPIO not in input mode, fixing\n");
+		gpiod_direction_input(data->gpio_usb_id);
 	}
 
 	data->usb_id_irq = gpiod_to_irq(data->gpio_usb_id);
-	if (data->usb_id_irq <= 0) {
+	if (data->usb_id_irq < 0) {
 		dev_err(dev, "can't get USB ID IRQ: %d\n", data->usb_id_irq);
-		return -EINVAL;
+		return data->usb_id_irq;
 	}
 
-	data->gpio_vbus_en = devm_gpiod_get_index(dev, "vbus en",
-						  INT3496_GPIO_VBUS_EN,
-						  GPIOD_ASIS);
+	data->gpio_vbus_en = devm_gpiod_get(dev, "vbus", GPIOD_ASIS);
 	if (IS_ERR(data->gpio_vbus_en))
 		dev_info(dev, "can't request VBUS EN GPIO\n");
 
-	data->gpio_usb_mux = devm_gpiod_get_index(dev, "usb mux",
-						  INT3496_GPIO_USB_MUX,
-						  GPIOD_ASIS);
+	data->gpio_usb_mux = devm_gpiod_get(dev, "mux", GPIOD_ASIS);
 	if (IS_ERR(data->gpio_usb_mux))
 		dev_info(dev, "can't request USB MUX GPIO\n");
@@ -154,6 +169,8 @@ static int int3496_remove(struct platform_device *pdev)
 	devm_free_irq(&pdev->dev, data->usb_id_irq, data);
 	cancel_delayed_work_sync(&data->work);
 
+	acpi_dev_remove_driver_gpios(ACPI_COMPANION(&pdev->dev));
+
 	return 0;
 }


@@ -96,7 +96,7 @@ static int altr_a10sr_gpio_probe(struct platform_device *pdev)
 	gpio->regmap = a10sr->regmap;
 
 	gpio->gp = altr_a10sr_gc;
-
+	gpio->gp.parent = pdev->dev.parent;
 	gpio->gp.of_node = pdev->dev.of_node;
 
 	ret = devm_gpiochip_add_data(&pdev->dev, &gpio->gp, gpio);


@@ -90,21 +90,18 @@ static int altera_gpio_irq_set_type(struct irq_data *d,
 	altera_gc = gpiochip_get_data(irq_data_get_irq_chip_data(d));
 
-	if (type == IRQ_TYPE_NONE)
+	if (type == IRQ_TYPE_NONE) {
+		irq_set_handler_locked(d, handle_bad_irq);
 		return 0;
-	if (type == IRQ_TYPE_LEVEL_HIGH &&
-	altera_gc->interrupt_trigger == IRQ_TYPE_LEVEL_HIGH)
-		return 0;
-	if (type == IRQ_TYPE_EDGE_RISING &&
-	altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_RISING)
-		return 0;
-	if (type == IRQ_TYPE_EDGE_FALLING &&
-	altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_FALLING)
-		return 0;
-	if (type == IRQ_TYPE_EDGE_BOTH &&
-	altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_BOTH)
-		return 0;
+	}
+	if (type == altera_gc->interrupt_trigger) {
+		if (type == IRQ_TYPE_LEVEL_HIGH)
+			irq_set_handler_locked(d, handle_level_irq);
+		else
+			irq_set_handler_locked(d, handle_simple_irq);
+		return 0;
+	}
+	irq_set_handler_locked(d, handle_bad_irq);
 
 	return -EINVAL;
 }
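
Together with the probe change below, this follows the usual irqchip contract: an IRQ keeps handle_bad_irq until ->irq_set_type() accepts a trigger, and a level trigger gets handle_level_irq so the line stays masked while it is serviced. Schematically (supported_trigger() is a made-up helper, not from this driver):

    static int set_type_sketch(struct irq_data *d, unsigned int type)
    {
    	if (type != supported_trigger(d))
    		return -EINVAL;		/* leave handle_bad_irq installed */

    	irq_set_handler_locked(d, type == IRQ_TYPE_LEVEL_HIGH ?
    			       handle_level_irq : handle_simple_irq);
    	return 0;
    }
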
@@ -230,7 +227,6 @@ static void altera_gpio_irq_edge_handler(struct irq_desc *desc)
 	chained_irq_exit(chip, desc);
 }
 
-
 static void altera_gpio_irq_leveL_high_handler(struct irq_desc *desc)
 {
 	struct altera_gpio_chip *altera_gc;
@@ -310,7 +306,7 @@ static int altera_gpio_probe(struct platform_device *pdev)
 	altera_gc->interrupt_trigger = reg;
 
 	ret = gpiochip_irqchip_add(&altera_gc->mmchip.gc, &altera_irq_chip, 0,
-		handle_simple_irq, IRQ_TYPE_NONE);
+		handle_bad_irq, IRQ_TYPE_NONE);
 
 	if (ret) {
 		dev_err(&pdev->dev, "could not add irqchip\n");

@@ -270,8 +270,10 @@ mcp23s08_direction_output(struct gpio_chip *chip, unsigned offset, int value)
 static irqreturn_t mcp23s08_irq(int irq, void *data)
 {
 	struct mcp23s08 *mcp = data;
-	int intcap, intf, i;
+	int intcap, intf, i, gpio, gpio_orig, intcap_mask;
 	unsigned int child_irq;
+	bool intf_set, intcap_changed, gpio_bit_changed,
+		defval_changed, gpio_set;
 
 	mutex_lock(&mcp->lock);
 	if (mcp_read(mcp, MCP_INTF, &intf) < 0) {
@@ -287,14 +289,67 @@ static irqreturn_t mcp23s08_irq(int irq, void *data)
 	}
 
 	mcp->cache[MCP_INTCAP] = intcap;
+
+	/* This clears the interrupt(configurable on S18) */
+	if (mcp_read(mcp, MCP_GPIO, &gpio) < 0) {
+		mutex_unlock(&mcp->lock);
+		return IRQ_HANDLED;
+	}
+	gpio_orig = mcp->cache[MCP_GPIO];
+	mcp->cache[MCP_GPIO] = gpio;
 	mutex_unlock(&mcp->lock);
 
+	if (mcp->cache[MCP_INTF] == 0) {
+		/* There is no interrupt pending */
+		return IRQ_HANDLED;
+	}
+
+	dev_dbg(mcp->chip.parent,
+		"intcap 0x%04X intf 0x%04X gpio_orig 0x%04X gpio 0x%04X\n",
+		intcap, intf, gpio_orig, gpio);
+
 	for (i = 0; i < mcp->chip.ngpio; i++) {
-		if ((BIT(i) & mcp->cache[MCP_INTF]) &&
-		    ((BIT(i) & intcap & mcp->irq_rise) ||
-		     (mcp->irq_fall & ~intcap & BIT(i)) ||
-		     (BIT(i) & mcp->cache[MCP_INTCON]))) {
+		/* We must check all of the inputs on the chip,
+		 * otherwise we may not notice a change on >=2 pins.
+		 *
+		 * On at least the mcp23s17, INTCAP is only updated
+		 * one byte at a time(INTCAPA and INTCAPB are
+		 * not written to at the same time - only on a per-bank
+		 * basis).
+		 *
+		 * INTF only contains the single bit that caused the
+		 * interrupt per-bank.  On the mcp23s17, there is
+		 * INTFA and INTFB.  If two pins are changed on the A
+		 * side at the same time, INTF will only have one bit
+		 * set.  If one pin on the A side and one pin on the B
+		 * side are changed at the same time, INTF will have
+		 * two bits set.  Thus, INTF can't be the only check
+		 * to see if the input has changed.
+		 */
+		intf_set = BIT(i) & mcp->cache[MCP_INTF];
+		if (i < 8 && intf_set)
+			intcap_mask = 0x00FF;
+		else if (i >= 8 && intf_set)
+			intcap_mask = 0xFF00;
+		else
+			intcap_mask = 0x00;
+
+		intcap_changed = (intcap_mask &
+			(BIT(i) & mcp->cache[MCP_INTCAP])) !=
+			(intcap_mask & (BIT(i) & gpio_orig));
+		gpio_set = BIT(i) & mcp->cache[MCP_GPIO];
+		gpio_bit_changed = (BIT(i) & gpio_orig) !=
+			(BIT(i) & mcp->cache[MCP_GPIO]);
+		defval_changed = (BIT(i) & mcp->cache[MCP_INTCON]) &&
+			((BIT(i) & mcp->cache[MCP_GPIO]) !=
+			(BIT(i) & mcp->cache[MCP_DEFVAL]));
+
+		if (((gpio_bit_changed || intcap_changed) &&
+			(BIT(i) & mcp->irq_rise) && gpio_set) ||
+		    ((gpio_bit_changed || intcap_changed) &&
+			(BIT(i) & mcp->irq_fall) && !gpio_set) ||
+		    defval_changed) {
 			child_irq = irq_find_mapping(mcp->chip.irqdomain, i);
 			handle_nested_irq(child_irq);
 		}
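
Reduced to a single pin, the decision above is a change-detect against the previous GPIO snapshot plus the INTCON/DEFVAL compare mode. A self-contained sketch of the logic (not the driver's exact code):

    static bool pin_should_fire(u16 old_gpio, u16 new_gpio, u16 irq_rise,
    			    u16 irq_fall, u16 intcon, u16 defval, int i)
    {
    	bool changed = (old_gpio ^ new_gpio) & BIT(i);
    	bool level = new_gpio & BIT(i);

    	/* edge requested in the direction the pin actually moved */
    	if (changed && ((level && (irq_rise & BIT(i))) ||
    			(!level && (irq_fall & BIT(i)))))
    		return true;

    	/* compare-against-DEFVAL mode */
    	return (intcon & BIT(i)) && ((new_gpio ^ defval) & BIT(i));
    }
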


@@ -197,7 +197,7 @@ static ssize_t gpio_mockup_event_write(struct file *file,
 	struct seq_file *sfile;
 	struct gpio_desc *desc;
 	struct gpio_chip *gc;
-	int status, val;
+	int val;
 	char buf;
 
 	sfile = file->private_data;
@@ -206,9 +206,8 @@ static ssize_t gpio_mockup_event_write(struct file *file,
 	chip = priv->chip;
 	gc = &chip->gc;
 
-	status = copy_from_user(&buf, usr_buf, 1);
-	if (status)
-		return status;
+	if (copy_from_user(&buf, usr_buf, 1))
+		return -EFAULT;
 
 	if (buf == '0')
 		val = 0;
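
copy_from_user() returns the number of bytes it failed to copy, not an errno, so the old code could hand a positive byte count back to the caller as if it were an error code. The idiom is to collapse any nonzero result into -EFAULT:

    if (copy_from_user(&buf, usr_buf, sizeof(buf)))
    	return -EFAULT;
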


@@ -42,9 +42,7 @@ struct xgene_gpio {
 	struct gpio_chip	chip;
 	void __iomem		*base;
 	spinlock_t		lock;
-#ifdef CONFIG_PM
 	u32			set_dr_val[XGENE_MAX_GPIO_BANKS];
-#endif
 };
 
 static int xgene_gpio_get(struct gpio_chip *gc, unsigned int offset)
@@ -138,8 +136,7 @@ static int xgene_gpio_dir_out(struct gpio_chip *gc,
 	return 0;
 }
 
-#ifdef CONFIG_PM
-static int xgene_gpio_suspend(struct device *dev)
+static __maybe_unused int xgene_gpio_suspend(struct device *dev)
 {
 	struct xgene_gpio *gpio = dev_get_drvdata(dev);
 	unsigned long bank_offset;
@@ -152,7 +149,7 @@ static int xgene_gpio_suspend(struct device *dev)
 	return 0;
 }
 
-static int xgene_gpio_resume(struct device *dev)
+static __maybe_unused int xgene_gpio_resume(struct device *dev)
 {
 	struct xgene_gpio *gpio = dev_get_drvdata(dev);
 	unsigned long bank_offset;
@@ -166,10 +163,6 @@ static int xgene_gpio_resume(struct device *dev)
 }
 
 static SIMPLE_DEV_PM_OPS(xgene_gpio_pm, xgene_gpio_suspend, xgene_gpio_resume);
-#define XGENE_GPIO_PM_OPS	(&xgene_gpio_pm)
-#else
-#define XGENE_GPIO_PM_OPS	NULL
-#endif
 
 static int xgene_gpio_probe(struct platform_device *pdev)
 {
@@ -241,7 +234,7 @@ static struct platform_driver xgene_gpio_driver = {
 		.name = "xgene-gpio",
 		.of_match_table = xgene_gpio_of_match,
 		.acpi_match_table = ACPI_PTR(xgene_gpio_acpi_match),
-		.pm     = XGENE_GPIO_PM_OPS,
+		.pm     = &xgene_gpio_pm,
 	},
 	.probe = xgene_gpio_probe,
 };
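
This is the standard replacement for #ifdef CONFIG_PM scaffolding: SIMPLE_DEV_PM_OPS() always emits the ops table (empty when sleep support is compiled out), and __maybe_unused silences the defined-but-not-used warning for the callbacks. A skeleton for a hypothetical driver:

    static __maybe_unused int foo_suspend(struct device *dev) { return 0; }
    static __maybe_unused int foo_resume(struct device *dev) { return 0; }

    static SIMPLE_DEV_PM_OPS(foo_pm, foo_suspend, foo_resume);
    /* .pm = &foo_pm is then valid whether or not CONFIG_PM is set */
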


@@ -475,7 +475,7 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
 	int r;
 
 	if (adev->wb.wb_obj == NULL) {
-		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * 4,
+		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t),
 					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
 					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
 					    (void **)&adev->wb.wb);
@@ -488,7 +488,7 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
 		memset(&adev->wb.used, 0, sizeof(adev->wb.used));
 
 		/* clear wb memory */
-		memset((char *)adev->wb.wb, 0, AMDGPU_GPU_PAGE_SIZE);
+		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
 	}
 
 	return 0;


@@ -421,6 +421,7 @@ static const struct pci_device_id pciidlist[] = {
 	{0x1002, 0x6985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
 	{0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
 	{0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+	{0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
 	{0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
 
 	{0, 0, 0}


@@ -3465,9 +3465,13 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
 			max_sclk = 75000;
 		}
 	} else if (adev->asic_type == CHIP_OLAND) {
-		if ((adev->pdev->device == 0x6604) &&
-		    (adev->pdev->subsystem_vendor == 0x1028) &&
-		    (adev->pdev->subsystem_device == 0x066F)) {
+		if ((adev->pdev->revision == 0xC7) ||
+		    (adev->pdev->revision == 0x80) ||
+		    (adev->pdev->revision == 0x81) ||
+		    (adev->pdev->revision == 0x83) ||
+		    (adev->pdev->revision == 0x87) ||
+		    (adev->pdev->device == 0x6604) ||
+		    (adev->pdev->device == 0x6605)) {
 			max_sclk = 75000;
 		}
 	}


@@ -1332,9 +1332,9 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
 	 * to KMS, hence fail if different settings are requested.
 	 */
 	if (var->bits_per_pixel != fb->format->cpp[0] * 8 ||
-	    var->xres != fb->width || var->yres != fb->height ||
-	    var->xres_virtual != fb->width || var->yres_virtual != fb->height) {
-		DRM_DEBUG("fb userspace requested width/height/bpp different than current fb "
+	    var->xres > fb->width || var->yres > fb->height ||
+	    var->xres_virtual > fb->width || var->yres_virtual > fb->height) {
+		DRM_DEBUG("fb requested width/height/bpp can't fit in current fb "
 			  "request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n",
 			  var->xres, var->yres, var->bits_per_pixel,
 			  var->xres_virtual, var->yres_virtual,


@@ -68,6 +68,8 @@ struct decon_context {
 	unsigned long			flags;
 	unsigned long			out_type;
 	int				first_win;
+	spinlock_t			vblank_lock;
+	u32				frame_id;
 };
 
 static const uint32_t decon_formats[] = {
@@ -103,7 +105,7 @@ static int decon_enable_vblank(struct exynos_drm_crtc *crtc)
 		if (ctx->out_type & IFTYPE_I80)
 			val |= VIDINTCON0_FRAMEDONE;
 		else
-			val |= VIDINTCON0_INTFRMEN;
+			val |= VIDINTCON0_INTFRMEN | VIDINTCON0_FRAMESEL_FP;
 
 		writel(val, ctx->addr + DECON_VIDINTCON0);
 	}
@@ -122,14 +124,56 @@ static void decon_disable_vblank(struct exynos_drm_crtc *crtc)
 		writel(0, ctx->addr + DECON_VIDINTCON0);
 }
 
+/* return number of starts/ends of frame transmissions since reset */
+static u32 decon_get_frame_count(struct decon_context *ctx, bool end)
+{
+	u32 frm, pfrm, status, cnt = 2;
+
+	/* To get consistent result repeat read until frame id is stable.
+	 * Usually the loop will be executed once, in rare cases when the loop
+	 * is executed at frame change time 2nd pass will be needed.
+	 */
+	frm = readl(ctx->addr + DECON_CRFMID);
+	do {
+		status = readl(ctx->addr + DECON_VIDCON1);
+		pfrm = frm;
+		frm = readl(ctx->addr + DECON_CRFMID);
+	} while (frm != pfrm && --cnt);
+
+	/* CRFMID is incremented on BPORCH in case of I80 and on VSYNC in case
+	 * of RGB, it should be taken into account.
+	 */
+	if (!frm)
+		return 0;
+
+	switch (status & (VIDCON1_VSTATUS_MASK | VIDCON1_I80_ACTIVE)) {
+	case VIDCON1_VSTATUS_VS:
+		if (!(ctx->out_type & IFTYPE_I80))
+			--frm;
+		break;
+	case VIDCON1_VSTATUS_BP:
+		--frm;
+		break;
+	case VIDCON1_I80_ACTIVE:
+	case VIDCON1_VSTATUS_AC:
+		if (end)
+			--frm;
+		break;
+	default:
+		break;
+	}
+
+	return frm;
+}
+
 static void decon_setup_trigger(struct decon_context *ctx)
 {
 	if (!(ctx->out_type & (IFTYPE_I80 | I80_HW_TRG)))
 		return;
 
 	if (!(ctx->out_type & I80_HW_TRG)) {
-		writel(TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN
-		       | TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN,
+		writel(TRIGCON_TRIGEN_PER_F | TRIGCON_TRIGEN_F |
+		       TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN,
 		       ctx->addr + DECON_TRIGCON);
 		return;
 	}
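
The double read of DECON_CRFMID above is the usual stable-read loop for a counter that may tick between reads; generalized (a sketch, not this driver's code):

    /* Re-read until two consecutive samples agree, with bounded retries. */
    static u32 read_stable(void __iomem *reg)
    {
    	u32 prev, cur = readl(reg);
    	int tries = 2;

    	do {
    		prev = cur;
    		cur = readl(reg);
    	} while (cur != prev && --tries);

    	return cur;
    }
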
@@ -365,11 +409,14 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc,
 static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
 {
 	struct decon_context *ctx = crtc->ctx;
+	unsigned long flags;
 	int i;
 
 	if (test_bit(BIT_SUSPENDED, &ctx->flags))
 		return;
 
+	spin_lock_irqsave(&ctx->vblank_lock, flags);
+
 	for (i = ctx->first_win; i < WINDOWS_NR; i++)
 		decon_shadow_protect_win(ctx, i, false);
 
@@ -378,11 +425,18 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
 	if (ctx->out_type & IFTYPE_I80)
 		set_bit(BIT_WIN_UPDATED, &ctx->flags);
+
+	ctx->frame_id = decon_get_frame_count(ctx, true);
+
+	exynos_crtc_handle_event(crtc);
+
+	spin_unlock_irqrestore(&ctx->vblank_lock, flags);
 }
 
 static void decon_swreset(struct decon_context *ctx)
 {
 	unsigned int tries;
+	unsigned long flags;
 
 	writel(0, ctx->addr + DECON_VIDCON0);
 	for (tries = 2000; tries; --tries) {
@@ -400,6 +454,10 @@ static void decon_swreset(struct decon_context *ctx)
 	WARN(tries == 0, "failed to software reset DECON\n");
 
+	spin_lock_irqsave(&ctx->vblank_lock, flags);
+	ctx->frame_id = 0;
+	spin_unlock_irqrestore(&ctx->vblank_lock, flags);
+
 	if (!(ctx->out_type & IFTYPE_HDMI))
 		return;
@@ -578,6 +636,24 @@ static const struct component_ops decon_component_ops = {
 	.unbind = decon_unbind,
 };
 
+static void decon_handle_vblank(struct decon_context *ctx)
+{
+	u32 frm;
+
+	spin_lock(&ctx->vblank_lock);
+
+	frm = decon_get_frame_count(ctx, true);
+
+	if (frm != ctx->frame_id) {
+		/* handle only if incremented, take care of wrap-around */
+		if ((s32)(frm - ctx->frame_id) > 0)
+			drm_crtc_handle_vblank(&ctx->crtc->base);
+		ctx->frame_id = frm;
+	}
+
+	spin_unlock(&ctx->vblank_lock);
+}
+
 static irqreturn_t decon_irq_handler(int irq, void *dev_id)
 {
 	struct decon_context *ctx = dev_id;
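
The (s32)(frm - ctx->frame_id) > 0 test above is serial-number arithmetic: unsigned subtraction followed by a signed comparison stays correct across the u32 wrap as long as the two counters are less than 2^31 apart. In isolation:

    /* True if a is "after" b; wrap-around safe (cf. RFC 1982). */
    static inline bool frame_after(u32 a, u32 b)
    {
    	return (s32)(a - b) > 0;
    }
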
@@ -598,7 +674,7 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id)
 			    (VIDOUT_INTERLACE_EN_F | VIDOUT_INTERLACE_FIELD_F))
 				return IRQ_HANDLED;
 		}
-		drm_crtc_handle_vblank(&ctx->crtc->base);
+		decon_handle_vblank(ctx);
 	}
 
 out:
@@ -671,6 +747,7 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
 	__set_bit(BIT_SUSPENDED, &ctx->flags);
 	ctx->dev = dev;
 	ctx->out_type = (unsigned long)of_device_get_match_data(dev);
+	spin_lock_init(&ctx->vblank_lock);
 
 	if (ctx->out_type & IFTYPE_HDMI) {
 		ctx->first_win = 1;
@@ -678,7 +755,7 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
 		ctx->out_type |= IFTYPE_I80;
 	}
 
-	if (ctx->out_type | I80_HW_TRG) {
+	if (ctx->out_type & I80_HW_TRG) {
 		ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
 							"samsung,disp-sysreg");
 		if (IS_ERR(ctx->sysreg)) {


@@ -526,6 +526,7 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
 	for (i = 0; i < WINDOWS_NR; i++)
 		decon_shadow_protect_win(ctx, i, false);
+	exynos_crtc_handle_event(crtc);
 }
 
 static void decon_init(struct decon_context *ctx)


@@ -85,24 +85,9 @@ static void exynos_crtc_atomic_flush(struct drm_crtc *crtc,
 				     struct drm_crtc_state *old_crtc_state)
 {
 	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
-	struct drm_pending_vblank_event *event;
-	unsigned long flags;
 
 	if (exynos_crtc->ops->atomic_flush)
 		exynos_crtc->ops->atomic_flush(exynos_crtc);
-
-	event = crtc->state->event;
-	if (event) {
-		crtc->state->event = NULL;
-
-		spin_lock_irqsave(&crtc->dev->event_lock, flags);
-		if (drm_crtc_vblank_get(crtc) == 0)
-			drm_crtc_arm_vblank_event(crtc, event);
-		else
-			drm_crtc_send_vblank_event(crtc, event);
-		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-	}
 }
 
 static const struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
@@ -114,6 +99,24 @@ static const struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
 	.atomic_flush	= exynos_crtc_atomic_flush,
 };
 
+void exynos_crtc_handle_event(struct exynos_drm_crtc *exynos_crtc)
+{
+	struct drm_crtc *crtc = &exynos_crtc->base;
+	struct drm_pending_vblank_event *event = crtc->state->event;
+	unsigned long flags;
+
+	if (event) {
+		crtc->state->event = NULL;
+
+		spin_lock_irqsave(&crtc->dev->event_lock, flags);
+		if (drm_crtc_vblank_get(crtc) == 0)
+			drm_crtc_arm_vblank_event(crtc, event);
+		else
+			drm_crtc_send_vblank_event(crtc, event);
+		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+	}
+}
+
 static void exynos_drm_crtc_destroy(struct drm_crtc *crtc)
 {
 	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);


@@ -38,4 +38,6 @@ int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
  */
 void exynos_drm_crtc_te_handler(struct drm_crtc *crtc);
 
+void exynos_crtc_handle_event(struct exynos_drm_crtc *exynos_crtc);
+
 #endif


@@ -86,7 +86,7 @@
 #define DSIM_SYNC_INFORM		(1 << 27)
 #define DSIM_EOT_DISABLE		(1 << 28)
 #define DSIM_MFLUSH_VS			(1 << 29)
-/* This flag is valid only for exynos3250/3472/4415/5260/5430 */
+/* This flag is valid only for exynos3250/3472/5260/5430 */
 #define DSIM_CLKLANE_STOP		(1 << 30)
 
 /* DSIM_ESCMODE */
@@ -473,17 +473,6 @@ static const struct exynos_dsi_driver_data exynos4_dsi_driver_data = {
 	.reg_values = reg_values,
 };
 
-static const struct exynos_dsi_driver_data exynos4415_dsi_driver_data = {
-	.reg_ofs = exynos_reg_ofs,
-	.plltmr_reg = 0x58,
-	.has_clklane_stop = 1,
-	.num_clks = 2,
-	.max_freq = 1000,
-	.wait_for_reset = 1,
-	.num_bits_resol = 11,
-	.reg_values = reg_values,
-};
-
 static const struct exynos_dsi_driver_data exynos5_dsi_driver_data = {
 	.reg_ofs = exynos_reg_ofs,
 	.plltmr_reg = 0x58,
@@ -521,8 +510,6 @@ static const struct of_device_id exynos_dsi_of_match[] = {
 	  .data = &exynos3_dsi_driver_data },
 	{ .compatible = "samsung,exynos4210-mipi-dsi",
 	  .data = &exynos4_dsi_driver_data },
-	{ .compatible = "samsung,exynos4415-mipi-dsi",
-	  .data = &exynos4415_dsi_driver_data },
 	{ .compatible = "samsung,exynos5410-mipi-dsi",
 	  .data = &exynos5_dsi_driver_data },
 	{ .compatible = "samsung,exynos5422-mipi-dsi",
@@ -979,7 +966,7 @@ static void exynos_dsi_send_to_fifo(struct exynos_dsi *dsi,
 	bool first = !xfer->tx_done;
 	u32 reg;
 
-	dev_dbg(dev, "< xfer %p: tx len %u, done %u, rx len %u, done %u\n",
+	dev_dbg(dev, "< xfer %pK: tx len %u, done %u, rx len %u, done %u\n",
 		xfer, length, xfer->tx_done, xfer->rx_len, xfer->rx_done);
 
 	if (length > DSI_TX_FIFO_SIZE)
@@ -1177,7 +1164,7 @@ static bool exynos_dsi_transfer_finish(struct exynos_dsi *dsi)
 	spin_unlock_irqrestore(&dsi->transfer_lock, flags);
 
 	dev_dbg(dsi->dev,
-		"> xfer %p, tx_len %zu, tx_done %u, rx_len %u, rx_done %u\n",
+		"> xfer %pK, tx_len %zu, tx_done %u, rx_len %u, rx_done %u\n",
 		xfer, xfer->packet.payload_length, xfer->tx_done, xfer->rx_len,
 		xfer->rx_done);
@@ -1348,9 +1335,12 @@ static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi)
 	int te_gpio_irq;
 
 	dsi->te_gpio = of_get_named_gpio(dsi->panel_node, "te-gpios", 0);
+	if (dsi->te_gpio == -ENOENT)
+		return 0;
+
 	if (!gpio_is_valid(dsi->te_gpio)) {
-		dev_err(dsi->dev, "no te-gpios specified\n");
 		ret = dsi->te_gpio;
+		dev_err(dsi->dev, "cannot get te-gpios, %d\n", ret);
 		goto out;
 	}


@@ -1695,7 +1695,7 @@ static int fimc_probe(struct platform_device *pdev)
 		goto err_put_clk;
 	}
 
-	DRM_DEBUG_KMS("id[%d]ippdrv[%p]\n", ctx->id, ippdrv);
+	DRM_DEBUG_KMS("id[%d]ippdrv[%pK]\n", ctx->id, ippdrv);
 
 	spin_lock_init(&ctx->lock);
 	platform_set_drvdata(pdev, ctx);


@@ -71,10 +71,10 @@
 #define TRIGCON				0x1A4
 #define TRGMODE_ENABLE			(1 << 0)
 #define SWTRGCMD_ENABLE			(1 << 1)
-/* Exynos3250, 3472, 4415, 5260 5410, 5420 and 5422 only supported. */
+/* Exynos3250, 3472, 5260 5410, 5420 and 5422 only supported. */
 #define HWTRGEN_ENABLE			(1 << 3)
 #define HWTRGMASK_ENABLE		(1 << 4)
-/* Exynos3250, 3472, 4415, 5260, 5420 and 5422 only supported. */
+/* Exynos3250, 3472, 5260, 5420 and 5422 only supported. */
 #define HWTRIGEN_PER_ENABLE		(1 << 31)
 
 /* display mode change control register except exynos4 */
@@ -138,18 +138,6 @@ static struct fimd_driver_data exynos4_fimd_driver_data = {
 	.has_vtsel = 1,
 };
 
-static struct fimd_driver_data exynos4415_fimd_driver_data = {
-	.timing_base = 0x20000,
-	.lcdblk_offset = 0x210,
-	.lcdblk_vt_shift = 10,
-	.lcdblk_bypass_shift = 1,
-	.trg_type = I80_HW_TRG,
-	.has_shadowcon = 1,
-	.has_vidoutcon = 1,
-	.has_vtsel = 1,
-	.has_trigger_per_te = 1,
-};
-
 static struct fimd_driver_data exynos5_fimd_driver_data = {
 	.timing_base = 0x20000,
 	.lcdblk_offset = 0x214,
@@ -210,8 +198,6 @@ static const struct of_device_id fimd_driver_dt_match[] = {
 	  .data = &exynos3_fimd_driver_data },
 	{ .compatible = "samsung,exynos4210-fimd",
 	  .data = &exynos4_fimd_driver_data },
-	{ .compatible = "samsung,exynos4415-fimd",
-	  .data = &exynos4415_fimd_driver_data },
 	{ .compatible = "samsung,exynos5250-fimd",
 	  .data = &exynos5_fimd_driver_data },
 	{ .compatible = "samsung,exynos5420-fimd",
@@ -257,7 +243,7 @@ static int fimd_enable_vblank(struct exynos_drm_crtc *crtc)
 		val |= VIDINTCON0_INT_FRAME;
 
 		val &= ~VIDINTCON0_FRAMESEL0_MASK;
-		val |= VIDINTCON0_FRAMESEL0_VSYNC;
+		val |= VIDINTCON0_FRAMESEL0_FRONTPORCH;
 		val &= ~VIDINTCON0_FRAMESEL1_MASK;
 		val |= VIDINTCON0_FRAMESEL1_NONE;
 	}
@@ -723,6 +709,8 @@ static void fimd_atomic_flush(struct exynos_drm_crtc *crtc)
 	for (i = 0; i < WINDOWS_NR; i++)
 		fimd_shadow_protect_win(ctx, i, false);
+
+	exynos_crtc_handle_event(crtc);
 }
 
 static void fimd_update_plane(struct exynos_drm_crtc *crtc,


@@ -218,7 +218,7 @@ static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
 		return ERR_PTR(ret);
 	}
 
-	DRM_DEBUG_KMS("created file object = %p\n", obj->filp);
+	DRM_DEBUG_KMS("created file object = %pK\n", obj->filp);
 
 	return exynos_gem;
 }


@@ -1723,7 +1723,7 @@ static int gsc_probe(struct platform_device *pdev)
 		return ret;
 	}
 
-	DRM_DEBUG_KMS("id[%d]ippdrv[%p]\n", ctx->id, ippdrv);
+	DRM_DEBUG_KMS("id[%d]ippdrv[%pK]\n", ctx->id, ippdrv);
 
 	mutex_init(&ctx->lock);
 	platform_set_drvdata(pdev, ctx);


@@ -208,7 +208,7 @@ static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
 	 * e.g PAUSE state, queue buf, command control.
 	 */
 	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
-		DRM_DEBUG_KMS("count[%d]ippdrv[%p]\n", count++, ippdrv);
+		DRM_DEBUG_KMS("count[%d]ippdrv[%pK]\n", count++, ippdrv);
 
 		mutex_lock(&ippdrv->cmd_lock);
 		list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
@@ -388,7 +388,7 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
 	}
 	property->prop_id = ret;
 
-	DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[%p]\n",
+	DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[%pK]\n",
 		property->prop_id, property->cmd, ippdrv);
 
 	/* stored property information and ippdrv in private data */
@@ -518,7 +518,7 @@ static int ipp_put_mem_node(struct drm_device *drm_dev,
 {
 	int i;
 
-	DRM_DEBUG_KMS("node[%p]\n", m_node);
+	DRM_DEBUG_KMS("node[%pK]\n", m_node);
 
 	if (!m_node) {
 		DRM_ERROR("invalid dequeue node.\n");
@@ -562,7 +562,7 @@ static struct drm_exynos_ipp_mem_node
 	m_node->buf_id = qbuf->buf_id;
 	INIT_LIST_HEAD(&m_node->list);
 
-	DRM_DEBUG_KMS("m_node[%p]ops_id[%d]\n", m_node, qbuf->ops_id);
+	DRM_DEBUG_KMS("m_node[%pK]ops_id[%d]\n", m_node, qbuf->ops_id);
 	DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id);
 
 	for_each_ipp_planar(i) {
@@ -659,7 +659,7 @@ static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
 	mutex_lock(&c_node->event_lock);
 	list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
-		DRM_DEBUG_KMS("count[%d]e[%p]\n", count++, e);
+		DRM_DEBUG_KMS("count[%d]e[%pK]\n", count++, e);
 
 		/*
 		 * qbuf == NULL condition means all event deletion.
@@ -750,7 +750,7 @@ static struct drm_exynos_ipp_mem_node
 	/* find memory node from memory list */
 	list_for_each_entry(m_node, head, list) {
-		DRM_DEBUG_KMS("count[%d]m_node[%p]\n", count++, m_node);
+		DRM_DEBUG_KMS("count[%d]m_node[%pK]\n", count++, m_node);
 
 		/* compare buffer id */
 		if (m_node->buf_id == qbuf->buf_id)
@@ -767,7 +767,7 @@ static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
 	struct exynos_drm_ipp_ops *ops = NULL;
 	int ret = 0;
 
-	DRM_DEBUG_KMS("node[%p]\n", m_node);
+	DRM_DEBUG_KMS("node[%pK]\n", m_node);
 
 	if (!m_node) {
 		DRM_ERROR("invalid queue node.\n");
@@ -1232,7 +1232,7 @@ static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
 			m_node = list_first_entry(head,
 				struct drm_exynos_ipp_mem_node, list);
 
-			DRM_DEBUG_KMS("m_node[%p]\n", m_node);
+			DRM_DEBUG_KMS("m_node[%pK]\n", m_node);
 
 			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
 			if (ret) {
@@ -1601,7 +1601,7 @@ static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
 	}
 	ippdrv->prop_list.ipp_id = ret;
 
-	DRM_DEBUG_KMS("count[%d]ippdrv[%p]ipp_id[%d]\n",
+	DRM_DEBUG_KMS("count[%d]ippdrv[%pK]ipp_id[%d]\n",
 		count++, ippdrv, ret);
 
 	/* store parent device for node */
@@ -1659,7 +1659,7 @@ static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
 	file_priv->ipp_dev = dev;
 
-	DRM_DEBUG_KMS("done priv[%p]\n", dev);
+	DRM_DEBUG_KMS("done priv[%pK]\n", dev);
 
 	return 0;
 }
@@ -1676,7 +1676,7 @@ static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
 	mutex_lock(&ippdrv->cmd_lock);
 	list_for_each_entry_safe(c_node, tc_node,
 		&ippdrv->cmd_list, list) {
-		DRM_DEBUG_KMS("count[%d]ippdrv[%p]\n",
+		DRM_DEBUG_KMS("count[%d]ippdrv[%pK]\n",
 			count++, ippdrv);
 
 		if (c_node->filp == file) {
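
All of the %p to %pK conversions in this file (and the neighbouring exynos ones) opt the debug output into the kptr_restrict policy: with /proc/sys/kernel/kptr_restrict enabled, %pK shows zeros to unprivileged readers instead of a real kernel address. Usage is otherwise identical to %p:

    DRM_DEBUG_KMS("node[%pK]\n", m_node);	/* censored per kptr_restrict */
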


@@ -748,7 +748,7 @@ static int rotator_probe(struct platform_device *pdev)
 		goto err_ippdrv_register;
 	}
 
-	DRM_DEBUG_KMS("ippdrv[%p]\n", ippdrv);
+	DRM_DEBUG_KMS("ippdrv[%pK]\n", ippdrv);
 
 	platform_set_drvdata(pdev, rot);


@@ -170,6 +170,7 @@ static const struct exynos_drm_crtc_ops vidi_crtc_ops = {
 	.enable_vblank = vidi_enable_vblank,
 	.disable_vblank = vidi_disable_vblank,
 	.update_plane = vidi_update_plane,
+	.atomic_flush = exynos_crtc_handle_event,
 };
 
 static void vidi_fake_vblank_timer(unsigned long arg)


@@ -1012,6 +1012,7 @@ static void mixer_atomic_flush(struct exynos_drm_crtc *crtc)
 		return;
 
 	mixer_vsync_set_update(mixer_ctx, true);
+	exynos_crtc_handle_event(crtc);
 }
 
 static void mixer_enable(struct exynos_drm_crtc *crtc)


@@ -242,7 +242,7 @@ static int alloc_resource(struct intel_vgpu *vgpu,
 	const char *item;
 
 	if (!param->low_gm_sz || !param->high_gm_sz || !param->fence_sz) {
-		gvt_err("Invalid vGPU creation params\n");
+		gvt_vgpu_err("Invalid vGPU creation params\n");
 		return -EINVAL;
 	}
@@ -285,9 +285,9 @@ static int alloc_resource(struct intel_vgpu *vgpu,
 	return 0;
 
 no_enough_resource:
-	gvt_err("vgpu%d: fail to allocate resource %s\n", vgpu->id, item);
-	gvt_err("vgpu%d: request %luMB avail %luMB max %luMB taken %luMB\n",
-		vgpu->id, BYTES_TO_MB(request), BYTES_TO_MB(avail),
+	gvt_vgpu_err("fail to allocate resource %s\n", item);
+	gvt_vgpu_err("request %luMB avail %luMB max %luMB taken %luMB\n",
+		BYTES_TO_MB(request), BYTES_TO_MB(avail),
 		BYTES_TO_MB(max), BYTES_TO_MB(taken));
 	return -ENOSPC;
 }


@@ -817,6 +817,25 @@ static bool is_shadowed_mmio(unsigned int offset)
 	return ret;
 }
 
+static inline bool is_force_nonpriv_mmio(unsigned int offset)
+{
+	return (offset >= 0x24d0 && offset < 0x2500);
+}
+
+static int force_nonpriv_reg_handler(struct parser_exec_state *s,
+				     unsigned int offset, unsigned int index)
+{
+	struct intel_gvt *gvt = s->vgpu->gvt;
+	unsigned int data = cmd_val(s, index + 1);
+
+	if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data)) {
+		gvt_err("Unexpected forcenonpriv 0x%x LRI write, value=0x%x\n",
+			offset, data);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int cmd_reg_handler(struct parser_exec_state *s,
 	unsigned int offset, unsigned int index, char *cmd)
 {
@@ -824,23 +843,26 @@ static int cmd_reg_handler(struct parser_exec_state *s,
 	struct intel_gvt *gvt = vgpu->gvt;
 
 	if (offset + 4 > gvt->device_info.mmio_size) {
-		gvt_err("%s access to (%x) outside of MMIO range\n",
+		gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
 				cmd, offset);
 		return -EINVAL;
 	}
 
 	if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
-		gvt_err("vgpu%d: %s access to non-render register (%x)\n",
-				s->vgpu->id, cmd, offset);
+		gvt_vgpu_err("%s access to non-render register (%x)\n",
+				cmd, offset);
 		return 0;
 	}
 
 	if (is_shadowed_mmio(offset)) {
-		gvt_err("vgpu%d: found access of shadowed MMIO %x\n",
-				s->vgpu->id, offset);
+		gvt_vgpu_err("found access of shadowed MMIO %x\n", offset);
 		return 0;
 	}
 
+	if (is_force_nonpriv_mmio(offset) &&
+	    force_nonpriv_reg_handler(s, offset, index))
+		return -EINVAL;
+
 	if (offset == i915_mmio_reg_offset(DERRMR) ||
 		offset == i915_mmio_reg_offset(FORCEWAKE_MT)) {
 		/* Writing to HW VGT_PVINFO_PAGE offset will be discarded */
@@ -1008,7 +1030,7 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s)
 		ret = cmd_reg_handler(s, 0x2358, 1, "pipe_ctrl");
 	else if (post_sync == 1) {
 		/* check ggtt*/
-		if ((cmd_val(s, 2) & (1 << 2))) {
+		if ((cmd_val(s, 1) & PIPE_CONTROL_GLOBAL_GTT_IVB)) {
 			gma = cmd_val(s, 2) & GENMASK(31, 3);
 			if (gmadr_bytes == 8)
 				gma |= (cmd_gma_hi(s, 3)) << 32;
@@ -1129,6 +1151,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
 		struct mi_display_flip_command_info *info)
 {
 	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+	struct intel_vgpu *vgpu = s->vgpu;
 	u32 dword0 = cmd_val(s, 0);
 	u32 dword1 = cmd_val(s, 1);
 	u32 dword2 = cmd_val(s, 2);
@@ -1167,7 +1190,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
 		break;
 
 	default:
-		gvt_err("unknown plane code %d\n", plane);
+		gvt_vgpu_err("unknown plane code %d\n", plane);
 		return -EINVAL;
 	}
@@ -1274,25 +1297,26 @@ static int update_plane_mmio_from_mi_display_flip(
 static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
 {
 	struct mi_display_flip_command_info info;
+	struct intel_vgpu *vgpu = s->vgpu;
 	int ret;
 	int i;
 	int len = cmd_length(s);
 
 	ret = decode_mi_display_flip(s, &info);
 	if (ret) {
-		gvt_err("fail to decode MI display flip command\n");
+		gvt_vgpu_err("fail to decode MI display flip command\n");
 		return ret;
 	}
 
 	ret = check_mi_display_flip(s, &info);
 	if (ret) {
-		gvt_err("invalid MI display flip command\n");
+		gvt_vgpu_err("invalid MI display flip command\n");
 		return ret;
 	}
 
 	ret = update_plane_mmio_from_mi_display_flip(s, &info);
 	if (ret) {
-		gvt_err("fail to update plane mmio\n");
+		gvt_vgpu_err("fail to update plane mmio\n");
 		return ret;
 	}
@@ -1350,7 +1374,8 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
 	int ret;
 
 	if (op_size > max_surface_size) {
-		gvt_err("command address audit fail name %s\n", s->info->name);
+		gvt_vgpu_err("command address audit fail name %s\n",
+			s->info->name);
 		return -EINVAL;
 	}
@@ -1367,7 +1392,7 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
 	}
 	return 0;
 err:
-	gvt_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
+	gvt_vgpu_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
 			s->info->name, guest_gma, op_size);
 
 	pr_err("cmd dump: ");
@@ -1412,8 +1437,10 @@ static int cmd_handler_mi_store_data_imm(struct parser_exec_state *s)
 
 static inline int unexpected_cmd(struct parser_exec_state *s)
 {
-	gvt_err("vgpu%d: Unexpected %s in command buffer!\n",
-			s->vgpu->id, s->info->name);
+	struct intel_vgpu *vgpu = s->vgpu;
+
+	gvt_vgpu_err("Unexpected %s in command buffer!\n", s->info->name);
+
 	return -EINVAL;
 }
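
The new struct intel_vgpu *vgpu locals throughout these hunks exist because gvt_vgpu_err() expands to a message that references a variable named vgpu from the surrounding scope. Roughly its shape (the authoritative definition lives in the GVT debug header; this is only a sketch):

    #define gvt_vgpu_err(fmt, args...) \
    	DRM_ERROR("vgpu %d: " fmt, vgpu->id, ##args)
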
@ -1516,7 +1543,7 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
while (gma != end_gma) { while (gma != end_gma) {
gpa = intel_vgpu_gma_to_gpa(mm, gma); gpa = intel_vgpu_gma_to_gpa(mm, gma);
if (gpa == INTEL_GVT_INVALID_ADDR) { if (gpa == INTEL_GVT_INVALID_ADDR) {
gvt_err("invalid gma address: %lx\n", gma); gvt_vgpu_err("invalid gma address: %lx\n", gma);
return -EFAULT; return -EFAULT;
} }
@ -1557,6 +1584,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
uint32_t bb_size = 0; uint32_t bb_size = 0;
uint32_t cmd_len = 0; uint32_t cmd_len = 0;
bool met_bb_end = false; bool met_bb_end = false;
struct intel_vgpu *vgpu = s->vgpu;
u32 cmd; u32 cmd;
/* get the start gm address of the batch buffer */ /* get the start gm address of the batch buffer */
@ -1565,7 +1593,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id); info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
if (info == NULL) { if (info == NULL) {
gvt_err("unknown cmd 0x%x, opcode=0x%x\n", gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
cmd, get_opcode(cmd, s->ring_id)); cmd, get_opcode(cmd, s->ring_id));
return -EINVAL; return -EINVAL;
} }
@ -1574,7 +1602,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
gma, gma + 4, &cmd); gma, gma + 4, &cmd);
info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id); info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
if (info == NULL) { if (info == NULL) {
gvt_err("unknown cmd 0x%x, opcode=0x%x\n", gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
cmd, get_opcode(cmd, s->ring_id)); cmd, get_opcode(cmd, s->ring_id));
return -EINVAL; return -EINVAL;
} }
@ -1599,6 +1627,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
static int perform_bb_shadow(struct parser_exec_state *s) static int perform_bb_shadow(struct parser_exec_state *s)
{ {
struct intel_shadow_bb_entry *entry_obj; struct intel_shadow_bb_entry *entry_obj;
struct intel_vgpu *vgpu = s->vgpu;
unsigned long gma = 0; unsigned long gma = 0;
uint32_t bb_size; uint32_t bb_size;
void *dst = NULL; void *dst = NULL;
@ -1633,7 +1662,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false); ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false);
if (ret) { if (ret) {
gvt_err("failed to set shadow batch to CPU\n"); gvt_vgpu_err("failed to set shadow batch to CPU\n");
goto unmap_src; goto unmap_src;
} }
@ -1644,8 +1673,8 @@ static int perform_bb_shadow(struct parser_exec_state *s)
ret = copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm, ret = copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
gma, gma + bb_size, gma, gma + bb_size,
dst); dst);
if (ret < 0) { if (ret) {
gvt_err("fail to copy guest ring buffer\n"); gvt_vgpu_err("fail to copy guest ring buffer\n");
goto unmap_src; goto unmap_src;
} }
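
Several hunks in this file also tighten the result check on copy_gma_to_hva() from "ret < 0" to "ret", so any nonzero return is now treated as failure. A condensed sketch of the new caller-side convention, reusing names from the surrounding hunks:

	ret = copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
			      gma, gma + bb_size, dst);
	if (ret) {	/* was: if (ret < 0) */
		gvt_vgpu_err("fail to copy guest ring buffer\n");
		goto unmap_src;
	}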
@ -1676,15 +1705,16 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
{ {
bool second_level; bool second_level;
int ret = 0; int ret = 0;
struct intel_vgpu *vgpu = s->vgpu;
if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) { if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
gvt_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n"); gvt_vgpu_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
return -EINVAL; return -EINVAL;
} }
second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1; second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1;
if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) { if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) {
gvt_err("Jumping to 2nd level BB from RB is not allowed\n"); gvt_vgpu_err("Jumping to 2nd level BB from RB is not allowed\n");
return -EINVAL; return -EINVAL;
} }
@ -1702,7 +1732,7 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
if (batch_buffer_needs_scan(s)) { if (batch_buffer_needs_scan(s)) {
ret = perform_bb_shadow(s); ret = perform_bb_shadow(s);
if (ret < 0) if (ret < 0)
gvt_err("invalid shadow batch buffer\n"); gvt_vgpu_err("invalid shadow batch buffer\n");
} else { } else {
/* emulate a batch buffer end to do return right */ /* emulate a batch buffer end to do return right */
ret = cmd_handler_mi_batch_buffer_end(s); ret = cmd_handler_mi_batch_buffer_end(s);
@ -2429,6 +2459,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
int ret = 0; int ret = 0;
cycles_t t0, t1, t2; cycles_t t0, t1, t2;
struct parser_exec_state s_before_advance_custom; struct parser_exec_state s_before_advance_custom;
struct intel_vgpu *vgpu = s->vgpu;
t0 = get_cycles(); t0 = get_cycles();
@ -2436,7 +2467,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id); info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
if (info == NULL) { if (info == NULL) {
gvt_err("unknown cmd 0x%x, opcode=0x%x\n", gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
cmd, get_opcode(cmd, s->ring_id)); cmd, get_opcode(cmd, s->ring_id));
return -EINVAL; return -EINVAL;
} }
@ -2452,7 +2483,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
if (info->handler) { if (info->handler) {
ret = info->handler(s); ret = info->handler(s);
if (ret < 0) { if (ret < 0) {
gvt_err("%s handler error\n", info->name); gvt_vgpu_err("%s handler error\n", info->name);
return ret; return ret;
} }
} }
@ -2463,7 +2494,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
if (!(info->flag & F_IP_ADVANCE_CUSTOM)) { if (!(info->flag & F_IP_ADVANCE_CUSTOM)) {
ret = cmd_advance_default(s); ret = cmd_advance_default(s);
if (ret) { if (ret) {
gvt_err("%s IP advance error\n", info->name); gvt_vgpu_err("%s IP advance error\n", info->name);
return ret; return ret;
} }
} }
@ -2486,6 +2517,7 @@ static int command_scan(struct parser_exec_state *s,
unsigned long gma_head, gma_tail, gma_bottom; unsigned long gma_head, gma_tail, gma_bottom;
int ret = 0; int ret = 0;
struct intel_vgpu *vgpu = s->vgpu;
gma_head = rb_start + rb_head; gma_head = rb_start + rb_head;
gma_tail = rb_start + rb_tail; gma_tail = rb_start + rb_tail;
@ -2497,7 +2529,7 @@ static int command_scan(struct parser_exec_state *s,
if (s->buf_type == RING_BUFFER_INSTRUCTION) { if (s->buf_type == RING_BUFFER_INSTRUCTION) {
if (!(s->ip_gma >= rb_start) || if (!(s->ip_gma >= rb_start) ||
!(s->ip_gma < gma_bottom)) { !(s->ip_gma < gma_bottom)) {
gvt_err("ip_gma %lx out of ring scope." gvt_vgpu_err("ip_gma %lx out of ring scope."
"(base:0x%lx, bottom: 0x%lx)\n", "(base:0x%lx, bottom: 0x%lx)\n",
s->ip_gma, rb_start, s->ip_gma, rb_start,
gma_bottom); gma_bottom);
@ -2505,7 +2537,7 @@ static int command_scan(struct parser_exec_state *s,
return -EINVAL; return -EINVAL;
} }
if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) { if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) {
gvt_err("ip_gma %lx out of range." gvt_vgpu_err("ip_gma %lx out of range."
"base 0x%lx head 0x%lx tail 0x%lx\n", "base 0x%lx head 0x%lx tail 0x%lx\n",
s->ip_gma, rb_start, s->ip_gma, rb_start,
rb_head, rb_tail); rb_head, rb_tail);
@ -2515,7 +2547,7 @@ static int command_scan(struct parser_exec_state *s,
} }
ret = cmd_parser_exec(s); ret = cmd_parser_exec(s);
if (ret) { if (ret) {
gvt_err("cmd parser error\n"); gvt_vgpu_err("cmd parser error\n");
parser_exec_state_dump(s); parser_exec_state_dump(s);
break; break;
} }
@ -2634,8 +2666,8 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
if (gma_head > gma_tail) { if (gma_head > gma_tail) {
ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
gma_head, gma_top, cs); gma_head, gma_top, cs);
if (ret < 0) { if (ret) {
gvt_err("fail to copy guest ring buffer\n"); gvt_vgpu_err("fail to copy guest ring buffer\n");
return ret; return ret;
} }
cs += ret / sizeof(u32); cs += ret / sizeof(u32);
@ -2644,8 +2676,8 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
/* copy head or start <-> tail */ /* copy head or start <-> tail */
ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail, cs); ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail, cs);
if (ret < 0) { if (ret) {
gvt_err("fail to copy guest ring buffer\n"); gvt_vgpu_err("fail to copy guest ring buffer\n");
return ret; return ret;
} }
cs += ret / sizeof(u32); cs += ret / sizeof(u32);
@ -2656,16 +2688,17 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
{ {
int ret; int ret;
struct intel_vgpu *vgpu = workload->vgpu;
ret = shadow_workload_ring_buffer(workload); ret = shadow_workload_ring_buffer(workload);
if (ret) { if (ret) {
gvt_err("fail to shadow workload ring_buffer\n"); gvt_vgpu_err("fail to shadow workload ring_buffer\n");
return ret; return ret;
} }
ret = scan_workload(workload); ret = scan_workload(workload);
if (ret) { if (ret) {
gvt_err("scan workload error\n"); gvt_vgpu_err("scan workload error\n");
return ret; return ret;
} }
return 0; return 0;
@ -2675,6 +2708,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{ {
int ctx_size = wa_ctx->indirect_ctx.size; int ctx_size = wa_ctx->indirect_ctx.size;
unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma; unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
int ret = 0; int ret = 0;
void *map; void *map;
@ -2688,14 +2722,14 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
/* get the va of the shadow batch buffer */ /* get the va of the shadow batch buffer */
map = i915_gem_object_pin_map(obj, I915_MAP_WB); map = i915_gem_object_pin_map(obj, I915_MAP_WB);
if (IS_ERR(map)) { if (IS_ERR(map)) {
gvt_err("failed to vmap shadow indirect ctx\n"); gvt_vgpu_err("failed to vmap shadow indirect ctx\n");
ret = PTR_ERR(map); ret = PTR_ERR(map);
goto put_obj; goto put_obj;
} }
ret = i915_gem_object_set_to_cpu_domain(obj, false); ret = i915_gem_object_set_to_cpu_domain(obj, false);
if (ret) { if (ret) {
gvt_err("failed to set shadow indirect ctx to CPU\n"); gvt_vgpu_err("failed to set shadow indirect ctx to CPU\n");
goto unmap_src; goto unmap_src;
} }
@ -2703,8 +2737,8 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
wa_ctx->workload->vgpu->gtt.ggtt_mm, wa_ctx->workload->vgpu->gtt.ggtt_mm,
guest_gma, guest_gma + ctx_size, guest_gma, guest_gma + ctx_size,
map); map);
if (ret < 0) { if (ret) {
gvt_err("fail to copy guest indirect ctx\n"); gvt_vgpu_err("fail to copy guest indirect ctx\n");
goto unmap_src; goto unmap_src;
} }
@ -2738,13 +2772,14 @@ static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{ {
int ret; int ret;
struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;
if (wa_ctx->indirect_ctx.size == 0) if (wa_ctx->indirect_ctx.size == 0)
return 0; return 0;
ret = shadow_indirect_ctx(wa_ctx); ret = shadow_indirect_ctx(wa_ctx);
if (ret) { if (ret) {
gvt_err("fail to shadow indirect ctx\n"); gvt_vgpu_err("fail to shadow indirect ctx\n");
return ret; return ret;
} }
@ -2752,7 +2787,7 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
ret = scan_wa_ctx(wa_ctx); ret = scan_wa_ctx(wa_ctx);
if (ret) { if (ret) {
gvt_err("scan wa ctx error\n"); gvt_vgpu_err("scan wa ctx error\n");
return ret; return ret;
} }


@ -27,6 +27,14 @@
#define gvt_err(fmt, args...) \ #define gvt_err(fmt, args...) \
DRM_ERROR("gvt: "fmt, ##args) DRM_ERROR("gvt: "fmt, ##args)
#define gvt_vgpu_err(fmt, args...) \
do { \
if (IS_ERR_OR_NULL(vgpu)) \
DRM_DEBUG_DRIVER("gvt: "fmt, ##args); \
else \
DRM_DEBUG_DRIVER("gvt: vgpu %d: "fmt, vgpu->id, ##args);\
} while (0)
#define gvt_dbg_core(fmt, args...) \ #define gvt_dbg_core(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args) DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args)


@ -52,16 +52,16 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
unsigned char chr = 0; unsigned char chr = 0;
if (edid->state == I2C_NOT_SPECIFIED || !edid->slave_selected) { if (edid->state == I2C_NOT_SPECIFIED || !edid->slave_selected) {
gvt_err("Driver tries to read EDID without proper sequence!\n"); gvt_vgpu_err("Driver tries to read EDID without proper sequence!\n");
return 0; return 0;
} }
if (edid->current_edid_read >= EDID_SIZE) { if (edid->current_edid_read >= EDID_SIZE) {
gvt_err("edid_get_byte() exceeds the size of EDID!\n"); gvt_vgpu_err("edid_get_byte() exceeds the size of EDID!\n");
return 0; return 0;
} }
if (!edid->edid_available) { if (!edid->edid_available) {
gvt_err("Reading EDID but EDID is not available!\n"); gvt_vgpu_err("Reading EDID but EDID is not available!\n");
return 0; return 0;
} }
@ -72,7 +72,7 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
chr = edid_data->edid_block[edid->current_edid_read]; chr = edid_data->edid_block[edid->current_edid_read];
edid->current_edid_read++; edid->current_edid_read++;
} else { } else {
gvt_err("No EDID available during the reading?\n"); gvt_vgpu_err("No EDID available during the reading?\n");
} }
return chr; return chr;
} }
@ -223,7 +223,7 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE; vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE;
break; break;
default: default:
gvt_err("Unknown/reserved GMBUS cycle detected!\n"); gvt_vgpu_err("Unknown/reserved GMBUS cycle detected!\n");
break; break;
} }
/* /*
@ -292,8 +292,7 @@ static int gmbus3_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
*/ */
} else { } else {
memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes); memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
gvt_err("vgpu%d: warning: gmbus3 read with nothing returned\n", gvt_vgpu_err("warning: gmbus3 read with nothing returned\n");
vgpu->id);
} }
return 0; return 0;
} }


@ -172,6 +172,7 @@ static int emulate_execlist_ctx_schedule_out(
struct intel_vgpu_execlist *execlist, struct intel_vgpu_execlist *execlist,
struct execlist_ctx_descriptor_format *ctx) struct execlist_ctx_descriptor_format *ctx)
{ {
struct intel_vgpu *vgpu = execlist->vgpu;
struct intel_vgpu_execlist_slot *running = execlist->running_slot; struct intel_vgpu_execlist_slot *running = execlist->running_slot;
struct intel_vgpu_execlist_slot *pending = execlist->pending_slot; struct intel_vgpu_execlist_slot *pending = execlist->pending_slot;
struct execlist_ctx_descriptor_format *ctx0 = &running->ctx[0]; struct execlist_ctx_descriptor_format *ctx0 = &running->ctx[0];
@ -183,7 +184,7 @@ static int emulate_execlist_ctx_schedule_out(
gvt_dbg_el("schedule out context id %x\n", ctx->context_id); gvt_dbg_el("schedule out context id %x\n", ctx->context_id);
if (WARN_ON(!same_context(ctx, execlist->running_context))) { if (WARN_ON(!same_context(ctx, execlist->running_context))) {
gvt_err("schedule out context is not running context," gvt_vgpu_err("schedule out context is not running context,"
"ctx id %x running ctx id %x\n", "ctx id %x running ctx id %x\n",
ctx->context_id, ctx->context_id,
execlist->running_context->context_id); execlist->running_context->context_id);
@ -254,7 +255,7 @@ static struct intel_vgpu_execlist_slot *get_next_execlist_slot(
status.udw = vgpu_vreg(vgpu, status_reg + 4); status.udw = vgpu_vreg(vgpu, status_reg + 4);
if (status.execlist_queue_full) { if (status.execlist_queue_full) {
gvt_err("virtual execlist slots are full\n"); gvt_vgpu_err("virtual execlist slots are full\n");
return NULL; return NULL;
} }
@ -270,11 +271,12 @@ static int emulate_execlist_schedule_in(struct intel_vgpu_execlist *execlist,
struct execlist_ctx_descriptor_format *ctx0, *ctx1; struct execlist_ctx_descriptor_format *ctx0, *ctx1;
struct execlist_context_status_format status; struct execlist_context_status_format status;
struct intel_vgpu *vgpu = execlist->vgpu;
gvt_dbg_el("emulate schedule-in\n"); gvt_dbg_el("emulate schedule-in\n");
if (!slot) { if (!slot) {
gvt_err("no available execlist slot\n"); gvt_vgpu_err("no available execlist slot\n");
return -EINVAL; return -EINVAL;
} }
@ -375,7 +377,6 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0); vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
if (IS_ERR(vma)) { if (IS_ERR(vma)) {
gvt_err("Cannot pin\n");
return; return;
} }
@ -428,7 +429,6 @@ static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL, vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
0, CACHELINE_BYTES, 0); 0, CACHELINE_BYTES, 0);
if (IS_ERR(vma)) { if (IS_ERR(vma)) {
gvt_err("Cannot pin indirect ctx obj\n");
return; return;
} }
@ -561,6 +561,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
{ {
struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc; struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
struct intel_vgpu_mm *mm; struct intel_vgpu_mm *mm;
struct intel_vgpu *vgpu = workload->vgpu;
int page_table_level; int page_table_level;
u32 pdp[8]; u32 pdp[8];
@ -569,7 +570,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
} else if (desc->addressing_mode == 3) { /* legacy 64 bit */ } else if (desc->addressing_mode == 3) { /* legacy 64 bit */
page_table_level = 4; page_table_level = 4;
} else { } else {
gvt_err("Advanced Context mode(SVM) is not supported!\n"); gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
return -EINVAL; return -EINVAL;
} }
@ -583,7 +584,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT, mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT,
pdp, page_table_level, 0); pdp, page_table_level, 0);
if (IS_ERR(mm)) { if (IS_ERR(mm)) {
gvt_err("fail to create mm object.\n"); gvt_vgpu_err("fail to create mm object.\n");
return PTR_ERR(mm); return PTR_ERR(mm);
} }
} }
@ -609,7 +610,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
(u32)((desc->lrca + 1) << GTT_PAGE_SHIFT)); (u32)((desc->lrca + 1) << GTT_PAGE_SHIFT));
if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) { if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
gvt_err("invalid guest context LRCA: %x\n", desc->lrca); gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
return -EINVAL; return -EINVAL;
} }
@ -724,8 +725,7 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
continue; continue;
if (!desc[i]->privilege_access) { if (!desc[i]->privilege_access) {
gvt_err("vgpu%d: unexpected GGTT elsp submission\n", gvt_vgpu_err("unexpected GGTT elsp submission\n");
vgpu->id);
return -EINVAL; return -EINVAL;
} }
@ -735,15 +735,13 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
} }
if (!valid_desc_bitmap) { if (!valid_desc_bitmap) {
gvt_err("vgpu%d: no valid desc in a elsp submission\n", gvt_vgpu_err("no valid desc in a elsp submission\n");
vgpu->id);
return -EINVAL; return -EINVAL;
} }
if (!test_bit(0, (void *)&valid_desc_bitmap) && if (!test_bit(0, (void *)&valid_desc_bitmap) &&
test_bit(1, (void *)&valid_desc_bitmap)) { test_bit(1, (void *)&valid_desc_bitmap)) {
gvt_err("vgpu%d: weird elsp submission, desc 0 is not valid\n", gvt_vgpu_err("weird elsp submission, desc 0 is not valid\n");
vgpu->id);
return -EINVAL; return -EINVAL;
} }
@ -752,8 +750,7 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
ret = submit_context(vgpu, ring_id, &valid_desc[i], ret = submit_context(vgpu, ring_id, &valid_desc[i],
emulate_schedule_in); emulate_schedule_in);
if (ret) { if (ret) {
gvt_err("vgpu%d: fail to schedule workload\n", gvt_vgpu_err("fail to schedule workload\n");
vgpu->id);
return ret; return ret;
} }
emulate_schedule_in = false; emulate_schedule_in = false;


@ -49,8 +49,8 @@ bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
{ {
if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
&& !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) { && !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
gvt_err("vgpu%d: invalid range gmadr 0x%llx size 0x%x\n", gvt_vgpu_err("invalid range gmadr 0x%llx size 0x%x\n",
vgpu->id, addr, size); addr, size);
return false; return false;
} }
return true; return true;
@ -430,7 +430,7 @@ static int gtt_entry_p2m(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *p,
mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn); mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn);
if (mfn == INTEL_GVT_INVALID_ADDR) { if (mfn == INTEL_GVT_INVALID_ADDR) {
gvt_err("fail to translate gfn: 0x%lx\n", gfn); gvt_vgpu_err("fail to translate gfn: 0x%lx\n", gfn);
return -ENXIO; return -ENXIO;
} }
@ -611,7 +611,7 @@ static inline int init_shadow_page(struct intel_vgpu *vgpu,
daddr = dma_map_page(kdev, p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL); daddr = dma_map_page(kdev, p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
if (dma_mapping_error(kdev, daddr)) { if (dma_mapping_error(kdev, daddr)) {
gvt_err("fail to map dma addr\n"); gvt_vgpu_err("fail to map dma addr\n");
return -EINVAL; return -EINVAL;
} }
@ -735,7 +735,7 @@ retry:
if (reclaim_one_mm(vgpu->gvt)) if (reclaim_one_mm(vgpu->gvt))
goto retry; goto retry;
gvt_err("fail to allocate ppgtt shadow page\n"); gvt_vgpu_err("fail to allocate ppgtt shadow page\n");
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
} }
@ -750,14 +750,14 @@ retry:
*/ */
ret = init_shadow_page(vgpu, &spt->shadow_page, type); ret = init_shadow_page(vgpu, &spt->shadow_page, type);
if (ret) { if (ret) {
gvt_err("fail to initialize shadow page for spt\n"); gvt_vgpu_err("fail to initialize shadow page for spt\n");
goto err; goto err;
} }
ret = intel_vgpu_init_guest_page(vgpu, &spt->guest_page, ret = intel_vgpu_init_guest_page(vgpu, &spt->guest_page,
gfn, ppgtt_write_protection_handler, NULL); gfn, ppgtt_write_protection_handler, NULL);
if (ret) { if (ret) {
gvt_err("fail to initialize guest page for spt\n"); gvt_vgpu_err("fail to initialize guest page for spt\n");
goto err; goto err;
} }
@ -776,8 +776,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page(
if (p) if (p)
return shadow_page_to_ppgtt_spt(p); return shadow_page_to_ppgtt_spt(p);
gvt_err("vgpu%d: fail to find ppgtt shadow page: 0x%lx\n", gvt_vgpu_err("fail to find ppgtt shadow page: 0x%lx\n", mfn);
vgpu->id, mfn);
return NULL; return NULL;
} }
@ -827,8 +826,8 @@ static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
} }
s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e)); s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
if (!s) { if (!s) {
gvt_err("vgpu%d: fail to find shadow page: mfn: 0x%lx\n", gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n",
vgpu->id, ops->get_pfn(e)); ops->get_pfn(e));
return -ENXIO; return -ENXIO;
} }
return ppgtt_invalidate_shadow_page(s); return ppgtt_invalidate_shadow_page(s);
@ -836,6 +835,7 @@ static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt) static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{ {
struct intel_vgpu *vgpu = spt->vgpu;
struct intel_gvt_gtt_entry e; struct intel_gvt_gtt_entry e;
unsigned long index; unsigned long index;
int ret; int ret;
@ -854,7 +854,7 @@ static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
for_each_present_shadow_entry(spt, &e, index) { for_each_present_shadow_entry(spt, &e, index) {
if (!gtt_type_is_pt(get_next_pt_type(e.type))) { if (!gtt_type_is_pt(get_next_pt_type(e.type))) {
gvt_err("GVT doesn't support pse bit for now\n"); gvt_vgpu_err("GVT doesn't support pse bit for now\n");
return -EINVAL; return -EINVAL;
} }
ret = ppgtt_invalidate_shadow_page_by_shadow_entry( ret = ppgtt_invalidate_shadow_page_by_shadow_entry(
@ -868,8 +868,8 @@ release:
ppgtt_free_shadow_page(spt); ppgtt_free_shadow_page(spt);
return 0; return 0;
fail: fail:
gvt_err("vgpu%d: fail: shadow page %p shadow entry 0x%llx type %d\n", gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n",
spt->vgpu->id, spt, e.val64, e.type); spt, e.val64, e.type);
return ret; return ret;
} }
@ -914,8 +914,8 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
} }
return s; return s;
fail: fail:
gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n", gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
vgpu->id, s, we->val64, we->type); s, we->val64, we->type);
return ERR_PTR(ret); return ERR_PTR(ret);
} }
@ -953,7 +953,7 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
for_each_present_guest_entry(spt, &ge, i) { for_each_present_guest_entry(spt, &ge, i) {
if (!gtt_type_is_pt(get_next_pt_type(ge.type))) { if (!gtt_type_is_pt(get_next_pt_type(ge.type))) {
gvt_err("GVT doesn't support pse bit now\n"); gvt_vgpu_err("GVT doesn't support pse bit now\n");
ret = -EINVAL; ret = -EINVAL;
goto fail; goto fail;
} }
@ -969,8 +969,8 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
} }
return 0; return 0;
fail: fail:
gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n", gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
vgpu->id, spt, ge.val64, ge.type); spt, ge.val64, ge.type);
return ret; return ret;
} }
@ -999,7 +999,7 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
struct intel_vgpu_ppgtt_spt *s = struct intel_vgpu_ppgtt_spt *s =
ppgtt_find_shadow_page(vgpu, ops->get_pfn(&e)); ppgtt_find_shadow_page(vgpu, ops->get_pfn(&e));
if (!s) { if (!s) {
gvt_err("fail to find guest page\n"); gvt_vgpu_err("fail to find guest page\n");
ret = -ENXIO; ret = -ENXIO;
goto fail; goto fail;
} }
@ -1011,8 +1011,8 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
ppgtt_set_shadow_entry(spt, &e, index); ppgtt_set_shadow_entry(spt, &e, index);
return 0; return 0;
fail: fail:
gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n", gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
vgpu->id, spt, e.val64, e.type); spt, e.val64, e.type);
return ret; return ret;
} }
@ -1046,8 +1046,8 @@ static int ppgtt_handle_guest_entry_add(struct intel_vgpu_guest_page *gpt,
} }
return 0; return 0;
fail: fail:
gvt_err("vgpu%d: fail: spt %p guest entry 0x%llx type %d\n", vgpu->id, gvt_vgpu_err("fail: spt %p guest entry 0x%llx type %d\n",
spt, we->val64, we->type); spt, we->val64, we->type);
return ret; return ret;
} }
@ -1250,8 +1250,8 @@ static int ppgtt_handle_guest_write_page_table(
} }
return 0; return 0;
fail: fail:
gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d.\n", gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n",
vgpu->id, spt, we->val64, we->type); spt, we->val64, we->type);
return ret; return ret;
} }
@ -1493,7 +1493,7 @@ static int shadow_mm(struct intel_vgpu_mm *mm)
spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge); spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
if (IS_ERR(spt)) { if (IS_ERR(spt)) {
gvt_err("fail to populate guest root pointer\n"); gvt_vgpu_err("fail to populate guest root pointer\n");
ret = PTR_ERR(spt); ret = PTR_ERR(spt);
goto fail; goto fail;
} }
@ -1566,7 +1566,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
ret = gtt->mm_alloc_page_table(mm); ret = gtt->mm_alloc_page_table(mm);
if (ret) { if (ret) {
gvt_err("fail to allocate page table for mm\n"); gvt_vgpu_err("fail to allocate page table for mm\n");
goto fail; goto fail;
} }
@ -1584,7 +1584,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
} }
return mm; return mm;
fail: fail:
gvt_err("fail to create mm\n"); gvt_vgpu_err("fail to create mm\n");
if (mm) if (mm)
intel_gvt_mm_unreference(mm); intel_gvt_mm_unreference(mm);
return ERR_PTR(ret); return ERR_PTR(ret);
@ -1760,7 +1760,7 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
mm->page_table_level, gma, gpa); mm->page_table_level, gma, gpa);
return gpa; return gpa;
err: err:
gvt_err("invalid mm type: %d gma %lx\n", mm->type, gma); gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma);
return INTEL_GVT_INVALID_ADDR; return INTEL_GVT_INVALID_ADDR;
} }
@ -1836,8 +1836,7 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
if (ops->test_present(&e)) { if (ops->test_present(&e)) {
ret = gtt_entry_p2m(vgpu, &e, &m); ret = gtt_entry_p2m(vgpu, &e, &m);
if (ret) { if (ret) {
gvt_err("vgpu%d: fail to translate guest gtt entry\n", gvt_vgpu_err("fail to translate guest gtt entry\n");
vgpu->id);
return ret; return ret;
} }
} else { } else {
@ -1893,14 +1892,14 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
scratch_pt = (void *)get_zeroed_page(GFP_KERNEL); scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
if (!scratch_pt) { if (!scratch_pt) {
gvt_err("fail to allocate scratch page\n"); gvt_vgpu_err("fail to allocate scratch page\n");
return -ENOMEM; return -ENOMEM;
} }
daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0, daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0,
4096, PCI_DMA_BIDIRECTIONAL); 4096, PCI_DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, daddr)) { if (dma_mapping_error(dev, daddr)) {
gvt_err("fail to dmamap scratch_pt\n"); gvt_vgpu_err("fail to dmamap scratch_pt\n");
__free_page(virt_to_page(scratch_pt)); __free_page(virt_to_page(scratch_pt));
return -ENOMEM; return -ENOMEM;
} }
@ -2003,7 +2002,7 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT, ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
NULL, 1, 0); NULL, 1, 0);
if (IS_ERR(ggtt_mm)) { if (IS_ERR(ggtt_mm)) {
gvt_err("fail to create mm for ggtt.\n"); gvt_vgpu_err("fail to create mm for ggtt.\n");
return PTR_ERR(ggtt_mm); return PTR_ERR(ggtt_mm);
} }
@ -2076,7 +2075,6 @@ static int setup_spt_oos(struct intel_gvt *gvt)
for (i = 0; i < preallocated_oos_pages; i++) { for (i = 0; i < preallocated_oos_pages; i++) {
oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL); oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
if (!oos_page) { if (!oos_page) {
gvt_err("fail to pre-allocate oos page\n");
ret = -ENOMEM; ret = -ENOMEM;
goto fail; goto fail;
} }
@ -2166,7 +2164,7 @@ int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT, mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT,
pdp, page_table_level, 0); pdp, page_table_level, 0);
if (IS_ERR(mm)) { if (IS_ERR(mm)) {
gvt_err("fail to create mm\n"); gvt_vgpu_err("fail to create mm\n");
return PTR_ERR(mm); return PTR_ERR(mm);
} }
} }
@ -2196,7 +2194,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp); mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
if (!mm) { if (!mm) {
gvt_err("fail to find ppgtt instance.\n"); gvt_vgpu_err("fail to find ppgtt instance.\n");
return -EINVAL; return -EINVAL;
} }
intel_gvt_mm_unreference(mm); intel_gvt_mm_unreference(mm);


@ -181,11 +181,9 @@ static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
GVT_FAILSAFE_UNSUPPORTED_GUEST); GVT_FAILSAFE_UNSUPPORTED_GUEST);
if (!vgpu->mmio.disable_warn_untrack) { if (!vgpu->mmio.disable_warn_untrack) {
gvt_err("vgpu%d: found oob fence register access\n", gvt_vgpu_err("found oob fence register access\n");
vgpu->id); gvt_vgpu_err("total fence %d, access fence %d\n",
gvt_err("vgpu%d: total fence %d, access fence %d\n", vgpu_fence_sz(vgpu), fence_num);
vgpu->id, vgpu_fence_sz(vgpu),
fence_num);
} }
memset(p_data, 0, bytes); memset(p_data, 0, bytes);
return -EINVAL; return -EINVAL;
@ -249,7 +247,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
break; break;
default: default:
/*should not hit here*/ /*should not hit here*/
gvt_err("invalid forcewake offset 0x%x\n", offset); gvt_vgpu_err("invalid forcewake offset 0x%x\n", offset);
return -EINVAL; return -EINVAL;
} }
} else { } else {
@ -530,7 +528,7 @@ static int check_fdi_rx_train_status(struct intel_vgpu *vgpu,
fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_2; fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_2;
fdi_iir_check_bits = FDI_RX_SYMBOL_LOCK; fdi_iir_check_bits = FDI_RX_SYMBOL_LOCK;
} else { } else {
gvt_err("Invalid train pattern %d\n", train_pattern); gvt_vgpu_err("Invalid train pattern %d\n", train_pattern);
return -EINVAL; return -EINVAL;
} }
@ -588,7 +586,7 @@ static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu,
else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX) else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX)
index = FDI_RX_IMR_TO_PIPE(offset); index = FDI_RX_IMR_TO_PIPE(offset);
else { else {
gvt_err("Unsupport registers %x\n", offset); gvt_vgpu_err("Unsupport registers %x\n", offset);
return -EINVAL; return -EINVAL;
} }
@ -818,7 +816,7 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
u32 data; u32 data;
if (!dpy_is_valid_port(port_index)) { if (!dpy_is_valid_port(port_index)) {
gvt_err("GVT(%d): Unsupported DP port access!\n", vgpu->id); gvt_vgpu_err("Unsupported DP port access!\n");
return 0; return 0;
} }
@ -1016,8 +1014,7 @@ static void write_virtual_sbi_register(struct intel_vgpu *vgpu,
if (i == num) { if (i == num) {
if (num == SBI_REG_MAX) { if (num == SBI_REG_MAX) {
gvt_err("vgpu%d: SBI caching meets maximum limits\n", gvt_vgpu_err("SBI caching meets maximum limits\n");
vgpu->id);
return; return;
} }
display->sbi.number++; display->sbi.number++;
@ -1097,7 +1094,7 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
break; break;
} }
if (invalid_read) if (invalid_read)
gvt_err("invalid pvinfo read: [%x:%x] = %x\n", gvt_vgpu_err("invalid pvinfo read: [%x:%x] = %x\n",
offset, bytes, *(u32 *)p_data); offset, bytes, *(u32 *)p_data);
vgpu->pv_notified = true; vgpu->pv_notified = true;
return 0; return 0;
@ -1125,7 +1122,7 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
case 1: /* Remove this in guest driver. */ case 1: /* Remove this in guest driver. */
break; break;
default: default:
gvt_err("Invalid PV notification %d\n", notification); gvt_vgpu_err("Invalid PV notification %d\n", notification);
} }
return ret; return ret;
} }
@ -1181,7 +1178,7 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE); enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE);
break; break;
default: default:
gvt_err("invalid pvinfo write offset %x bytes %x data %x\n", gvt_vgpu_err("invalid pvinfo write offset %x bytes %x data %x\n",
offset, bytes, data); offset, bytes, data);
break; break;
} }
@ -1415,7 +1412,8 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
if (execlist->elsp_dwords.index == 3) { if (execlist->elsp_dwords.index == 3) {
ret = intel_vgpu_submit_execlist(vgpu, ring_id); ret = intel_vgpu_submit_execlist(vgpu, ring_id);
if(ret) if(ret)
gvt_err("fail submit workload on ring %d\n", ring_id); gvt_vgpu_err("fail submit workload on ring %d\n",
ring_id);
} }
++execlist->elsp_dwords.index; ++execlist->elsp_dwords.index;
@ -2988,3 +2986,20 @@ int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
write_vreg(vgpu, offset, p_data, bytes); write_vreg(vgpu, offset, p_data, bytes);
return 0; return 0;
} }
/**
 * intel_gvt_in_force_nonpriv_whitelist - check whether an MMIO offset is
 * whitelisted as a force-nonpriv register
 *
 * @gvt: a GVT device
 * @offset: register offset
 *
 * Returns:
 * True if the register is in the force-nonpriv whitelist;
 * false otherwise.
 */
bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
unsigned int offset)
{
return in_whitelist(offset);
}
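
A hypothetical caller-side sketch; only the helper above comes from this diff, the checking site is assumed:

	static int check_reg_target(struct intel_gvt *gvt, unsigned int offset)
	{
		/* Assumption: a command-parser check rejecting writes to
		 * registers outside the force-nonpriv whitelist. */
		if (!intel_gvt_in_force_nonpriv_whitelist(gvt, offset))
			return -EPERM;
		return 0;
	}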


@ -426,7 +426,7 @@ static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev) static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
{ {
struct intel_vgpu *vgpu; struct intel_vgpu *vgpu = NULL;
struct intel_vgpu_type *type; struct intel_vgpu_type *type;
struct device *pdev; struct device *pdev;
void *gvt; void *gvt;
@ -437,7 +437,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj)); type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
if (!type) { if (!type) {
gvt_err("failed to find type %s to create\n", gvt_vgpu_err("failed to find type %s to create\n",
kobject_name(kobj)); kobject_name(kobj));
ret = -EINVAL; ret = -EINVAL;
goto out; goto out;
@ -446,7 +446,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
vgpu = intel_gvt_ops->vgpu_create(gvt, type); vgpu = intel_gvt_ops->vgpu_create(gvt, type);
if (IS_ERR_OR_NULL(vgpu)) { if (IS_ERR_OR_NULL(vgpu)) {
ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu); ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
gvt_err("failed to create intel vgpu: %d\n", ret); gvt_vgpu_err("failed to create intel vgpu: %d\n", ret);
goto out; goto out;
} }
@ -526,7 +526,8 @@ static int intel_vgpu_open(struct mdev_device *mdev)
ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events, ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
&vgpu->vdev.iommu_notifier); &vgpu->vdev.iommu_notifier);
if (ret != 0) { if (ret != 0) {
gvt_err("vfio_register_notifier for iommu failed: %d\n", ret); gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",
ret);
goto out; goto out;
} }
@ -534,7 +535,8 @@ static int intel_vgpu_open(struct mdev_device *mdev)
ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events, ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
&vgpu->vdev.group_notifier); &vgpu->vdev.group_notifier);
if (ret != 0) { if (ret != 0) {
gvt_err("vfio_register_notifier for group failed: %d\n", ret); gvt_vgpu_err("vfio_register_notifier for group failed: %d\n",
ret);
goto undo_iommu; goto undo_iommu;
} }
@ -635,7 +637,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
if (index >= VFIO_PCI_NUM_REGIONS) { if (index >= VFIO_PCI_NUM_REGIONS) {
gvt_err("invalid index: %u\n", index); gvt_vgpu_err("invalid index: %u\n", index);
return -EINVAL; return -EINVAL;
} }
@ -669,7 +671,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
case VFIO_PCI_VGA_REGION_INDEX: case VFIO_PCI_VGA_REGION_INDEX:
case VFIO_PCI_ROM_REGION_INDEX: case VFIO_PCI_ROM_REGION_INDEX:
default: default:
gvt_err("unsupported region: %u\n", index); gvt_vgpu_err("unsupported region: %u\n", index);
} }
return ret == 0 ? count : ret; return ret == 0 ? count : ret;
@ -861,7 +863,7 @@ static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
trigger = eventfd_ctx_fdget(fd); trigger = eventfd_ctx_fdget(fd);
if (IS_ERR(trigger)) { if (IS_ERR(trigger)) {
gvt_err("eventfd_ctx_fdget failed\n"); gvt_vgpu_err("eventfd_ctx_fdget failed\n");
return PTR_ERR(trigger); return PTR_ERR(trigger);
} }
vgpu->vdev.msi_trigger = trigger; vgpu->vdev.msi_trigger = trigger;
@ -1120,7 +1122,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
ret = vfio_set_irqs_validate_and_prepare(&hdr, max, ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
VFIO_PCI_NUM_IRQS, &data_size); VFIO_PCI_NUM_IRQS, &data_size);
if (ret) { if (ret) {
gvt_err("intel:vfio_set_irqs_validate_and_prepare failed\n"); gvt_vgpu_err("intel:vfio_set_irqs_validate_and_prepare failed\n");
return -EINVAL; return -EINVAL;
} }
if (data_size) { if (data_size) {
@ -1310,7 +1312,7 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
kvm = vgpu->vdev.kvm; kvm = vgpu->vdev.kvm;
if (!kvm || kvm->mm != current->mm) { if (!kvm || kvm->mm != current->mm) {
gvt_err("KVM is required to use Intel vGPU\n"); gvt_vgpu_err("KVM is required to use Intel vGPU\n");
return -ESRCH; return -ESRCH;
} }
@ -1337,8 +1339,10 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
static bool kvmgt_guest_exit(struct kvmgt_guest_info *info) static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
{ {
struct intel_vgpu *vgpu = info->vgpu;
if (!info) { if (!info) {
gvt_err("kvmgt_guest_info invalid\n"); gvt_vgpu_err("kvmgt_guest_info invalid\n");
return false; return false;
} }
@ -1383,12 +1387,14 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
unsigned long iova, pfn; unsigned long iova, pfn;
struct kvmgt_guest_info *info; struct kvmgt_guest_info *info;
struct device *dev; struct device *dev;
struct intel_vgpu *vgpu;
int rc; int rc;
if (!handle_valid(handle)) if (!handle_valid(handle))
return INTEL_GVT_INVALID_ADDR; return INTEL_GVT_INVALID_ADDR;
info = (struct kvmgt_guest_info *)handle; info = (struct kvmgt_guest_info *)handle;
vgpu = info->vgpu;
iova = gvt_cache_find(info->vgpu, gfn); iova = gvt_cache_find(info->vgpu, gfn);
if (iova != INTEL_GVT_INVALID_ADDR) if (iova != INTEL_GVT_INVALID_ADDR)
return iova; return iova;
@ -1397,13 +1403,14 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
dev = mdev_dev(info->vgpu->vdev.mdev); dev = mdev_dev(info->vgpu->vdev.mdev);
rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn); rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn);
if (rc != 1) { if (rc != 1) {
gvt_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", gfn, rc); gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx: %d\n",
gfn, rc);
return INTEL_GVT_INVALID_ADDR; return INTEL_GVT_INVALID_ADDR;
} }
/* transfer to host iova for GFX to use DMA */ /* transfer to host iova for GFX to use DMA */
rc = gvt_dma_map_iova(info->vgpu, pfn, &iova); rc = gvt_dma_map_iova(info->vgpu, pfn, &iova);
if (rc) { if (rc) {
gvt_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn); gvt_vgpu_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn);
vfio_unpin_pages(dev, &gfn, 1); vfio_unpin_pages(dev, &gfn, 1);
return INTEL_GVT_INVALID_ADDR; return INTEL_GVT_INVALID_ADDR;
} }
@ -1417,7 +1424,7 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
{ {
struct kvmgt_guest_info *info; struct kvmgt_guest_info *info;
struct kvm *kvm; struct kvm *kvm;
int ret; int idx, ret;
bool kthread = current->mm == NULL; bool kthread = current->mm == NULL;
if (!handle_valid(handle)) if (!handle_valid(handle))
@ -1429,8 +1436,10 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
if (kthread) if (kthread)
use_mm(kvm->mm); use_mm(kvm->mm);
idx = srcu_read_lock(&kvm->srcu);
ret = write ? kvm_write_guest(kvm, gpa, buf, len) : ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
kvm_read_guest(kvm, gpa, buf, len); kvm_read_guest(kvm, gpa, buf, len);
srcu_read_unlock(&kvm->srcu, idx);
if (kthread) if (kthread)
unuse_mm(kvm->mm); unuse_mm(kvm->mm);
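
kvm_read_guest() and kvm_write_guest() walk KVM's memslots, which are protected by SRCU, so the added srcu_read_lock()/srcu_read_unlock() pair brackets exactly that access. The shape of the pattern, as a sketch:

	#include <linux/kvm_host.h>

	/* Any memslot-touching KVM call must run inside an SRCU read-side
	 * critical section on kvm->srcu. */
	static int read_guest_locked(struct kvm *kvm, gpa_t gpa,
				     void *buf, unsigned long len)
	{
		int idx, ret;

		idx = srcu_read_lock(&kvm->srcu);
		ret = kvm_read_guest(kvm, gpa, buf, len);
		srcu_read_unlock(&kvm->srcu, idx);
		return ret;
	}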


@ -142,10 +142,10 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, ret = intel_gvt_hypervisor_read_gpa(vgpu, pa,
p_data, bytes); p_data, bytes);
if (ret) { if (ret) {
gvt_err("vgpu%d: guest page read error %d, " gvt_vgpu_err("guest page read error %d, "
"gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n", "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
vgpu->id, ret, ret, gp->gfn, pa, *(u32 *)p_data,
gp->gfn, pa, *(u32 *)p_data, bytes); bytes);
} }
mutex_unlock(&gvt->lock); mutex_unlock(&gvt->lock);
return ret; return ret;
@ -200,14 +200,13 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
if (!vgpu->mmio.disable_warn_untrack) { if (!vgpu->mmio.disable_warn_untrack) {
gvt_err("vgpu%d: read untracked MMIO %x(%dB) val %x\n", gvt_vgpu_err("read untracked MMIO %x(%dB) val %x\n",
vgpu->id, offset, bytes, *(u32 *)p_data); offset, bytes, *(u32 *)p_data);
if (offset == 0x206c) { if (offset == 0x206c) {
gvt_err("------------------------------------------\n"); gvt_vgpu_err("------------------------------------------\n");
gvt_err("vgpu%d: likely triggers a gfx reset\n", gvt_vgpu_err("likely triggers a gfx reset\n");
vgpu->id); gvt_vgpu_err("------------------------------------------\n");
gvt_err("------------------------------------------\n");
vgpu->mmio.disable_warn_untrack = true; vgpu->mmio.disable_warn_untrack = true;
} }
} }
@ -220,8 +219,8 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
mutex_unlock(&gvt->lock); mutex_unlock(&gvt->lock);
return 0; return 0;
err: err:
gvt_err("vgpu%d: fail to emulate MMIO read %08x len %d\n", gvt_vgpu_err("fail to emulate MMIO read %08x len %d\n",
vgpu->id, offset, bytes); offset, bytes);
mutex_unlock(&gvt->lock); mutex_unlock(&gvt->lock);
return ret; return ret;
} }
@ -259,10 +258,11 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
if (gp) { if (gp) {
ret = gp->handler(gp, pa, p_data, bytes); ret = gp->handler(gp, pa, p_data, bytes);
if (ret) { if (ret) {
gvt_err("vgpu%d: guest page write error %d, " gvt_err("guest page write error %d, "
"gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n", "gfn 0x%lx, pa 0x%llx, "
vgpu->id, ret, "var 0x%x, len %d\n",
gp->gfn, pa, *(u32 *)p_data, bytes); ret, gp->gfn, pa,
*(u32 *)p_data, bytes);
} }
mutex_unlock(&gvt->lock); mutex_unlock(&gvt->lock);
return ret; return ret;
@ -329,8 +329,8 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
/* all register bits are RO. */ /* all register bits are RO. */
if (ro_mask == ~(u64)0) { if (ro_mask == ~(u64)0) {
gvt_err("vgpu%d: try to write RO reg %x\n", gvt_vgpu_err("try to write RO reg %x\n",
vgpu->id, offset); offset);
ret = 0; ret = 0;
goto out; goto out;
} }
@ -360,8 +360,8 @@ out:
mutex_unlock(&gvt->lock); mutex_unlock(&gvt->lock);
return 0; return 0;
err: err:
gvt_err("vgpu%d: fail to emulate MMIO write %08x len %d\n", gvt_vgpu_err("fail to emulate MMIO write %08x len %d\n", offset,
vgpu->id, offset, bytes); bytes);
mutex_unlock(&gvt->lock); mutex_unlock(&gvt->lock);
return ret; return ret;
} }


@ -107,4 +107,7 @@ int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes); void *p_data, unsigned int bytes);
int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes); void *p_data, unsigned int bytes);
bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
unsigned int offset);
#endif #endif


@ -67,14 +67,15 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va
+ i * PAGE_SIZE); + i * PAGE_SIZE);
if (mfn == INTEL_GVT_INVALID_ADDR) { if (mfn == INTEL_GVT_INVALID_ADDR) {
gvt_err("fail to get MFN from VA\n"); gvt_vgpu_err("fail to get MFN from VA\n");
return -EINVAL; return -EINVAL;
} }
ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
vgpu_opregion(vgpu)->gfn[i], vgpu_opregion(vgpu)->gfn[i],
mfn, 1, map); mfn, 1, map);
if (ret) { if (ret) {
gvt_err("fail to map GFN to MFN, errno: %d\n", ret); gvt_vgpu_err("fail to map GFN to MFN, errno: %d\n",
ret);
return ret; return ret;
} }
} }
@ -287,7 +288,7 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM; parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM;
if (!(swsci & SWSCI_SCI_SELECT)) { if (!(swsci & SWSCI_SCI_SELECT)) {
gvt_err("vgpu%d: requesting SMI service\n", vgpu->id); gvt_vgpu_err("requesting SMI service\n");
return 0; return 0;
} }
/* ignore non 0->1 transitions */ /* ignore non 0->1 transitions */
@ -300,9 +301,8 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
func = GVT_OPREGION_FUNC(*scic); func = GVT_OPREGION_FUNC(*scic);
subfunc = GVT_OPREGION_SUBFUNC(*scic); subfunc = GVT_OPREGION_SUBFUNC(*scic);
if (!querying_capabilities(*scic)) { if (!querying_capabilities(*scic)) {
gvt_err("vgpu%d: requesting runtime service: func \"%s\"," gvt_vgpu_err("requesting runtime service: func \"%s\","
" subfunc \"%s\"\n", " subfunc \"%s\"\n",
vgpu->id,
opregion_func_name(func), opregion_func_name(func),
opregion_subfunc_name(subfunc)); opregion_subfunc_name(subfunc));
/* /*


@ -167,7 +167,7 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
I915_WRITE_FW(reg, 0x1); I915_WRITE_FW(reg, 0x1);
if (wait_for_atomic((I915_READ_FW(reg) == 0), 50)) if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
gvt_err("timeout in invalidate ring (%d) tlb\n", ring_id); gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id);
else else
vgpu_vreg(vgpu, regs[ring_id]) = 0; vgpu_vreg(vgpu, regs[ring_id]) = 0;


@ -101,7 +101,7 @@ struct tbs_sched_data {
struct list_head runq_head; struct list_head runq_head;
}; };
#define GVT_DEFAULT_TIME_SLICE (1 * HZ / 1000) #define GVT_DEFAULT_TIME_SLICE (msecs_to_jiffies(1))
static void tbs_sched_func(struct work_struct *work) static void tbs_sched_func(struct work_struct *work)
{ {
@ -223,7 +223,7 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
return; return;
list_add_tail(&vgpu_data->list, &sched_data->runq_head); list_add_tail(&vgpu_data->list, &sched_data->runq_head);
schedule_delayed_work(&sched_data->work, sched_data->period); schedule_delayed_work(&sched_data->work, 0);
} }
static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu) static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
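
The old 1 * HZ / 1000 relies on integer division, so on kernels configured with HZ below 1000 (HZ=250 is common) it truncates to a zero-length time slice; msecs_to_jiffies() rounds up and returns at least one jiffy for any nonzero input. The companion hunk queues the first scheduling pass immediately (delay 0) instead of waiting a full period. A sketch:

	#include <linux/jiffies.h>

	static unsigned long default_time_slice(void)
	{
		/* With HZ = 250: 1 * HZ / 1000 == 0, but msecs_to_jiffies(1) == 1 */
		return msecs_to_jiffies(1);
	}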


@ -84,7 +84,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
(u32)((workload->ctx_desc.lrca + i) << (u32)((workload->ctx_desc.lrca + i) <<
GTT_PAGE_SHIFT)); GTT_PAGE_SHIFT));
if (context_gpa == INTEL_GVT_INVALID_ADDR) { if (context_gpa == INTEL_GVT_INVALID_ADDR) {
gvt_err("Invalid guest context descriptor\n"); gvt_vgpu_err("Invalid guest context descriptor\n");
return -EINVAL; return -EINVAL;
} }
@ -173,7 +173,9 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
int ring_id = workload->ring_id; int ring_id = workload->ring_id;
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx; struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv; struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
struct intel_engine_cs *engine = dev_priv->engine[ring_id];
struct drm_i915_gem_request *rq; struct drm_i915_gem_request *rq;
struct intel_vgpu *vgpu = workload->vgpu;
int ret; int ret;
gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n", gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
@ -185,9 +187,24 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
mutex_lock(&dev_priv->drm.struct_mutex); mutex_lock(&dev_priv->drm.struct_mutex);
/* Pin the shadow context ourselves even though it will also be pinned
 * when i915 allocates the request: GVT updates the guest context from
 * the shadow context once the workload completes, and by that point
 * i915 may already have unpinned the shadow context, leaving the
 * shadow_ctx pages invalid. So GVT needs its own pin; after updating
 * the guest context it can unpin shadow_ctx safely.
 */
ret = engine->context_pin(engine, shadow_ctx);
if (ret) {
gvt_vgpu_err("fail to pin shadow context\n");
workload->status = ret;
mutex_unlock(&dev_priv->drm.struct_mutex);
return ret;
}
rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx); rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
if (IS_ERR(rq)) { if (IS_ERR(rq)) {
gvt_err("fail to allocate gem request\n"); gvt_vgpu_err("fail to allocate gem request\n");
ret = PTR_ERR(rq); ret = PTR_ERR(rq);
goto out; goto out;
} }
@ -200,9 +217,12 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
if (ret) if (ret)
goto out; goto out;
ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx); if ((workload->ring_id == RCS) &&
if (ret) (workload->wa_ctx.indirect_ctx.size != 0)) {
goto out; ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
if (ret)
goto out;
}
ret = populate_shadow_context(workload); ret = populate_shadow_context(workload);
if (ret) if (ret)
@ -225,6 +245,9 @@ out:
if (!IS_ERR_OR_NULL(rq)) if (!IS_ERR_OR_NULL(rq))
i915_add_request(rq); i915_add_request(rq);
else
engine->context_unpin(engine, shadow_ctx);
mutex_unlock(&dev_priv->drm.struct_mutex); mutex_unlock(&dev_priv->drm.struct_mutex);
return ret; return ret;
} }
@ -320,7 +343,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
(u32)((workload->ctx_desc.lrca + i) << (u32)((workload->ctx_desc.lrca + i) <<
GTT_PAGE_SHIFT)); GTT_PAGE_SHIFT));
if (context_gpa == INTEL_GVT_INVALID_ADDR) { if (context_gpa == INTEL_GVT_INVALID_ADDR) {
gvt_err("invalid guest context descriptor\n"); gvt_vgpu_err("invalid guest context descriptor\n");
return; return;
} }
@ -374,6 +397,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
* For the workload w/o request, directly complete the workload. * For the workload w/o request, directly complete the workload.
*/ */
if (workload->req) { if (workload->req) {
struct drm_i915_private *dev_priv =
workload->vgpu->gvt->dev_priv;
struct intel_engine_cs *engine =
dev_priv->engine[workload->ring_id];
wait_event(workload->shadow_ctx_status_wq, wait_event(workload->shadow_ctx_status_wq,
!atomic_read(&workload->shadow_ctx_active)); !atomic_read(&workload->shadow_ctx_active));
@ -386,6 +413,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
INTEL_GVT_EVENT_MAX) INTEL_GVT_EVENT_MAX)
intel_vgpu_trigger_virtual_event(vgpu, event); intel_vgpu_trigger_virtual_event(vgpu, event);
} }
mutex_lock(&dev_priv->drm.struct_mutex);
/* unpin shadow ctx as the shadow_ctx update is done */
engine->context_unpin(engine, workload->vgpu->shadow_ctx);
mutex_unlock(&dev_priv->drm.struct_mutex);
} }
gvt_dbg_sched("ring id %d complete workload %p status %d\n", gvt_dbg_sched("ring id %d complete workload %p status %d\n",
@ -415,6 +446,7 @@ static int workload_thread(void *priv)
int ring_id = p->ring_id; int ring_id = p->ring_id;
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload = NULL; struct intel_vgpu_workload *workload = NULL;
struct intel_vgpu *vgpu = NULL;
int ret; int ret;
bool need_force_wake = IS_SKYLAKE(gvt->dev_priv); bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
DEFINE_WAIT_FUNC(wait, woken_wake_function); DEFINE_WAIT_FUNC(wait, woken_wake_function);
@ -457,25 +489,14 @@ static int workload_thread(void *priv)
mutex_unlock(&gvt->lock); mutex_unlock(&gvt->lock);
if (ret) { if (ret) {
gvt_err("fail to dispatch workload, skip\n"); vgpu = workload->vgpu;
gvt_vgpu_err("fail to dispatch workload, skip\n");
goto complete; goto complete;
} }
gvt_dbg_sched("ring id %d wait workload %p\n", gvt_dbg_sched("ring id %d wait workload %p\n",
workload->ring_id, workload); workload->ring_id, workload);
retry: i915_wait_request(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
i915_wait_request(workload->req,
0, MAX_SCHEDULE_TIMEOUT);
/* I915 has replay mechanism and a request will be replayed
* if there is i915 reset. So the seqno will be updated anyway.
* If the seqno is not updated yet after waiting, which means
* the replay may still be in progress and we can wait again.
*/
if (!i915_gem_request_completed(workload->req)) {
gvt_dbg_sched("workload %p not completed, wait again\n",
workload);
goto retry;
}
complete: complete:
gvt_dbg_sched("will complete workload %p, status: %d\n", gvt_dbg_sched("will complete workload %p, status: %d\n",


@ -1104,6 +1104,7 @@ static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
u32 render, media; u32 render, media;
time = ktime_us_delta(now.ktime, prev->ktime); time = ktime_us_delta(now.ktime, prev->ktime);
time *= dev_priv->czclk_freq; time *= dev_priv->czclk_freq;
/* Workload can be split between render + media, /* Workload can be split between render + media,

Some files were not shown because too many files have changed in this diff.