Merge tag 'drm-intel-next-fixes-2015-09-02' into drm-intel-next-queued

Backmerge -fixes since there's more DDI-E related cleanups on top of
the pile of -fixes for skl that just landed for 4.3.

Conflicts:
	drivers/gpu/drm/i915/intel_display.c
	drivers/gpu/drm/i915/intel_dp.c
	drivers/gpu/drm/i915/intel_lrc.c

Conflicts are all fairly harmless adjacent line stuff.

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
commit e93c28f393
Author: Daniel Vetter
Date: 2015-09-02 14:33:42 +02:00

627 changed files with 48288 additions and 17224 deletions

.get_maintainer.ignore (new file)

@ -0,0 +1 @@
Christoph Hellwig <hch@lst.de>


@ -17,6 +17,7 @@ Aleksey Gorelov <aleksey_gorelov@phoenix.com>
Al Viro <viro@ftp.linux.org.uk>
Al Viro <viro@zenIV.linux.org.uk>
Andreas Herrmann <aherrman@de.ibm.com>
Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
Andrew Morton <akpm@linux-foundation.org>
Andrew Vasquez <andrew.vasquez@qlogic.com>
Andy Adamson <andros@citi.umich.edu>


@ -199,6 +199,7 @@ nodes to be present and contain the properties described below.
"qcom,kpss-acc-v1"
"qcom,kpss-acc-v2"
"rockchip,rk3066-smp"
"ste,dbx500-smp"
- cpu-release-addr
Usage: required for systems that have an "enable-method"


@ -30,20 +30,27 @@ Optional properties:
- panel@0: Node of panel connected to this DSI controller.
See files in Documentation/devicetree/bindings/panel/ for each supported
panel.
- qcom,dual-panel-mode: Boolean value indicating if the DSI controller is
- qcom,dual-dsi-mode: Boolean value indicating if the DSI controller is
driving a panel which needs 2 DSI links.
- qcom,master-panel: Boolean value indicating if the DSI controller is driving
- qcom,master-dsi: Boolean value indicating if the DSI controller is driving
the master link of the 2-DSI panel.
- qcom,sync-dual-panel: Boolean value indicating if the DSI controller is
- qcom,sync-dual-dsi: Boolean value indicating if the DSI controller is
driving a 2-DSI panel whose 2 links need to receive commands simultaneously.
- interrupt-parent: phandle to the MDP block if the interrupt signal is routed
through MDP block
- pinctrl-names: the pin control state names; should contain "default"
- pinctrl-0: the default pinctrl state (active)
- pinctrl-n: the "sleep" pinctrl state
- port: DSI controller output port. This contains one endpoint subnode, with its
remote-endpoint set to the phandle of the connected panel's endpoint.
See Documentation/devicetree/bindings/graph.txt for device graph info.
DSI PHY:
Required properties:
- compatible: Could be the following
* "qcom,dsi-phy-28nm-hpm"
* "qcom,dsi-phy-28nm-lp"
* "qcom,dsi-phy-20nm"
- reg: Physical base address and length of the registers of PLL, PHY and PHY
regulator
- reg-names: The names of register regions. The following regions are required:
@ -59,6 +66,10 @@ Required properties:
* "iface_clk"
- vddio-supply: phandle to vdd-io regulator device node
Optional properties:
- qcom,dsi-phy-regulator-ldo-mode: Boolean value indicating if the LDO mode PHY
regulator is wanted.
Example:
mdss_dsi0: qcom,mdss_dsi@fd922800 {
compatible = "qcom,mdss-dsi-ctrl";
@ -90,9 +101,13 @@ Example:
qcom,dsi-phy = <&mdss_dsi_phy0>;
qcom,dual-panel-mode;
qcom,master-panel;
qcom,sync-dual-panel;
qcom,dual-dsi-mode;
qcom,master-dsi;
qcom,sync-dual-dsi;
pinctrl-names = "default", "sleep";
pinctrl-0 = <&mdss_dsi_active>;
pinctrl-1 = <&mdss_dsi_suspend>;
panel: panel@0 {
compatible = "sharp,lq101r1sx01";
@ -101,6 +116,18 @@ Example:
power-supply = <...>;
backlight = <...>;
port {
panel_in: endpoint {
remote-endpoint = <&dsi0_out>;
};
};
};
port {
dsi0_out: endpoint {
remote-endpoint = <&panel_in>;
};
};
};
@ -117,4 +144,6 @@ Example:
clock-names = "iface_clk";
clocks = <&mmcc MDSS_AHB_CLK>;
vddio-supply = <&pma8084_l12>;
qcom,dsi-phy-regulator-ldo-mode;
};


@ -2,8 +2,9 @@ Qualcomm adreno/snapdragon hdmi output
Required properties:
- compatible: one of the following
* "qcom,hdmi-tx-8994"
* "qcom,hdmi-tx-8084"
* "qcom,hdmi-tx-8074"
* "qcom,hdmi-tx-8974"
* "qcom,hdmi-tx-8660"
* "qcom,hdmi-tx-8960"
- reg: Physical base address and length of the controller's registers


@ -197,9 +197,11 @@ of the following host1x client modules:
- sor: serial output resource
Required properties:
- compatible: For Tegra124, must contain "nvidia,tegra124-sor". Otherwise,
must contain '"nvidia,<chip>-sor", "nvidia,tegra124-sor"', where <chip>
is tegra132.
- compatible: Should be:
- "nvidia,tegra124-sor": for Tegra124 and Tegra132
- "nvidia,tegra132-sor": for Tegra132
- "nvidia,tegra210-sor": for Tegra210
- "nvidia,tegra210-sor1": for Tegra210
- reg: Physical base address and length of the controller's registers.
- interrupts: The interrupt outputs from the controller.
- clocks: Must contain an entry for each entry in clock-names.


@ -52,10 +52,9 @@ STMicroelectronics stih4xx platforms
See ../reset/reset.txt for details.
- reset-names: names of the resets listed in resets property in the same
order.
- ranges: to allow probing of subdevices
- sti-hdmi: hdmi output block
must be a child of sti-tvout
must be a child of sti-display-subsystem
Required properties:
- compatible: "st,stih<chip>-hdmi";
- reg: Physical base address of the IP registers and length of memory mapped region.
@ -72,7 +71,7 @@ STMicroelectronics stih4xx platforms
sti-hda:
Required properties:
must be a child of sti-tvout
must be a child of sti-display-subsystem
- compatible: "st,stih<chip>-hda"
- reg: Physical base address of the IP registers and length of memory mapped region.
- reg-names: names of the mapped memory regions listed in regs property in
@ -85,7 +84,7 @@ sti-hda:
sti-dvo:
Required properties:
must be a child of sti-tvout
must be a child of sti-display-subsystem
- compatible: "st,stih<chip>-dvo"
- reg: Physical base address of the IP registers and length of memory mapped region.
- reg-names: names of the mapped memory regions listed in regs property in
@ -195,38 +194,37 @@ Example:
reg-names = "tvout-reg", "hda-reg", "syscfg";
reset-names = "tvout";
resets = <&softreset STIH416_HDTVOUT_SOFTRESET>;
ranges;
};
sti-hdmi@fe85c000 {
compatible = "st,stih416-hdmi";
reg = <0xfe85c000 0x1000>, <0xfe830000 0x10000>;
reg-names = "hdmi-reg", "syscfg";
interrupts = <GIC_SPI 173 IRQ_TYPE_NONE>;
interrupt-names = "irq";
clock-names = "pix", "tmds", "phy", "audio";
clocks = <&clockgen_c_vcc CLK_S_PIX_HDMI>, <&clockgen_c_vcc CLK_S_TMDS_HDMI>, <&clockgen_c_vcc CLK_S_HDMI_REJECT_PLL>, <&clockgen_b1 CLK_S_PCM_0>;
};
sti-hda@fe85a000 {
compatible = "st,stih416-hda";
reg = <0xfe85a000 0x400>, <0xfe83085c 0x4>;
reg-names = "hda-reg", "video-dacs-ctrl";
clock-names = "pix", "hddac";
clocks = <&clockgen_c_vcc CLK_S_PIX_HD>, <&clockgen_c_vcc CLK_S_HDDAC>;
};
sti-dvo@8d00400 {
compatible = "st,stih407-dvo";
reg = <0x8d00400 0x200>;
reg-names = "dvo-reg";
clock-names = "dvo_pix", "dvo",
"main_parent", "aux_parent";
clocks = <&clk_s_d2_flexgen CLK_PIX_DVO>, <&clk_s_d2_flexgen CLK_DVO>,
<&clk_s_d2_quadfs 0>, <&clk_s_d2_quadfs 1>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_dvo>;
sti,panel = <&panel_dvo>;
};
sti-hqvdp@9c000000 {
@ -237,7 +235,7 @@ Example:
reset-names = "hqvdp";
resets = <&softreset STIH407_HDQVDP_SOFTRESET>;
st,vtg = <&vtg_main>;
};
};
};
...
};


@ -0,0 +1,7 @@
AU Optronics Corporation 8.0" WUXGA TFT LCD panel
Required properties:
- compatible: should be "auo,b101ean01"
This binding is compatible with the simple-panel binding, which is specified
in simple-panel.txt in this directory.


@ -0,0 +1,19 @@
LG LG4573 TFT Liquid Crystal Display with SPI control bus
Required properties:
- compatible: "lg,lg4573"
- reg: address of the panel on the SPI bus
The panel must obey rules for SPI slave device specified in document [1].
[1]: Documentation/devicetree/bindings/spi/spi-bus.txt
Example:
lcd_panel: display@0 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "lg,lg4573";
spi-max-frequency = <10000000>;
reg = <0>;
};


@ -0,0 +1,7 @@
NEC LCD Technologies,Ltd. WQVGA TFT LCD panel
Required properties:
- compatible: should be "nec,nl4827hc19-05b"
This binding is compatible with the simple-panel binding, which is specified
in simple-panel.txt in this directory.


@ -0,0 +1,7 @@
OKAYA Electric America, Inc. RS800480T-7X0GP 7" WVGA LCD panel
Required properties:
- compatible: should be "okaya,rs800480t-7x0gp"
This binding is compatible with the simple-panel binding, which is specified
in simple-panel.txt in this directory.


@ -140,6 +140,7 @@ mundoreader Mundo Reader S.L.
murata Murata Manufacturing Co., Ltd.
mxicy Macronix International Co., Ltd.
national National Semiconductor
nec NEC LCD Technologies, Ltd.
neonode Neonode Inc.
netgear NETGEAR
netlogic Broadcom Corporation (formerly NetLogic Microsystems)
@ -148,6 +149,7 @@ nintendo Nintendo
nokia Nokia
nvidia NVIDIA
nxp NXP Semiconductors
okaya Okaya Electric America, Inc.
onnn ON Semiconductor Corp.
opencores OpenCores.org
ortustech Ortus Technology Co., Ltd.


@ -0,0 +1,22 @@
Device Tree bindings for Freescale DCU DRM Driver
Required properties:
- compatible: Should be one of
* "fsl,ls1021a-dcu".
* "fsl,vf610-dcu".
- reg: Address and length of the register set for dcu.
- clocks: From common clock binding: handle to dcu clock.
- clock-names: From common clock binding: Shall be "dcu".
- big-endian Boolean property, LS1021A DCU registers are big-endian.
- fsl,panel: The phandle to panel node.
Examples:
dcu: dcu@2ce0000 {
compatible = "fsl,ls1021a-dcu";
reg = <0x0 0x2ce0000 0x0 0x10000>;
clocks = <&platform_clk 0>;
clock-names = "dcu";
big-endian;
fsl,panel = <&panel>;
};


@ -3555,6 +3555,15 @@ F: drivers/gpu/drm/exynos/
F: include/drm/exynos*
F: include/uapi/drm/exynos*
DRM DRIVERS FOR FREESCALE DCU
M: Jianwei Wang <jianwei.wang.chn@gmail.com>
M: Alison Wang <alison.wang@freescale.com>
L: dri-devel@lists.freedesktop.org
S: Supported
F: drivers/gpu/drm/fsl-dcu/
F: Documentation/devicetree/bindings/video/fsl,dcu.txt
F: Documentation/devicetree/bindings/panel/nec,nl4827hc19_05b.txt
DRM DRIVERS FOR FREESCALE IMX
M: Philipp Zabel <p.zabel@pengutronix.de>
L: dri-devel@lists.freedesktop.org
@ -3592,6 +3601,15 @@ S: Maintained
F: drivers/gpu/drm/rockchip/
F: Documentation/devicetree/bindings/video/rockchip*
DRM DRIVERS FOR STI
M: Benjamin Gaignard <benjamin.gaignard@linaro.org>
M: Vincent Abriou <vincent.abriou@st.com>
L: dri-devel@lists.freedesktop.org
T: git http://git.linaro.org/people/benjamin.gaignard/kernel.git
S: Maintained
F: drivers/gpu/drm/sti
F: Documentation/devicetree/bindings/gpu/st,stih4xx.txt
DSBR100 USB FM RADIO DRIVER
M: Alexey Klimov <klimov.linux@gmail.com>
L: linux-media@vger.kernel.org


@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 2
SUBLEVEL = 0
EXTRAVERSION = -rc6
EXTRAVERSION = -rc8
NAME = Hurr durr I'ma sheep
# *DOCUMENTATION*


@ -312,6 +312,9 @@ INSTALL_TARGETS = zinstall uinstall install
PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS)
bootpImage uImage: zImage
zImage: Image
$(BOOT_TARGETS): vmlinux
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@


@ -116,7 +116,7 @@
ranges = <0 0x2000 0x2000>;
scm_conf: scm_conf@0 {
compatible = "syscon";
compatible = "syscon", "simple-bus";
reg = <0x0 0x1400>;
#address-cells = <1>;
#size-cells = <1>;


@ -181,10 +181,10 @@
interrupt-names = "msi";
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0x7>;
interrupt-map = <0 0 0 1 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
<0 0 0 2 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
<0 0 0 3 &intc GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
<0 0 0 4 &intc GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
interrupt-map = <0 0 0 1 &gpc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
<0 0 0 2 &gpc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
<0 0 0 3 &gpc GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
<0 0 0 4 &gpc GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6QDL_CLK_PCIE_AXI>,
<&clks IMX6QDL_CLK_LVDS1_GATE>,
<&clks IMX6QDL_CLK_PCIE_REF_125M>;


@ -131,10 +131,17 @@
<GIC_SPI 376 IRQ_TYPE_EDGE_RISING>;
};
};
mdio: mdio@24200f00 {
compatible = "ti,keystone_mdio", "ti,davinci_mdio";
#address-cells = <1>;
#size-cells = <0>;
reg = <0x24200f00 0x100>;
status = "disabled";
clocks = <&clkcpgmac>;
clock-names = "fck";
bus_freq = <2500000>;
};
/include/ "k2e-netcp.dtsi"
};
};
&mdio {
reg = <0x24200f00 0x100>;
};


@ -98,6 +98,17 @@
#gpio-cells = <2>;
gpio,syscon-dev = <&devctrl 0x25c>;
};
mdio: mdio@02090300 {
compatible = "ti,keystone_mdio", "ti,davinci_mdio";
#address-cells = <1>;
#size-cells = <0>;
reg = <0x02090300 0x100>;
status = "disabled";
clocks = <&clkcpgmac>;
clock-names = "fck";
bus_freq = <2500000>;
};
/include/ "k2hk-netcp.dtsi"
};
};


@ -29,7 +29,6 @@
};
soc {
/include/ "k2l-clocks.dtsi"
uart2: serial@02348400 {
@ -79,6 +78,17 @@
#gpio-cells = <2>;
gpio,syscon-dev = <&devctrl 0x24c>;
};
mdio: mdio@26200f00 {
compatible = "ti,keystone_mdio", "ti,davinci_mdio";
#address-cells = <1>;
#size-cells = <0>;
reg = <0x26200f00 0x100>;
status = "disabled";
clocks = <&clkcpgmac>;
clock-names = "fck";
bus_freq = <2500000>;
};
/include/ "k2l-netcp.dtsi"
};
};
@ -96,7 +106,3 @@
/* Pin muxed. Enabled and configured by Bootloader */
status = "disabled";
};
&mdio {
reg = <0x26200f00 0x100>;
};


@ -267,17 +267,6 @@
1 0 0x21000A00 0x00000100>;
};
mdio: mdio@02090300 {
compatible = "ti,keystone_mdio", "ti,davinci_mdio";
#address-cells = <1>;
#size-cells = <0>;
reg = <0x02090300 0x100>;
status = "disabled";
clocks = <&clkpa>;
clock-names = "fck";
bus_freq = <2500000>;
};
kirq0: keystone_irq@26202a0 {
compatible = "ti,keystone-irq";
interrupts = <GIC_SPI 4 IRQ_TYPE_EDGE_RISING>;


@ -51,7 +51,8 @@
};
scm_conf: scm_conf@270 {
compatible = "syscon";
compatible = "syscon",
"simple-bus";
reg = <0x270 0x240>;
#address-cells = <1>;
#size-cells = <1>;


@ -191,7 +191,8 @@
};
omap4_padconf_global: omap4_padconf_global@5a0 {
compatible = "syscon";
compatible = "syscon",
"simple-bus";
reg = <0x5a0 0x170>;
#address-cells = <1>;
#size-cells = <1>;


@ -180,7 +180,8 @@
};
omap5_padconf_global: omap5_padconf_global@5a0 {
compatible = "syscon";
compatible = "syscon",
"simple-bus";
reg = <0x5a0 0xec>;
#address-cells = <1>;
#size-cells = <1>;


@ -15,6 +15,33 @@
#include "skeleton.dtsi"
/ {
cpus {
#address-cells = <1>;
#size-cells = <0>;
enable-method = "ste,dbx500-smp";
cpu-map {
cluster0 {
core0 {
cpu = <&CPU0>;
};
core1 {
cpu = <&CPU1>;
};
};
};
CPU0: cpu@300 {
device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <0x300>;
};
CPU1: cpu@301 {
device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <0x301>;
};
};
soc {
#address-cells = <1>;
#size-cells = <1>;
@ -22,32 +49,6 @@
interrupt-parent = <&intc>;
ranges;
cpus {
#address-cells = <1>;
#size-cells = <0>;
cpu-map {
cluster0 {
core0 {
cpu = <&CPU0>;
};
core1 {
cpu = <&CPU1>;
};
};
};
CPU0: cpu@0 {
device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <0>;
};
CPU1: cpu@1 {
device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <1>;
};
};
ptm@801ae000 {
compatible = "arm,coresight-etm3x", "arm,primecell";
reg = <0x801ae000 0x1000>;


@ -124,14 +124,14 @@ CONFIG_REGULATOR_S2MPS11=y
CONFIG_REGULATOR_S5M8767=y
CONFIG_REGULATOR_TPS65090=y
CONFIG_DRM=y
CONFIG_DRM_PTN3460=y
CONFIG_DRM_PS8622=y
CONFIG_DRM_NXP_PTN3460=y
CONFIG_DRM_PARADE_PS8622=y
CONFIG_DRM_EXYNOS=y
CONFIG_DRM_EXYNOS_FIMD=y
CONFIG_DRM_EXYNOS_DSI=y
CONFIG_DRM_EXYNOS_HDMI=y
CONFIG_DRM_PANEL_SIMPLE=y
CONFIG_DRM_PANEL_S6E8AA0=y
CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0=y
CONFIG_FB_SIMPLE=y
CONFIG_EXYNOS_VIDEO=y
CONFIG_EXYNOS_MIPI_DSI=y


@ -429,15 +429,15 @@ CONFIG_VIDEO_RENESAS_VSP1=m
CONFIG_VIDEO_ADV7180=m
CONFIG_VIDEO_ML86V7667=m
CONFIG_DRM=y
CONFIG_DRM_PTN3460=m
CONFIG_DRM_PS8622=m
CONFIG_DRM_NXP_PTN3460=m
CONFIG_DRM_PARADE_PS8622=m
CONFIG_DRM_EXYNOS=m
CONFIG_DRM_EXYNOS_DSI=y
CONFIG_DRM_EXYNOS_FIMD=y
CONFIG_DRM_EXYNOS_HDMI=y
CONFIG_DRM_RCAR_DU=m
CONFIG_DRM_TEGRA=y
CONFIG_DRM_PANEL_S6E8AA0=m
CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0=m
CONFIG_DRM_PANEL_SIMPLE=y
CONFIG_FB_ARMCLCD=y
CONFIG_FB_WM8505=y


@ -61,6 +61,7 @@ work_pending:
movlt scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
ldmia sp, {r0 - r6} @ have to reload r0 - r6
b local_restart @ ... and off we go
ENDPROC(ret_fast_syscall)
/*
* "slow" syscall return path. "why" tells us if this was a real syscall.


@ -399,6 +399,9 @@ ENTRY(secondary_startup)
sub lr, r4, r5 @ mmu has been enabled
add r3, r7, lr
ldrd r4, [r3, #0] @ get secondary_data.pgdir
ARM_BE8(eor r4, r4, r5) @ Swap r5 and r4 in BE:
ARM_BE8(eor r5, r4, r5) @ it can be done in 3 steps
ARM_BE8(eor r4, r4, r5) @ without using a temp reg.
ldr r8, [r3, #8] @ get secondary_data.swapper_pg_dir
badr lr, __enable_mmu @ return address
mov r13, r12 @ __secondary_switched address
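
As the comments in the hunk note, the three eor instructions exchange r4 and r5 without a scratch register; this is the classic XOR swap. A minimal userspace C sketch of the same technique (illustrative only, not kernel code):

	#include <stdio.h>

	/* Three-step XOR exchange, as used by the ARM_BE8() sequence
	 * above: no temporary register is needed. */
	static void xor_swap(unsigned long *a, unsigned long *b)
	{
		*a ^= *b;	/* a = a ^ b             */
		*b ^= *a;	/* b = b ^ (a ^ b) == a  */
		*a ^= *b;	/* a = (a ^ b) ^ a == b  */
	}

	int main(void)
	{
		unsigned long r4 = 0x1234, r5 = 0x5678;

		xor_swap(&r4, &r5);
		printf("r4=%#lx r5=%#lx\n", r4, r5);	/* r4=0x5678 r5=0x1234 */
		return 0;
	}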


@ -296,7 +296,6 @@ static bool tk_is_cntvct(const struct timekeeper *tk)
*/
void update_vsyscall(struct timekeeper *tk)
{
struct timespec xtime_coarse;
struct timespec64 *wtm = &tk->wall_to_monotonic;
if (!cntvct_ok) {
@ -308,10 +307,10 @@ void update_vsyscall(struct timekeeper *tk)
vdso_write_begin(vdso_data);
xtime_coarse = __current_kernel_time();
vdso_data->tk_is_cntvct = tk_is_cntvct(tk);
vdso_data->xtime_coarse_sec = xtime_coarse.tv_sec;
vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec;
vdso_data->xtime_coarse_sec = tk->xtime_sec;
vdso_data->xtime_coarse_nsec = (u32)(tk->tkr_mono.xtime_nsec >>
tk->tkr_mono.shift);
vdso_data->wtm_clock_sec = wtm->tv_sec;
vdso_data->wtm_clock_nsec = wtm->tv_nsec;


@ -96,7 +96,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
}
/* the mmap semaphore is taken only if not in an atomic context */
atomic = in_atomic();
atomic = faulthandler_disabled();
if (!atomic)
down_read(&current->mm->mmap_sem);


@ -146,9 +146,8 @@ static __init int exynos4_pm_init_power_domain(void)
pd->base = of_iomap(np, 0);
if (!pd->base) {
pr_warn("%s: failed to map memory\n", __func__);
kfree(pd->pd.name);
kfree_const(pd->pd.name);
kfree(pd);
of_node_put(np);
continue;
}
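
Background for this hunk: the domain name is presumably allocated with kstrdup_const(), which can return a pointer into rodata rather than a heap copy, so the error path must use kfree_const(), which frees only genuine duplicates. A hedged userspace mimic of that pairing (my_strdup_const and my_free_const are illustrative names, not kernel API):

	#include <stdlib.h>
	#include <string.h>

	static const char default_name[] = "power-domain"; /* stands in for rodata */

	/* Mimic of kstrdup_const(): return rodata strings as-is, copy others. */
	static const char *my_strdup_const(const char *s)
	{
		if (s == default_name)
			return s;
		return strdup(s);
	}

	/* Mimic of kfree_const(): free only what was really duplicated. */
	static void my_free_const(const char *s)
	{
		if (s != default_name)
			free((void *)s);
	}

	int main(void)
	{
		const char *a = my_strdup_const(default_name);	/* no copy made */
		const char *b = my_strdup_const("zram0");	/* heap copy    */

		my_free_const(a);	/* no-op */
		my_free_const(b);	/* frees the strdup() copy */
		return 0;
	}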


@ -392,6 +392,7 @@ static struct irq_chip wakeupgen_chip = {
.irq_mask = wakeupgen_mask,
.irq_unmask = wakeupgen_unmask,
.irq_retrigger = irq_chip_retrigger_hierarchy,
.irq_set_type = irq_chip_set_type_parent,
.flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
#ifdef CONFIG_SMP
.irq_set_affinity = irq_chip_set_affinity_parent,


@ -14,7 +14,7 @@ VDSO_LDFLAGS += -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
VDSO_LDFLAGS += -nostdlib -shared
VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--build-id)
VDSO_LDFLAGS += $(call cc-option, -fuse-ld=bfd)
VDSO_LDFLAGS += $(call cc-ldoption, -fuse-ld=bfd)
obj-$(CONFIG_VDSO) += vdso.o
extra-$(CONFIG_VDSO) += vdso.lds


@ -199,16 +199,15 @@ up_fail:
*/
void update_vsyscall(struct timekeeper *tk)
{
struct timespec xtime_coarse;
u32 use_syscall = strcmp(tk->tkr_mono.clock->name, "arch_sys_counter");
++vdso_data->tb_seq_count;
smp_wmb();
xtime_coarse = __current_kernel_time();
vdso_data->use_syscall = use_syscall;
vdso_data->xtime_coarse_sec = xtime_coarse.tv_sec;
vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec;
vdso_data->xtime_coarse_sec = tk->xtime_sec;
vdso_data->xtime_coarse_nsec = tk->tkr_mono.xtime_nsec >>
tk->tkr_mono.shift;
vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;


@ -407,7 +407,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
.set noat
SAVE_ALL
FEXPORT(handle_\exception\ext)
__BUILD_clear_\clear
__build_clear_\clear
.set at
__BUILD_\verbose \exception
move a0, sp


@ -80,7 +80,7 @@ syscall_trace_entry:
SAVE_STATIC
move s0, t2
move a0, sp
daddiu a1, v0, __NR_64_Linux
move a1, v0
jal syscall_trace_enter
bltz v0, 2f # seccomp failed? Skip syscall


@ -72,7 +72,7 @@ n32_syscall_trace_entry:
SAVE_STATIC
move s0, t2
move a0, sp
daddiu a1, v0, __NR_N32_Linux
move a1, v0
jal syscall_trace_enter
bltz v0, 2f # seccomp failed? Skip syscall


@ -140,6 +140,7 @@ sysexit_from_sys_call:
*/
andl $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
movl RIP(%rsp), %ecx /* User %eip */
movq RAX(%rsp), %rax
RESTORE_RSI_RDI
xorl %edx, %edx /* Do not leak kernel information */
xorq %r8, %r8
@ -219,7 +220,6 @@ sysexit_from_sys_call:
1: setbe %al /* 1 if error, 0 if not */
movzbl %al, %edi /* zero-extend that into %edi */
call __audit_syscall_exit
movq RAX(%rsp), %rax /* reload syscall return value */
movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %edi
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
@ -368,6 +368,7 @@ sysretl_from_sys_call:
RESTORE_RSI_RDI_RDX
movl RIP(%rsp), %ecx
movl EFLAGS(%rsp), %r11d
movq RAX(%rsp), %rax
xorq %r10, %r10
xorq %r9, %r9
xorq %r8, %r8


@ -57,9 +57,9 @@ struct sigcontext {
unsigned long ip;
unsigned long flags;
unsigned short cs;
unsigned short __pad2; /* Was called gs, but was always zero. */
unsigned short __pad1; /* Was called fs, but was always zero. */
unsigned short ss;
unsigned short gs;
unsigned short fs;
unsigned short __pad0;
unsigned long err;
unsigned long trapno;
unsigned long oldmask;


@ -79,12 +79,12 @@ do { \
#else /* CONFIG_X86_32 */
/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT "pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\t"
#define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
#define __EXTRA_CLOBBER \
, "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
"r12", "r13", "r14", "r15", "flags"
"r12", "r13", "r14", "r15"
#ifdef CONFIG_CC_STACKPROTECTOR
#define __switch_canary \
@ -100,11 +100,7 @@ do { \
#define __switch_canary_iparam
#endif /* CC_STACKPROTECTOR */
/*
* There is no need to save or restore flags, because flags are always
* clean in kernel mode, with the possible exception of IOPL. Kernel IOPL
* has no effect.
*/
/* Save restore flags to clear handle leaking NT */
#define switch_to(prev, next, last) \
asm volatile(SAVE_CONTEXT \
"movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \


@ -177,24 +177,9 @@ struct sigcontext {
__u64 rip;
__u64 eflags; /* RFLAGS */
__u16 cs;
/*
* Prior to 2.5.64 ("[PATCH] x86-64 updates for 2.5.64-bk3"),
* Linux saved and restored fs and gs in these slots. This
* was counterproductive, as fsbase and gsbase were never
* saved, so arch_prctl was presumably unreliable.
*
* If these slots are ever needed for any other purpose, there
* is some risk that very old 64-bit binaries could get
* confused. I doubt that many such binaries still work,
* though, since the same patch in 2.5.64 also removed the
* 64-bit set_thread_area syscall, so it appears that there is
* no TLS API that works in both pre- and post-2.5.64 kernels.
*/
__u16 __pad2; /* Was gs. */
__u16 __pad1; /* Was fs. */
__u16 ss;
__u16 gs;
__u16 fs;
__u16 __pad0;
__u64 err;
__u64 trapno;
__u64 oldmask;


@ -322,7 +322,7 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
irq_data->chip = &lapic_controller;
irq_data->chip_data = data;
irq_data->hwirq = virq + i;
err = assign_irq_vector_policy(virq, irq_data->node, data,
err = assign_irq_vector_policy(virq + i, irq_data->node, data,
info);
if (err)
goto error;


@ -2534,7 +2534,7 @@ static int intel_pmu_cpu_prepare(int cpu)
if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
cpuc->shared_regs = allocate_shared_regs(cpu);
if (!cpuc->shared_regs)
return NOTIFY_BAD;
goto err;
}
if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
@ -2542,18 +2542,27 @@ static int intel_pmu_cpu_prepare(int cpu)
cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
if (!cpuc->constraint_list)
return NOTIFY_BAD;
goto err_shared_regs;
cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
if (!cpuc->excl_cntrs) {
kfree(cpuc->constraint_list);
kfree(cpuc->shared_regs);
return NOTIFY_BAD;
}
if (!cpuc->excl_cntrs)
goto err_constraint_list;
cpuc->excl_thread_id = 0;
}
return NOTIFY_OK;
err_constraint_list:
kfree(cpuc->constraint_list);
cpuc->constraint_list = NULL;
err_shared_regs:
kfree(cpuc->shared_regs);
cpuc->shared_regs = NULL;
err:
return NOTIFY_BAD;
}
static void intel_pmu_cpu_starting(int cpu)


@ -1255,7 +1255,7 @@ static inline void cqm_pick_event_reader(int cpu)
cpumask_set_cpu(cpu, &cqm_cpumask);
}
static void intel_cqm_cpu_prepare(unsigned int cpu)
static void intel_cqm_cpu_starting(unsigned int cpu)
{
struct intel_pqr_state *state = &per_cpu(pqr_state, cpu);
struct cpuinfo_x86 *c = &cpu_data(cpu);
@ -1296,13 +1296,11 @@ static int intel_cqm_cpu_notifier(struct notifier_block *nb,
unsigned int cpu = (unsigned long)hcpu;
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
intel_cqm_cpu_prepare(cpu);
break;
case CPU_DOWN_PREPARE:
intel_cqm_cpu_exit(cpu);
break;
case CPU_STARTING:
intel_cqm_cpu_starting(cpu);
cqm_pick_event_reader(cpu);
break;
}
@ -1373,7 +1371,7 @@ static int __init intel_cqm_init(void)
goto out;
for_each_online_cpu(i) {
intel_cqm_cpu_prepare(i);
intel_cqm_cpu_starting(i);
cqm_pick_event_reader(i);
}


@ -270,7 +270,7 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
dst_fpu->fpregs_active = 0;
dst_fpu->last_cpu = -1;
if (src_fpu->fpstate_active)
if (src_fpu->fpstate_active && cpu_has_fpu)
fpu_copy(dst_fpu, src_fpu);
return 0;


@ -40,7 +40,12 @@ static void fpu__init_cpu_generic(void)
write_cr0(cr0);
/* Flush out any pending x87 state: */
asm volatile ("fninit");
#ifdef CONFIG_MATH_EMULATION
if (!cpu_has_fpu)
fpstate_init_soft(&current->thread.fpu.state.soft);
else
#endif
asm volatile ("fninit");
}
/*


@ -408,6 +408,7 @@ static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
static void mwait_idle(void)
{
if (!current_set_polling_and_test()) {
trace_cpu_idle_rcuidle(1, smp_processor_id());
if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
smp_mb(); /* quirk */
clflush((void *)&current_thread_info()->flags);
@ -419,6 +420,7 @@ static void mwait_idle(void)
__sti_mwait(0, 0);
else
local_irq_enable();
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
} else {
local_irq_enable();
}


@ -93,8 +93,15 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
COPY(r15);
#endif /* CONFIG_X86_64 */
#ifdef CONFIG_X86_32
COPY_SEG_CPL3(cs);
COPY_SEG_CPL3(ss);
#else /* !CONFIG_X86_32 */
/* Kernel saves and restores only the CS segment register on signals,
* which is the bare minimum needed to allow mixed 32/64-bit code.
* App's signal handler can save/restore other segments if needed. */
COPY_SEG_CPL3(cs);
#endif /* CONFIG_X86_32 */
get_user_ex(tmpflags, &sc->flags);
regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
@ -154,9 +161,8 @@ int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
#else /* !CONFIG_X86_32 */
put_user_ex(regs->flags, &sc->flags);
put_user_ex(regs->cs, &sc->cs);
put_user_ex(0, &sc->__pad2);
put_user_ex(0, &sc->__pad1);
put_user_ex(regs->ss, &sc->ss);
put_user_ex(0, &sc->gs);
put_user_ex(0, &sc->fs);
#endif /* CONFIG_X86_32 */
put_user_ex(fpstate, &sc->fpstate);
@ -451,19 +457,9 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
regs->sp = (unsigned long)frame;
/*
* Set up the CS and SS registers to run signal handlers in
* 64-bit mode, even if the handler happens to be interrupting
* 32-bit or 16-bit code.
*
* SS is subtle. In 64-bit mode, we don't need any particular
* SS descriptor, but we do need SS to be valid. It's possible
* that the old SS is entirely bogus -- this can happen if the
* signal we're trying to deliver is #GP or #SS caused by a bad
* SS value.
*/
/* Set up the CS register to run signal handlers in 64-bit mode,
even if the handler happens to be interrupting 32-bit code. */
regs->cs = __USER_CS;
regs->ss = __USER_DS;
return 0;
}


@ -28,11 +28,11 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
struct desc_struct *desc;
unsigned long base;
seg &= ~7UL;
seg >>= 3;
mutex_lock(&child->mm->context.lock);
if (unlikely(!child->mm->context.ldt ||
(seg >> 3) >= child->mm->context.ldt->size))
seg >= child->mm->context.ldt->size))
addr = -1L; /* bogus selector, access would fault */
else {
desc = &child->mm->context.ldt->entries[seg];


@ -2105,7 +2105,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (guest_cpuid_has_tsc_adjust(vcpu)) {
if (!msr_info->host_initiated) {
s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
kvm_x86_ops->adjust_tsc_offset(vcpu, adj, true);
adjust_tsc_offset_guest(vcpu, adj);
}
vcpu->arch.ia32_tsc_adjust_msr = data;
}
@ -6327,6 +6327,7 @@ static void process_smi_save_state_64(struct kvm_vcpu *vcpu, char *buf)
static void process_smi(struct kvm_vcpu *vcpu)
{
struct kvm_segment cs, ds;
struct desc_ptr dt;
char buf[512];
u32 cr0;
@ -6359,6 +6360,10 @@ static void process_smi(struct kvm_vcpu *vcpu)
kvm_x86_ops->set_cr4(vcpu, 0);
/* Undocumented: IDT limit is set to zero on entry to SMM. */
dt.address = dt.size = 0;
kvm_x86_ops->set_idt(vcpu, &dt);
__kvm_set_dr(vcpu, 7, DR7_FIXED_1);
cs.selector = (vcpu->arch.smbase >> 4) & 0xffff;


@ -29,7 +29,6 @@
#include <asm/uaccess.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/user.h>
#include <asm/fpu/internal.h>
@ -181,7 +180,7 @@ void math_emulate(struct math_emu_info *info)
math_abort(FPU_info, SIGILL);
}
code_descriptor = LDT_DESCRIPTOR(FPU_CS);
code_descriptor = FPU_get_ldt_descriptor(FPU_CS);
if (SEG_D_SIZE(code_descriptor)) {
/* The above test may be wrong, the book is not clear */
/* Segmented 32 bit protected mode */


@ -16,9 +16,24 @@
#include <linux/kernel.h>
#include <linux/mm.h>
/* s is always from a cpu register, and the cpu does bounds checking
* during register load --> no further bounds checks needed */
#define LDT_DESCRIPTOR(s) (((struct desc_struct *)current->mm->context.ldt)[(s) >> 3])
#include <asm/desc.h>
#include <asm/mmu_context.h>
static inline struct desc_struct FPU_get_ldt_descriptor(unsigned seg)
{
static struct desc_struct zero_desc;
struct desc_struct ret = zero_desc;
#ifdef CONFIG_MODIFY_LDT_SYSCALL
seg >>= 3;
mutex_lock(&current->mm->context.lock);
if (current->mm->context.ldt && seg < current->mm->context.ldt->size)
ret = current->mm->context.ldt->entries[seg];
mutex_unlock(&current->mm->context.lock);
#endif
return ret;
}
#define SEG_D_SIZE(x) ((x).b & (3 << 21))
#define SEG_G_BIT(x) ((x).b & (1 << 23))
#define SEG_GRANULARITY(x) (((x).b & (1 << 23)) ? 4096 : 1)


@ -20,7 +20,6 @@
#include <linux/stddef.h>
#include <asm/uaccess.h>
#include <asm/desc.h>
#include "fpu_system.h"
#include "exception.h"
@ -158,7 +157,7 @@ static long pm_address(u_char FPU_modrm, u_char segment,
addr->selector = PM_REG_(segment);
}
descriptor = LDT_DESCRIPTOR(PM_REG_(segment));
descriptor = FPU_get_ldt_descriptor(addr->selector);
base_address = SEG_BASE_ADDR(descriptor);
address = base_address + offset;
limit = base_address


@ -8,7 +8,7 @@ config XEN
select PARAVIRT_CLOCK
select XEN_HAVE_PVMMU
depends on X86_64 || (X86_32 && X86_PAE)
depends on X86_TSC
depends on X86_LOCAL_APIC && X86_TSC
help
This is the Linux Xen port. Enabling this will allow the
kernel to boot in a paravirtualized environment under the
@ -17,7 +17,7 @@ config XEN
config XEN_DOM0
def_bool y
depends on XEN && PCI_XEN && SWIOTLB_XEN
depends on X86_LOCAL_APIC && X86_IO_APIC && ACPI && PCI
depends on X86_IO_APIC && ACPI && PCI
config XEN_PVHVM
def_bool y


@ -13,13 +13,13 @@ CFLAGS_mmu.o := $(nostackp)
obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \
time.o xen-asm.o xen-asm_$(BITS).o \
grant-table.o suspend.o platform-pci-unplug.o \
p2m.o
p2m.o apic.o
obj-$(CONFIG_EVENT_TRACING) += trace.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o
obj-$(CONFIG_XEN_DOM0) += apic.o vga.o
obj-$(CONFIG_XEN_DOM0) += vga.o
obj-$(CONFIG_SWIOTLB_XEN) += pci-swiotlb-xen.o
obj-$(CONFIG_XEN_EFI) += efi.o


@ -101,17 +101,15 @@ struct dom0_vga_console_info;
#ifdef CONFIG_XEN_DOM0
void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size);
void __init xen_init_apic(void);
#else
static inline void __init xen_init_vga(const struct dom0_vga_console_info *info,
size_t size)
{
}
static inline void __init xen_init_apic(void)
{
}
#endif
void __init xen_init_apic(void);
#ifdef CONFIG_XEN_EFI
extern void xen_efi_init(void);
#else


@ -241,8 +241,8 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
* Description:
* Enables a low level driver to set a hard upper limit,
* max_hw_sectors, on the size of requests. max_hw_sectors is set by
* the device driver based upon the combined capabilities of I/O
* controller and storage device.
* the device driver based upon the capabilities of the I/O
* controller.
*
* max_sectors is a soft limit imposed by the block layer for
* filesystem type requests. This value can be overridden on a


@ -393,8 +393,6 @@ static int crypto_authenc_esn_genicv(struct aead_request *req, u8 *iv,
struct scatterlist *cipher = areq_ctx->cipher;
struct scatterlist *hsg = areq_ctx->hsg;
struct scatterlist *tsg = areq_ctx->tsg;
struct scatterlist *assoc1;
struct scatterlist *assoc2;
unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
unsigned int cryptlen = req->cryptlen;
struct page *dstp;
@ -412,27 +410,19 @@ static int crypto_authenc_esn_genicv(struct aead_request *req, u8 *iv,
cryptlen += ivsize;
}
if (sg_is_last(assoc))
return -EINVAL;
assoc1 = assoc + 1;
if (sg_is_last(assoc1))
return -EINVAL;
assoc2 = assoc + 2;
if (!sg_is_last(assoc2))
if (assoc->length < 12)
return -EINVAL;
sg_init_table(hsg, 2);
sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset);
sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset);
sg_set_page(hsg, sg_page(assoc), 4, assoc->offset);
sg_set_page(hsg + 1, sg_page(assoc), 4, assoc->offset + 8);
sg_init_table(tsg, 1);
sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset);
sg_set_page(tsg, sg_page(assoc), 4, assoc->offset + 4);
areq_ctx->cryptlen = cryptlen;
areq_ctx->headlen = assoc->length + assoc2->length;
areq_ctx->trailen = assoc1->length;
areq_ctx->headlen = 8;
areq_ctx->trailen = 4;
areq_ctx->sg = dst;
areq_ctx->complete = authenc_esn_geniv_ahash_done;
@ -563,8 +553,6 @@ static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
struct scatterlist *cipher = areq_ctx->cipher;
struct scatterlist *hsg = areq_ctx->hsg;
struct scatterlist *tsg = areq_ctx->tsg;
struct scatterlist *assoc1;
struct scatterlist *assoc2;
unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
struct page *srcp;
u8 *vsrc;
@ -580,27 +568,19 @@ static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
cryptlen += ivsize;
}
if (sg_is_last(assoc))
return -EINVAL;
assoc1 = assoc + 1;
if (sg_is_last(assoc1))
return -EINVAL;
assoc2 = assoc + 2;
if (!sg_is_last(assoc2))
if (assoc->length < 12)
return -EINVAL;
sg_init_table(hsg, 2);
sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset);
sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset);
sg_set_page(hsg, sg_page(assoc), 4, assoc->offset);
sg_set_page(hsg + 1, sg_page(assoc), 4, assoc->offset + 8);
sg_init_table(tsg, 1);
sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset);
sg_set_page(tsg, sg_page(assoc), 4, assoc->offset + 4);
areq_ctx->cryptlen = cryptlen;
areq_ctx->headlen = assoc->length + assoc2->length;
areq_ctx->trailen = assoc1->length;
areq_ctx->headlen = 8;
areq_ctx->trailen = 4;
areq_ctx->sg = src;
areq_ctx->complete = authenc_esn_verify_ahash_done;


@ -32,6 +32,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <acpi/video.h>
ACPI_MODULE_NAME("video");
@ -41,6 +42,7 @@ void acpi_video_unregister_backlight(void);
static bool backlight_notifier_registered;
static struct notifier_block backlight_nb;
static struct work_struct backlight_notify_work;
static enum acpi_backlight_type acpi_backlight_cmdline = acpi_backlight_undef;
static enum acpi_backlight_type acpi_backlight_dmi = acpi_backlight_undef;
@ -262,6 +264,13 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
{ },
};
/* This uses a workqueue to avoid various locking ordering issues */
static void acpi_video_backlight_notify_work(struct work_struct *work)
{
if (acpi_video_get_backlight_type() != acpi_backlight_video)
acpi_video_unregister_backlight();
}
static int acpi_video_backlight_notify(struct notifier_block *nb,
unsigned long val, void *bd)
{
@ -269,9 +278,8 @@ static int acpi_video_backlight_notify(struct notifier_block *nb,
/* A raw bl registering may change video -> native */
if (backlight->props.type == BACKLIGHT_RAW &&
val == BACKLIGHT_REGISTERED &&
acpi_video_get_backlight_type() != acpi_backlight_video)
acpi_video_unregister_backlight();
val == BACKLIGHT_REGISTERED)
schedule_work(&backlight_notify_work);
return NOTIFY_OK;
}
@ -304,6 +312,8 @@ enum acpi_backlight_type acpi_video_get_backlight_type(void)
acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX, find_video, NULL,
&video_caps, NULL);
INIT_WORK(&backlight_notify_work,
acpi_video_backlight_notify_work);
backlight_nb.notifier_call = acpi_video_backlight_notify;
backlight_nb.priority = 0;
if (backlight_register_notifier(&backlight_nb) == 0)


@ -92,7 +92,7 @@ static inline u32 brcm_sata_readreg(void __iomem *addr)
* Other architectures (e.g., ARM) either do not support big endian, or
* else leave I/O in little endian mode.
*/
if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
return __raw_readl(addr);
else
return readl_relaxed(addr);
@ -101,7 +101,7 @@ static inline u32 brcm_sata_readreg(void __iomem *addr)
static inline void brcm_sata_writereg(u32 val, void __iomem *addr)
{
/* See brcm_sata_readreg() comments */
if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
__raw_writel(val, addr);
else
writel_relaxed(val, addr);
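
The fix works because IS_ENABLED() reports true only for macros defined as 1 (or as a module): __BIG_ENDIAN comes from the byteorder headers as 4321, so the old test could never fire, while CONFIG_CPU_BIG_ENDIAN is a proper Kconfig symbol. A userspace sketch of the underlying preprocessor trick (macro names mirror include/linux/kconfig.h, but this is an illustration, not the kernel header):

	#include <stdio.h>

	/* Detect whether a macro is defined as exactly 1. */
	#define __ARG_PLACEHOLDER_1 0,
	#define __take_second_arg(__ignored, val, ...) val
	#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
	#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
	#define IS_DEFINED_AS_1(x) ___is_defined(x)

	#define BYTEORDER_CONSTANT 4321	/* plays the role of __BIG_ENDIAN */
	#define CONFIG_CPU_BIG_ENDIAN 1	/* a real Kconfig symbol is 1     */

	int main(void)
	{
		printf("%d\n", IS_DEFINED_AS_1(BYTEORDER_CONSTANT));	/* 0 */
		printf("%d\n", IS_DEFINED_AS_1(CONFIG_CPU_BIG_ENDIAN));	/* 1 */
		return 0;
	}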
@ -209,6 +209,7 @@ static void brcm_sata_init(struct brcm_ahci_priv *priv)
priv->top_ctrl + SATA_TOP_CTRL_BUS_CTRL);
}
#ifdef CONFIG_PM_SLEEP
static int brcm_ahci_suspend(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
@ -231,6 +232,7 @@ static int brcm_ahci_resume(struct device *dev)
brcm_sata_phys_enable(priv);
return ahci_platform_resume(dev);
}
#endif
static struct scsi_host_template ahci_platform_sht = {
AHCI_SHT(DRV_NAME),


@ -694,11 +694,11 @@ static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
* RETURNS:
* Block address read from @tf.
*/
u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
u64 block = 0;
if (!dev || tf->flags & ATA_TFLAG_LBA) {
if (tf->flags & ATA_TFLAG_LBA) {
if (tf->flags & ATA_TFLAG_LBA48) {
block |= (u64)tf->hob_lbah << 40;
block |= (u64)tf->hob_lbam << 32;
@ -2147,24 +2147,6 @@ static int ata_dev_config_ncq(struct ata_device *dev,
return 0;
}
static void ata_dev_config_sense_reporting(struct ata_device *dev)
{
unsigned int err_mask;
if (!ata_id_has_sense_reporting(dev->id))
return;
if (ata_id_sense_reporting_enabled(dev->id))
return;
err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
if (err_mask) {
ata_dev_dbg(dev,
"failed to enable Sense Data Reporting, Emask 0x%x\n",
err_mask);
}
}
/**
* ata_dev_configure - Configure the specified ATA/ATAPI device
* @dev: Target device to configure
@ -2387,7 +2369,7 @@ int ata_dev_configure(struct ata_device *dev)
dev->devslp_timing[i] = sata_setting[j];
}
}
ata_dev_config_sense_reporting(dev);
dev->cdb_len = 16;
}


@ -1592,8 +1592,6 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
tf->hob_lbah = buf[10];
tf->nsect = buf[12];
tf->hob_nsect = buf[13];
if (ata_id_has_ncq_autosense(dev->id))
tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];
return 0;
}
@ -1629,70 +1627,6 @@ unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
return err_mask;
}
/**
* ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
* @dev: device to perform REQUEST_SENSE_SENSE_DATA_EXT to
* @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
* @dfl_sense_key: default sense key to use
*
* Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK
* SENSE. This function is EH helper.
*
* LOCKING:
* Kernel thread context (may sleep).
*
* RETURNS:
* encoded sense data on success, 0 on failure or if sense data
* is not available.
*/
static u32 ata_eh_request_sense(struct ata_queued_cmd *qc,
struct scsi_cmnd *cmd)
{
struct ata_device *dev = qc->dev;
struct ata_taskfile tf;
unsigned int err_mask;
if (!cmd)
return 0;
DPRINTK("ATA request sense\n");
ata_dev_warn(dev, "request sense\n");
if (!ata_id_sense_reporting_enabled(dev->id)) {
ata_dev_warn(qc->dev, "sense data reporting disabled\n");
return 0;
}
ata_tf_init(dev, &tf);
tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
tf.command = ATA_CMD_REQ_SENSE_DATA;
tf.protocol = ATA_PROT_NODATA;
err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
/*
* ACS-4 states:
* The device may set the SENSE DATA AVAILABLE bit to one in the
* STATUS field and clear the ERROR bit to zero in the STATUS field
* to indicate that the command returned completion without an error
* and the sense data described in table 306 is available.
*
* IOW the 'ATA_SENSE' bit might not be set even though valid
* sense data is available.
* So check for both.
*/
if ((tf.command & ATA_SENSE) ||
tf.lbah != 0 || tf.lbam != 0 || tf.lbal != 0) {
ata_scsi_set_sense(cmd, tf.lbah, tf.lbam, tf.lbal);
qc->flags |= ATA_QCFLAG_SENSE_VALID;
ata_dev_warn(dev, "sense data %02x/%02x/%02x\n",
tf.lbah, tf.lbam, tf.lbal);
} else {
ata_dev_warn(dev, "request sense failed stat %02x emask %x\n",
tf.command, err_mask);
}
return err_mask;
}
/**
* atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
* @dev: device to perform REQUEST_SENSE to
@ -1855,19 +1789,6 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
memcpy(&qc->result_tf, &tf, sizeof(tf));
qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
if (qc->result_tf.auxiliary) {
char sense_key, asc, ascq;
sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
asc = (qc->result_tf.auxiliary >> 8) & 0xff;
ascq = qc->result_tf.auxiliary & 0xff;
ata_dev_dbg(dev, "NCQ Autosense %02x/%02x/%02x\n",
sense_key, asc, ascq);
ata_scsi_set_sense(qc->scsicmd, sense_key, asc, ascq);
ata_scsi_set_sense_information(qc->scsicmd, &qc->result_tf);
qc->flags |= ATA_QCFLAG_SENSE_VALID;
}
ehc->i.err_mask &= ~AC_ERR_DEV;
}
@ -1897,27 +1818,6 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
return ATA_EH_RESET;
}
/*
* Sense data reporting does not work if the
* device fault bit is set.
*/
if ((stat & ATA_SENSE) && !(stat & ATA_DF) &&
!(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
tmp = ata_eh_request_sense(qc, qc->scsicmd);
if (tmp)
qc->err_mask |= tmp;
else
ata_scsi_set_sense_information(qc->scsicmd, tf);
} else {
ata_dev_warn(qc->dev, "sense data available but port frozen\n");
}
}
/* Set by NCQ autosense or request sense above */
if (qc->flags & ATA_QCFLAG_SENSE_VALID)
return 0;
if (stat & (ATA_ERR | ATA_DF))
qc->err_mask |= AC_ERR_DEV;
else
@ -2661,15 +2561,14 @@ static void ata_eh_link_report(struct ata_link *link)
#ifdef CONFIG_ATA_VERBOSE_ERROR
if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
ATA_SENSE | ATA_ERR)) {
ATA_ERR)) {
if (res->command & ATA_BUSY)
ata_dev_err(qc->dev, "status: { Busy }\n");
else
ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n",
ata_dev_err(qc->dev, "status: { %s%s%s%s}\n",
res->command & ATA_DRDY ? "DRDY " : "",
res->command & ATA_DF ? "DF " : "",
res->command & ATA_DRQ ? "DRQ " : "",
res->command & ATA_SENSE ? "SENSE " : "",
res->command & ATA_ERR ? "ERR " : "");
}


@ -270,28 +270,13 @@ DEVICE_ATTR(unload_heads, S_IRUGO | S_IWUSR,
ata_scsi_park_show, ata_scsi_park_store);
EXPORT_SYMBOL_GPL(dev_attr_unload_heads);
void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
static void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
{
if (!cmd)
return;
cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
scsi_build_sense_buffer(0, cmd->sense_buffer, sk, asc, ascq);
}
void ata_scsi_set_sense_information(struct scsi_cmnd *cmd,
const struct ata_taskfile *tf)
{
u64 information;
if (!cmd)
return;
information = ata_tf_read_block(tf, NULL);
scsi_set_sense_information(cmd->sense_buffer, information);
}
static ssize_t
ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@ -1792,9 +1777,7 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
((cdb[2] & 0x20) || need_sense)) {
ata_gen_passthru_sense(qc);
} else {
if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
cmd->result = SAM_STAT_CHECK_CONDITION;
} else if (!need_sense) {
if (!need_sense) {
cmd->result = SAM_STAT_GOOD;
} else {
/* TODO: decide which descriptor format to use


@ -67,8 +67,7 @@ extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag);
extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
u64 block, u32 n_block, unsigned int tf_flags,
unsigned int tag);
extern u64 ata_tf_read_block(const struct ata_taskfile *tf,
struct ata_device *dev);
extern u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev);
extern unsigned ata_exec_internal(struct ata_device *dev,
struct ata_taskfile *tf, const u8 *cdb,
int dma_dir, void *buf, unsigned int buflen,
@ -138,9 +137,6 @@ extern int ata_scsi_add_hosts(struct ata_host *host,
struct scsi_host_template *sht);
extern void ata_scsi_scan_host(struct ata_port *ap, int sync);
extern int ata_scsi_offline_dev(struct ata_device *dev);
extern void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq);
extern void ata_scsi_set_sense_information(struct scsi_cmnd *cmd,
const struct ata_taskfile *tf);
extern void ata_scsi_media_change_notify(struct ata_device *dev);
extern void ata_scsi_hotplug(struct work_struct *work);
extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);


@ -1238,8 +1238,12 @@ static unsigned int pdc20621_prog_dimm_global(struct ata_host *host)
readl(mmio + PDC_SDRAM_CONTROL);
/* Turn on for ECC */
pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
PDC_DIMM_SPD_TYPE, &spd0);
if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
PDC_DIMM_SPD_TYPE, &spd0)) {
pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
return 1;
}
if (spd0 == 0x02) {
data |= (0x01 << 16);
writel(data, mmio + PDC_SDRAM_CONTROL);
@ -1380,8 +1384,12 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
/* ECC initiliazation. */
pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
PDC_DIMM_SPD_TYPE, &spd0);
if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
PDC_DIMM_SPD_TYPE, &spd0)) {
pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
return 1;
}
if (spd0 == 0x02) {
void *buf;
VPRINTK("Start ECC initialization\n");


@ -296,11 +296,20 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
if (!blk)
return -ENOMEM;
present = krealloc(rbnode->cache_present,
BITS_TO_LONGS(blklen) * sizeof(*present), GFP_KERNEL);
if (!present) {
kfree(blk);
return -ENOMEM;
if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
present = krealloc(rbnode->cache_present,
BITS_TO_LONGS(blklen) * sizeof(*present),
GFP_KERNEL);
if (!present) {
kfree(blk);
return -ENOMEM;
}
memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
(BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
* sizeof(*present));
} else {
present = rbnode->cache_present;
}
/* insert the register value in the correct place in the rbnode block */
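
Two things matter in this hunk: the bitmap is reallocated only when the block actually grows, and since krealloc() leaves the newly added tail uninitialized, that tail must be zeroed by hand. A standalone sketch of the same grow-and-clear pattern using realloc() (grow_bitmap is an illustrative name):

	#include <stdlib.h>
	#include <string.h>

	/* Grow a bitmap from old_longs to new_longs words, zeroing only
	 * the freshly added tail; realloc(), like krealloc(), does not
	 * zero it. Returns the (possibly moved) buffer, NULL on failure. */
	static unsigned long *grow_bitmap(unsigned long *map,
					  size_t old_longs, size_t new_longs)
	{
		unsigned long *p;

		if (new_longs <= old_longs)
			return map;	/* no growth: keep the old buffer */

		p = realloc(map, new_longs * sizeof(*p));
		if (!p)
			return NULL;	/* caller still owns 'map' */

		memset(p + old_longs, 0,
		       (new_longs - old_longs) * sizeof(*p));
		return p;
	}

	int main(void)
	{
		unsigned long *map = calloc(2, sizeof(*map));

		map = grow_bitmap(map, 2, 4);	/* words 2 and 3 arrive zeroed */
		free(map);
		return 0;
	}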


@ -369,8 +369,8 @@ static void purge_persistent_gnt(struct xen_blkif *blkif)
return;
}
if (work_pending(&blkif->persistent_purge_work)) {
pr_alert_ratelimited("Scheduled work from previous purge is still pending, cannot purge list\n");
if (work_busy(&blkif->persistent_purge_work)) {
pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n");
return;
}


@ -179,6 +179,7 @@ static DEFINE_SPINLOCK(minor_lock);
((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
static int blkfront_setup_indirect(struct blkfront_info *info);
static int blkfront_gather_backend_features(struct blkfront_info *info);
static int get_id_from_freelist(struct blkfront_info *info)
{
@ -1128,8 +1129,10 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
* Add the used indirect page back to the list of
* available pages for indirect grefs.
*/
indirect_page = pfn_to_page(s->indirect_grants[i]->pfn);
list_add(&indirect_page->lru, &info->indirect_pages);
if (!info->feature_persistent) {
indirect_page = pfn_to_page(s->indirect_grants[i]->pfn);
list_add(&indirect_page->lru, &info->indirect_pages);
}
s->indirect_grants[i]->gref = GRANT_INVALID_REF;
list_add_tail(&s->indirect_grants[i]->node, &info->grants);
}
@ -1519,7 +1522,7 @@ static int blkif_recover(struct blkfront_info *info)
info->shadow_free = info->ring.req_prod_pvt;
info->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
rc = blkfront_setup_indirect(info);
rc = blkfront_gather_backend_features(info);
if (rc) {
kfree(copy);
return rc;
@ -1720,20 +1723,13 @@ static void blkfront_setup_discard(struct blkfront_info *info)
static int blkfront_setup_indirect(struct blkfront_info *info)
{
unsigned int indirect_segments, segs;
unsigned int segs;
int err, i;
err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
"feature-max-indirect-segments", "%u", &indirect_segments,
NULL);
if (err) {
info->max_indirect_segments = 0;
if (info->max_indirect_segments == 0)
segs = BLKIF_MAX_SEGMENTS_PER_REQUEST;
} else {
info->max_indirect_segments = min(indirect_segments,
xen_blkif_max_segments);
else
segs = info->max_indirect_segments;
}
err = fill_grant_buffer(info, (segs + INDIRECT_GREFS(segs)) * BLK_RING_SIZE(info));
if (err)
@ -1796,6 +1792,68 @@ out_of_memory:
return -ENOMEM;
}
/*
* Gather all backend feature-*
*/
static int blkfront_gather_backend_features(struct blkfront_info *info)
{
int err;
int barrier, flush, discard, persistent;
unsigned int indirect_segments;
info->feature_flush = 0;
err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
"feature-barrier", "%d", &barrier,
NULL);
/*
* If there's no "feature-barrier" defined, then it means
* we're dealing with a very old backend which writes
* synchronously; nothing to do.
*
* If there are barriers, then we use flush.
*/
if (!err && barrier)
info->feature_flush = REQ_FLUSH | REQ_FUA;
/*
* And if there is "feature-flush-cache" use that above
* barriers.
*/
err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
"feature-flush-cache", "%d", &flush,
NULL);
if (!err && flush)
info->feature_flush = REQ_FLUSH;
err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
"feature-discard", "%d", &discard,
NULL);
if (!err && discard)
blkfront_setup_discard(info);
err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
"feature-persistent", "%u", &persistent,
NULL);
if (err)
info->feature_persistent = 0;
else
info->feature_persistent = persistent;
err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
"feature-max-indirect-segments", "%u", &indirect_segments,
NULL);
if (err)
info->max_indirect_segments = 0;
else
info->max_indirect_segments = min(indirect_segments,
xen_blkif_max_segments);
return blkfront_setup_indirect(info);
}
/*
* Invoked when the backend is finally 'ready' (and has told produced
* the details about the physical device - #sectors, size, etc).
@ -1807,7 +1865,6 @@ static void blkfront_connect(struct blkfront_info *info)
unsigned int physical_sector_size;
unsigned int binfo;
int err;
int barrier, flush, discard, persistent;
switch (info->connected) {
case BLKIF_STATE_CONNECTED:
@ -1864,48 +1921,7 @@ static void blkfront_connect(struct blkfront_info *info)
if (err != 1)
physical_sector_size = sector_size;
info->feature_flush = 0;
err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
"feature-barrier", "%d", &barrier,
NULL);
/*
* If there's no "feature-barrier" defined, then it means
* we're dealing with a very old backend which writes
* synchronously; nothing to do.
*
* If there are barriers, then we use flush.
*/
if (!err && barrier)
info->feature_flush = REQ_FLUSH | REQ_FUA;
/*
* And if there is "feature-flush-cache" use that above
* barriers.
*/
err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
"feature-flush-cache", "%d", &flush,
NULL);
if (!err && flush)
info->feature_flush = REQ_FLUSH;
err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
"feature-discard", "%d", &discard,
NULL);
if (!err && discard)
blkfront_setup_discard(info);
err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
"feature-persistent", "%u", &persistent,
NULL);
if (err)
info->feature_persistent = 0;
else
info->feature_persistent = persistent;
err = blkfront_setup_indirect(info);
err = blkfront_gather_backend_features(info);
if (err) {
xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s",
info->xbdev->otherend);


@ -496,10 +496,9 @@ static void zram_meta_free(struct zram_meta *meta, u64 disksize)
kfree(meta);
}
static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize)
static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
{
size_t num_pages;
char pool_name[8];
struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
if (!meta)
@ -512,7 +511,6 @@ static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize)
goto out_error;
}
snprintf(pool_name, sizeof(pool_name), "zram%d", device_id);
meta->mem_pool = zs_create_pool(pool_name, GFP_NOIO | __GFP_HIGHMEM);
if (!meta->mem_pool) {
pr_err("Error creating memory pool\n");
@ -1031,7 +1029,7 @@ static ssize_t disksize_store(struct device *dev,
return -EINVAL;
disksize = PAGE_ALIGN(disksize);
meta = zram_meta_alloc(zram->disk->first_minor, disksize);
meta = zram_meta_alloc(zram->disk->disk_name, disksize);
if (!meta)
return -ENOMEM;
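
The old code formatted the pool name into a fixed char pool_name[8], which silently truncates once the device id no longer fits ("zram1000" already needs 9 bytes including the NUL); passing the disk name through avoids both the truncation and the temporary buffer. A quick demonstration of the snprintf() truncation:

	#include <stdio.h>

	int main(void)
	{
		char pool_name[8];

		/* "zram1000" needs 9 bytes including the NUL; snprintf()
		 * silently truncates it to fit the 8-byte buffer. */
		int n = snprintf(pool_name, sizeof(pool_name), "zram%d", 1000);

		printf("wanted %d chars, got \"%s\"\n", n, pool_name);
		/* prints: wanted 8 chars, got "zram100" */
		return 0;
	}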


@ -126,7 +126,7 @@ PARENTS(pxa3xx_ac97_bus) = { "ring_osc_60mhz", "ac97" };
PARENTS(pxa3xx_sbus) = { "ring_osc_60mhz", "system_bus" };
PARENTS(pxa3xx_smemcbus) = { "ring_osc_60mhz", "smemc" };
#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? &CKENA : &CKENB)
#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? &CKENB : &CKENA)
#define PXA3XX_CKEN(dev_id, con_id, parents, mult_lp, div_lp, mult_hp, \
div_hp, bit, is_lp, flags) \
PXA_CKEN(dev_id, con_id, bit, parents, mult_lp, div_lp, \
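
This one-character fix corrects an inverted ternary: clock-enable bits 0 to 31 live in the CKENA register and bits 32 and up in CKENB, so the > 31 arm must select CKENB. A minimal C sketch of the intended selection (illustrative stand-ins for the real register accessors):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t CKENA, CKENB;	/* stand-ins for the two enable registers */

	/* Bits 0-31 live in CKENA, bits 32-63 in CKENB. */
	static uint32_t *cken_reg(unsigned int bit)
	{
		return (bit > 31) ? &CKENB : &CKENA;
	}

	static void cken_enable(unsigned int bit)
	{
		*cken_reg(bit) |= 1u << (bit & 31);
	}

	int main(void)
	{
		cken_enable(3);		/* lands in CKENA, bit 3  */
		cken_enable(45);	/* lands in CKENB, bit 13 */
		printf("CKENA=%#x CKENB=%#x\n", CKENA, CKENB);
		return 0;
	}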


@ -661,6 +661,9 @@ static void sh_cmt_clocksource_suspend(struct clocksource *cs)
{
struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
if (!ch->cs_enabled)
return;
sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev);
}
@ -669,6 +672,9 @@ static void sh_cmt_clocksource_resume(struct clocksource *cs)
{
struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
if (!ch->cs_enabled)
return;
pm_genpd_syscore_poweron(&ch->cmt->pdev->dev);
sh_cmt_start(ch, FLAG_CLOCKSOURCE);
}


@ -462,6 +462,7 @@ void __init mxc_timer_init(unsigned long pbase, int irq, enum imx_gpt_type type)
BUG_ON(!imxtm->base);
imxtm->type = type;
imxtm->irq = irq;
_mxc_timer_init(imxtm);
}


@ -180,7 +180,7 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
ret = exynos5250_cpufreq_init(exynos_info);
} else {
pr_err("%s: Unknown SoC type\n", __func__);
return -ENODEV;
ret = -ENODEV;
}
if (ret)
@ -188,12 +188,14 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
if (exynos_info->set_freq == NULL) {
dev_err(&pdev->dev, "No set_freq function (ERR)\n");
ret = -EINVAL;
goto err_vdd_arm;
}
arm_regulator = regulator_get(NULL, "vdd_arm");
if (IS_ERR(arm_regulator)) {
dev_err(&pdev->dev, "failed to get resource vdd_arm\n");
ret = -EINVAL;
goto err_vdd_arm;
}
@ -225,7 +227,7 @@ err_cpufreq_reg:
regulator_put(arm_regulator);
err_vdd_arm:
kfree(exynos_info);
return -EINVAL;
return ret;
}
static struct platform_driver exynos_cpufreq_platdrv = {
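The pattern being restored here is the usual single-exit error path: record the first failure in ret, unwind through one label, and return the recorded code instead of a hard-coded -EINVAL that masks the real cause. A compact standalone sketch (the two failure conditions are illustrative):

#include <stdio.h>
#include <errno.h>

static int probe(int soc_known, int have_regulator)
{
        int ret = 0;

        if (!soc_known) {
                ret = -ENODEV;  /* was: return -ENODEV, skipping cleanup */
                goto err_free;
        }
        if (!have_regulator) {
                ret = -EINVAL;
                goto err_free;
        }
        return 0;

err_free:
        /* kfree() of the driver state would go here */
        return ret;             /* was: return -EINVAL */
}

int main(void)
{
        printf("%d %d %d\n", probe(1, 1), probe(0, 1), probe(1, 0));
        return 0;
}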

View File

@ -909,13 +909,14 @@ static int ahash_final_ctx(struct ahash_request *req)
state->buflen_1;
u32 *sh_desc = ctx->sh_desc_fin, *desc;
dma_addr_t ptr = ctx->sh_desc_fin_dma;
int sec4_sg_bytes;
int sec4_sg_bytes, sec4_sg_src_index;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
int ret = 0;
int sh_len;
sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);
sec4_sg_src_index = 1 + (buflen ? 1 : 0);
sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
@ -942,7 +943,7 @@ static int ahash_final_ctx(struct ahash_request *req)
state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
buf, state->buf_dma, buflen,
last_buflen);
(edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN;
(edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
sec4_sg_bytes, DMA_TO_DEVICE);
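The bug fixed in this hunk is pointer arithmetic with a byte count where an entry index was needed: SEC4_SG_LEN_FIN must be OR'd into the last table entry, and sec4_sg + sec4_sg_bytes - 1 lands far past it. A standalone illustration with stand-in types:

#include <stdio.h>

struct sg_entry { unsigned len; };

#define LEN_FIN (1u << 31)      /* stand-in for SEC4_SG_LEN_FIN */

int main(void)
{
        struct sg_entry table[4] = { { 64 }, { 32 }, { 0 }, { 0 } };
        int nentries = 2;       /* plays the role of sec4_sg_src_index */
        size_t nbytes = nentries * sizeof(struct sg_entry);

        /* Right: index by entries. The old code effectively computed
         * (table + nbytes - 1), i.e. entry 7 of a 4-entry table. */
        (table + nentries - 1)->len |= LEN_FIN;

        printf("last entry len: %#x (byte count would index entry %zu)\n",
               table[nentries - 1].len, nbytes - 1);
        return 0;
}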

View File

@ -71,7 +71,6 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
struct sha256_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
struct nx_sg *in_sg;
struct nx_sg *out_sg;
u64 to_process = 0, leftover, total;
unsigned long irq_flags;
@ -97,7 +96,6 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
in_sg = nx_ctx->in_sg;
max_sg_len = min_t(u64, nx_ctx->ap->sglen,
nx_driver.of.max_sg_len/sizeof(struct nx_sg));
max_sg_len = min_t(u64, max_sg_len,
@ -114,17 +112,12 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
}
do {
/*
* to_process: the SHA256_BLOCK_SIZE data chunk to process in
* this update. This value is also restricted by the sg list
* limits.
*/
to_process = total - to_process;
to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
int used_sgs = 0;
struct nx_sg *in_sg = nx_ctx->in_sg;
if (buf_len) {
data_len = buf_len;
in_sg = nx_build_sg_list(nx_ctx->in_sg,
in_sg = nx_build_sg_list(in_sg,
(u8 *) sctx->buf,
&data_len,
max_sg_len);
@ -133,15 +126,27 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
rc = -EINVAL;
goto out;
}
used_sgs = in_sg - nx_ctx->in_sg;
}
/* to_process: SHA256_BLOCK_SIZE aligned chunk to be
* processed in this iteration. This value is restricted
* by sg list limits and number of sgs we already used
* for leftover data. (see above)
* In the ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
* but because data may not be aligned, we need to account
* for that too. */
to_process = min_t(u64, total,
(max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
data_len = to_process - buf_len;
in_sg = nx_build_sg_list(in_sg, (u8 *) data,
&data_len, max_sg_len);
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
to_process = (data_len + buf_len);
to_process = data_len + buf_len;
leftover = total - to_process;
/*

View File

@ -71,7 +71,6 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
struct sha512_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
struct nx_sg *in_sg;
struct nx_sg *out_sg;
u64 to_process, leftover = 0, total;
unsigned long irq_flags;
@ -97,7 +96,6 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
in_sg = nx_ctx->in_sg;
max_sg_len = min_t(u64, nx_ctx->ap->sglen,
nx_driver.of.max_sg_len/sizeof(struct nx_sg));
max_sg_len = min_t(u64, max_sg_len,
@ -114,18 +112,12 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
}
do {
/*
* to_process: the SHA512_BLOCK_SIZE data chunk to process in
* this update. This value is also restricted by the sg list
* limits.
*/
to_process = total - leftover;
to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
leftover = total - to_process;
int used_sgs = 0;
struct nx_sg *in_sg = nx_ctx->in_sg;
if (buf_len) {
data_len = buf_len;
in_sg = nx_build_sg_list(nx_ctx->in_sg,
in_sg = nx_build_sg_list(in_sg,
(u8 *) sctx->buf,
&data_len, max_sg_len);
@ -133,8 +125,20 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
rc = -EINVAL;
goto out;
}
used_sgs = in_sg - nx_ctx->in_sg;
}
/* to_process: SHA512_BLOCK_SIZE aligned chunk to be
* processed in this iteration. This value is restricted
* by sg list limits and number of sgs we already used
* for leftover data. (see above)
* In the ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
* but because data may not be aligned, we need to account
* for that too. */
to_process = min_t(u64, total,
(max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
data_len = to_process - buf_len;
in_sg = nx_build_sg_list(in_sg, (u8 *) data,
&data_len, max_sg_len);
@ -146,7 +150,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
goto out;
}
to_process = (data_len + buf_len);
to_process = data_len + buf_len;
leftover = total - to_process;
/*
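This SHA-512 hunk mirrors the SHA-256 one above: to_process is now bounded by the sg-list budget, with one entry held back for unaligned data and used_sgs already spent on buffered leftovers, then rounded down to a whole hash block. The arithmetic in isolation (page and block sizes are illustrative):

#include <stdio.h>
#include <stdint.h>

#define NX_PAGE_SIZE 4096u
#define BLOCK_SIZE   128u       /* SHA512_BLOCK_SIZE; 64 for SHA-256 */

static uint64_t to_process(uint64_t total, unsigned max_sg_len,
                           unsigned used_sgs)
{
        /* One sg entry is reserved for alignment slop; used_sgs are
         * already taken by the buffered leftover. */
        uint64_t cap = (uint64_t)(max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE;
        uint64_t n = total < cap ? total : cap;

        return n & ~(uint64_t)(BLOCK_SIZE - 1); /* whole blocks only */
}

int main(void)
{
        printf("%llu\n", (unsigned long long)to_process(1 << 20, 32, 1));
        return 0;
}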

View File

@ -689,6 +689,10 @@ struct dma_chan *dma_request_slave_channel(struct device *dev,
struct dma_chan *ch = dma_request_slave_channel_reason(dev, name);
if (IS_ERR(ch))
return NULL;
dma_cap_set(DMA_PRIVATE, ch->device->cap_mask);
ch->device->privatecnt++;
return ch;
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel);
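The two added lines make channels handed out by this helper private, and the matching privatecnt increment is what lets dma_release_channel() clear DMA_PRIVATE again once the last private user is gone. A hedged consumer-side sketch (pdev and the "rx" channel name are illustrative, not from this patch):

/* Kernel-side usage sketch, not compilable on its own: */
struct dma_chan *chan = dma_request_slave_channel(&pdev->dev, "rx");
if (chan) {
        /* ... prepare and issue transfers ... */
        dma_release_channel(chan);      /* drops privatecnt, may clear DMA_PRIVATE */
}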

View File

@ -920,7 +920,7 @@ static int ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
*/
for (row = 0; row < mci->nr_csrows; row++) {
struct csrow_info *csi = &mci->csrows[row];
struct csrow_info *csi = mci->csrows[row];
/*
* Get the configuration settings for this

View File

@ -245,4 +245,4 @@ char *bcm47xx_nvram_get_contents(size_t *nvram_size)
}
EXPORT_SYMBOL(bcm47xx_nvram_get_contents);
MODULE_LICENSE("GPLv2");
MODULE_LICENSE("GPL v2");

View File

@ -37,9 +37,29 @@ config DRM_KMS_FB_HELPER
select FB
select FRAMEBUFFER_CONSOLE if !EXPERT
select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE
select FB_SYS_FOPS
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
help
FBDEV helpers for KMS drivers.
config DRM_FBDEV_EMULATION
bool "Enable legacy fbdev support for your modesetting driver"
depends on DRM
select DRM_KMS_HELPER
select DRM_KMS_FB_HELPER
default y
help
Choose this option if you have a need for the legacy fbdev
support. Note that this support also provides the linux console
support on top of your modesetting driver.
If in doubt, say "Y".
config DRM_LOAD_EDID_FIRMWARE
bool "Allow to specify an EDID data set instead of probing for it"
depends on DRM_KMS_HELPER
@ -79,8 +99,6 @@ config DRM_KMS_CMA_HELPER
source "drivers/gpu/drm/i2c/Kconfig"
source "drivers/gpu/drm/bridge/Kconfig"
config DRM_TDFX
tristate "3dfx Banshee/Voodoo3+"
depends on DRM && PCI
@ -110,6 +128,7 @@ config DRM_RADEON
select POWER_SUPPLY
select HWMON
select BACKLIGHT_CLASS_DEVICE
select BACKLIGHT_LCD_SUPPORT
select INTERVAL_TREE
help
Choose this option if you have an ATI Radeon graphics card. There
@ -133,6 +152,7 @@ config DRM_AMDGPU
select POWER_SUPPLY
select HWMON
select BACKLIGHT_CLASS_DEVICE
select BACKLIGHT_LCD_SUPPORT
select INTERVAL_TREE
help
Choose this option if you have a recent AMD Radeon graphics card.
@ -231,10 +251,14 @@ source "drivers/gpu/drm/virtio/Kconfig"
source "drivers/gpu/drm/msm/Kconfig"
source "drivers/gpu/drm/fsl-dcu/Kconfig"
source "drivers/gpu/drm/tegra/Kconfig"
source "drivers/gpu/drm/panel/Kconfig"
source "drivers/gpu/drm/bridge/Kconfig"
source "drivers/gpu/drm/sti/Kconfig"
source "drivers/gpu/drm/amd/amdkfd/Kconfig"

View File

@ -23,7 +23,7 @@ drm-$(CONFIG_OF) += drm_of.o
drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o
drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
drm_kms_helper-$(CONFIG_DRM_KMS_FB_HELPER) += drm_fb_helper.o
drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
@ -70,3 +70,4 @@ obj-$(CONFIG_DRM_IMX) += imx/
obj-y += i2c/
obj-y += panel/
obj-y += bridge/
obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu/

View File

@ -3,7 +3,9 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/amd/include/asic_reg \
-Idrivers/gpu/drm/amd/include
-Idrivers/gpu/drm/amd/include \
-Idrivers/gpu/drm/amd/amdgpu \
-Idrivers/gpu/drm/amd/scheduler
amdgpu-y := amdgpu_drv.o
@ -21,7 +23,8 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
# add asic specific block
amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o gmc_v7_0.o cik_ih.o kv_smc.o kv_dpm.o \
ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o
ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o \
amdgpu_amdkfd_gfx_v7.o
amdgpu-y += \
vi.o
@ -43,6 +46,7 @@ amdgpu-y += \
amdgpu_dpm.o \
cz_smc.o cz_dpm.o \
tonga_smc.o tonga_dpm.o \
fiji_smc.o fiji_dpm.o \
iceland_smc.o iceland_dpm.o
# add DCE block
@ -74,9 +78,17 @@ amdgpu-y += \
# add amdkfd interfaces
amdgpu-y += \
amdgpu_amdkfd.o \
amdgpu_amdkfd_gfx_v7.o \
amdgpu_amdkfd_gfx_v8.o
# add cgs
amdgpu-y += amdgpu_cgs.o
# GPU scheduler
amdgpu-y += \
../scheduler/gpu_scheduler.o \
../scheduler/sched_fence.o \
amdgpu_sched.o
amdgpu-$(CONFIG_COMPAT) += amdgpu_ioc32.o
amdgpu-$(CONFIG_VGA_SWITCHEROO) += amdgpu_atpx_handler.o
amdgpu-$(CONFIG_ACPI) += amdgpu_acpi.o

View File

@ -42,17 +42,19 @@
#include <ttm/ttm_module.h>
#include <ttm/ttm_execbuf_util.h>
#include <drm/drmP.h>
#include <drm/drm_gem.h>
#include <drm/amdgpu_drm.h>
#include "amd_shared.h"
#include "amdgpu_family.h"
#include "amdgpu_mode.h"
#include "amdgpu_ih.h"
#include "amdgpu_irq.h"
#include "amdgpu_ucode.h"
#include "amdgpu_gds.h"
#include "gpu_scheduler.h"
/*
* Modules parameters.
*/
@ -77,7 +79,11 @@ extern int amdgpu_bapm;
extern int amdgpu_deep_color;
extern int amdgpu_vm_size;
extern int amdgpu_vm_block_size;
extern int amdgpu_enable_scheduler;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
#define AMDGPU_FENCE_JIFFIES_TIMEOUT (HZ / 2)
/* AMDGPU_IB_POOL_SIZE must be a power of 2 */
@ -178,6 +184,7 @@ struct amdgpu_ring;
struct amdgpu_semaphore;
struct amdgpu_cs_parser;
struct amdgpu_irq_src;
struct amdgpu_fpriv;
enum amdgpu_cp_irq {
AMDGPU_CP_IRQ_GFX_EOP = 0,
@ -381,10 +388,10 @@ struct amdgpu_fence_driver {
uint64_t sync_seq[AMDGPU_MAX_RINGS];
atomic64_t last_seq;
bool initialized;
bool delayed_irq;
struct amdgpu_irq_src *irq_src;
unsigned irq_type;
struct delayed_work lockup_work;
wait_queue_head_t fence_queue;
};
/* some special values for the owner field */
@ -423,20 +430,18 @@ void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
struct amdgpu_irq_src *irq_src,
unsigned irq_type);
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
struct amdgpu_fence **fence);
int amdgpu_fence_recreate(struct amdgpu_ring *ring, void *owner,
uint64_t seq, struct amdgpu_fence **fence);
void amdgpu_fence_process(struct amdgpu_ring *ring);
int amdgpu_fence_wait_next(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
bool amdgpu_fence_signaled(struct amdgpu_fence *fence);
int amdgpu_fence_wait(struct amdgpu_fence *fence, bool interruptible);
int amdgpu_fence_wait_any(struct amdgpu_device *adev,
signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
struct amdgpu_fence **fences,
bool intr);
bool intr, long t);
struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence);
void amdgpu_fence_unref(struct amdgpu_fence **fence);
@ -481,7 +486,7 @@ static inline bool amdgpu_fence_is_earlier(struct amdgpu_fence *a,
return a->seq < b->seq;
}
int amdgpu_user_fence_emit(struct amdgpu_ring *ring, struct amdgpu_user_fence *user,
int amdgpu_user_fence_emit(struct amdgpu_ring *ring, struct amdgpu_user_fence *user,
void *owner, struct amdgpu_fence **fence);
/*
@ -532,14 +537,16 @@ struct amdgpu_bo_va_mapping {
struct amdgpu_bo_va {
/* protected by bo being reserved */
struct list_head bo_list;
uint64_t addr;
struct amdgpu_fence *last_pt_update;
struct fence *last_pt_update;
unsigned ref_count;
/* protected by vm mutex */
struct list_head mappings;
/* protected by vm mutex and spinlock */
struct list_head vm_status;
/* mappings for this bo_va */
struct list_head invalids;
struct list_head valids;
/* constant after initialization */
struct amdgpu_vm *vm;
struct amdgpu_bo *bo;
@ -697,8 +704,8 @@ struct amdgpu_sync {
};
void amdgpu_sync_create(struct amdgpu_sync *sync);
void amdgpu_sync_fence(struct amdgpu_sync *sync,
struct amdgpu_fence *fence);
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
struct fence *f);
int amdgpu_sync_resv(struct amdgpu_device *adev,
struct amdgpu_sync *sync,
struct reservation_object *resv,
@ -821,7 +828,9 @@ struct amdgpu_flip_work {
uint64_t base;
struct drm_pending_vblank_event *event;
struct amdgpu_bo *old_rbo;
struct fence *fence;
struct fence *excl;
unsigned shared_count;
struct fence **shared;
};
@ -844,6 +853,8 @@ struct amdgpu_ib {
uint32_t gws_base, gws_size;
uint32_t oa_base, oa_size;
uint32_t flags;
/* resulting sequence number */
uint64_t sequence;
};
enum amdgpu_ring_type {
@ -854,11 +865,23 @@ enum amdgpu_ring_type {
AMDGPU_RING_TYPE_VCE
};
extern struct amd_sched_backend_ops amdgpu_sched_ops;
int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_ib *ibs,
unsigned num_ibs,
int (*free_job)(struct amdgpu_cs_parser *),
void *owner,
struct fence **fence);
struct amdgpu_ring {
struct amdgpu_device *adev;
const struct amdgpu_ring_funcs *funcs;
struct amdgpu_fence_driver fence_drv;
struct amd_gpu_scheduler *scheduler;
spinlock_t fence_lock;
struct mutex *ring_lock;
struct amdgpu_bo *ring_obj;
volatile uint32_t *ring;
@ -892,6 +915,7 @@ struct amdgpu_ring {
struct amdgpu_ctx *current_ctx;
enum amdgpu_ring_type type;
char name[16];
bool is_pte_ring;
};
/*
@ -943,18 +967,22 @@ struct amdgpu_vm {
struct rb_root va;
/* protecting invalidated and freed */
/* protecting invalidated */
spinlock_t status_lock;
/* BOs moved, but not yet updated in the PT */
struct list_head invalidated;
/* BOs freed, but not yet updated in the PT */
/* BOs cleared in the PT because of a move */
struct list_head cleared;
/* BO mappings freed, but not yet updated in the PT */
struct list_head freed;
/* contains the page directory */
struct amdgpu_bo *page_directory;
unsigned max_pde_used;
struct fence *page_directory_fence;
/* array of page tables, one for each page directory entry */
struct amdgpu_vm_pt *page_tables;
@ -983,27 +1011,47 @@ struct amdgpu_vm_manager {
* context related structures
*/
struct amdgpu_ctx_state {
uint64_t flags;
uint32_t hangs;
#define AMDGPU_CTX_MAX_CS_PENDING 16
struct amdgpu_ctx_ring {
uint64_t sequence;
struct fence *fences[AMDGPU_CTX_MAX_CS_PENDING];
struct amd_sched_entity entity;
};
struct amdgpu_ctx {
/* call kref_get() before CS start and kref_put() after CS fence signaled */
struct kref refcount;
struct amdgpu_fpriv *fpriv;
struct amdgpu_ctx_state state;
uint32_t id;
unsigned reset_counter;
struct kref refcount;
struct amdgpu_device *adev;
unsigned reset_counter;
spinlock_t ring_lock;
struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS];
};
struct amdgpu_ctx_mgr {
struct amdgpu_device *adev;
struct idr ctx_handles;
/* lock for IDR system */
struct mutex lock;
struct amdgpu_device *adev;
struct mutex lock;
/* protected by lock */
struct idr ctx_handles;
};
int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
struct amdgpu_ctx *ctx);
void amdgpu_ctx_fini(struct amdgpu_ctx *ctx);
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
struct fence *fence, uint64_t queued_seq);
struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
struct amdgpu_ring *ring, uint64_t seq);
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
/*
* file private structure
*/
@ -1012,7 +1060,7 @@ struct amdgpu_fpriv {
struct amdgpu_vm vm;
struct mutex bo_list_lock;
struct idr bo_list_handles;
struct amdgpu_ctx_mgr ctx_mgr;
struct amdgpu_ctx_mgr ctx_mgr;
};
/*
@ -1029,6 +1077,8 @@ struct amdgpu_bo_list {
struct amdgpu_bo_list_entry *array;
};
struct amdgpu_bo_list *
amdgpu_bo_list_clone(struct amdgpu_bo_list *list);
struct amdgpu_bo_list *
amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id);
void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
@ -1205,6 +1255,14 @@ struct amdgpu_cs_parser {
/* user fence */
struct amdgpu_user_fence uf;
struct amdgpu_ring *ring;
struct mutex job_lock;
struct work_struct job_work;
int (*prepare_job)(struct amdgpu_cs_parser *sched_job);
int (*run_job)(struct amdgpu_cs_parser *sched_job);
int (*free_job)(struct amdgpu_cs_parser *sched_job);
struct amd_sched_fence *s_fence;
};
static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx)
@ -1849,17 +1907,12 @@ struct amdgpu_atcs {
struct amdgpu_atcs_functions functions;
};
int amdgpu_ctx_alloc(struct amdgpu_device *adev,struct amdgpu_fpriv *fpriv,
uint32_t *id,uint32_t flags);
int amdgpu_ctx_free(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
uint32_t id);
/*
* CGS
*/
void *amdgpu_cgs_create_device(struct amdgpu_device *adev);
void amdgpu_cgs_destroy_device(void *cgs_device);
void amdgpu_ctx_fini(struct amdgpu_fpriv *fpriv);
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
extern int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
/*
* Core structure, functions and helpers.
@ -1883,7 +1936,7 @@ struct amdgpu_device {
struct rw_semaphore exclusive_lock;
/* ASIC */
enum amdgpu_asic_type asic_type;
enum amd_asic_type asic_type;
uint32_t family;
uint32_t rev_id;
uint32_t external_rev_id;
@ -1976,7 +2029,6 @@ struct amdgpu_device {
struct amdgpu_irq_src hpd_irq;
/* rings */
wait_queue_head_t fence_queue;
unsigned fence_context;
struct mutex ring_lock;
unsigned num_rings;
@ -2028,6 +2080,9 @@ struct amdgpu_device {
/* amdkfd interface */
struct kfd_dev *kfd;
/* kernel context for IB submission */
struct amdgpu_ctx kernel_ctx;
};
bool amdgpu_device_is_px(struct drm_device *dev);
@ -2215,6 +2270,12 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev);
bool amdgpu_card_posted(struct amdgpu_device *adev);
void amdgpu_update_display_priority(struct amdgpu_device *adev);
bool amdgpu_boot_test_post_card(struct amdgpu_device *adev);
struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
struct drm_file *filp,
struct amdgpu_ctx *ctx,
struct amdgpu_ib *ibs,
uint32_t num_ibs);
int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
u32 ip_instance, u32 ring,
@ -2278,8 +2339,8 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct list_head *head);
struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring,
struct amdgpu_vm *vm);
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_sync *sync);
void amdgpu_vm_flush(struct amdgpu_ring *ring,
struct amdgpu_vm *vm,
struct amdgpu_fence *updates);

View File

@ -21,7 +21,7 @@
*/
#include "amdgpu_amdkfd.h"
#include "amdgpu_family.h"
#include "amd_shared.h"
#include <drm/drmP.h>
#include "amdgpu.h"
#include <linux/module.h>
@ -50,9 +50,11 @@ bool amdgpu_amdkfd_load_interface(struct amdgpu_device *rdev)
#endif
switch (rdev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_KAVERI:
kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions();
break;
#endif
case CHIP_CARRIZO:
kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
break;

View File

@ -450,7 +450,7 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
while (true) {
temp = RREG32(mmCP_HQD_ACTIVE);
if (temp & CP_HQD_ACTIVE__ACTIVE__SHIFT)
if (temp & CP_HQD_ACTIVE__ACTIVE_MASK)
break;
if (timeout == 0) {
pr_err("kfd: cp queue preemption time out (%dms)\n",

View File

@ -897,7 +897,7 @@ bool amdgpu_atombios_get_asic_ss_info(struct amdgpu_device *adev,
if ((id == ASIC_INTERNAL_ENGINE_SS) ||
(id == ASIC_INTERNAL_MEMORY_SS))
ss->rate /= 100;
if (adev->flags & AMDGPU_IS_APU)
if (adev->flags & AMD_IS_APU)
amdgpu_atombios_get_igp_ss_overrides(adev, ss, id);
return true;
}
@ -1058,7 +1058,7 @@ void amdgpu_atombios_set_memory_clock(struct amdgpu_device *adev,
SET_MEMORY_CLOCK_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, SetMemoryClock);
if (adev->flags & AMDGPU_IS_APU)
if (adev->flags & AMD_IS_APU)
return;
args.ulTargetMemoryClock = cpu_to_le32(mem_clock); /* 10 khz */

View File

@ -42,7 +42,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence);
if (r)
goto exit_do_move;
r = amdgpu_fence_wait(fence, false);
r = fence_wait(&fence->base, false);
if (r)
goto exit_do_move;
amdgpu_fence_unref(&fence);

View File

@ -48,7 +48,7 @@ static bool igp_read_bios_from_vram(struct amdgpu_device *adev)
resource_size_t vram_base;
resource_size_t size = 256 * 1024; /* ??? */
if (!(adev->flags & AMDGPU_IS_APU))
if (!(adev->flags & AMD_IS_APU))
if (!amdgpu_card_posted(adev))
return false;
@ -184,7 +184,7 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
bool found = false;
/* ATRM is for the discrete card only */
if (adev->flags & AMDGPU_IS_APU)
if (adev->flags & AMD_IS_APU)
return false;
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
@ -246,7 +246,7 @@ static inline bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
static bool amdgpu_read_disabled_bios(struct amdgpu_device *adev)
{
if (adev->flags & AMDGPU_IS_APU)
if (adev->flags & AMD_IS_APU)
return igp_read_bios_from_vram(adev);
else
return amdgpu_asic_read_disabled_bios(adev);

View File

@ -62,6 +62,39 @@ static int amdgpu_bo_list_create(struct amdgpu_fpriv *fpriv,
return 0;
}
struct amdgpu_bo_list *
amdgpu_bo_list_clone(struct amdgpu_bo_list *list)
{
struct amdgpu_bo_list *result;
unsigned i;
result = kmalloc(sizeof(struct amdgpu_bo_list), GFP_KERNEL);
if (!result)
return NULL;
result->array = drm_calloc_large(list->num_entries,
sizeof(struct amdgpu_bo_list_entry));
if (!result->array) {
kfree(result);
return NULL;
}
mutex_init(&result->lock);
result->gds_obj = list->gds_obj;
result->gws_obj = list->gws_obj;
result->oa_obj = list->oa_obj;
result->has_userptr = list->has_userptr;
result->num_entries = list->num_entries;
memcpy(result->array, list->array, list->num_entries *
sizeof(struct amdgpu_bo_list_entry));
for (i = 0; i < result->num_entries; ++i)
amdgpu_bo_ref(result->array[i].robj);
return result;
}
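The clone takes a reference on every buffer object it copies, so the new list's lifetime is independent of the original's. The same shape in miniature, with a plain integer refcount standing in for amdgpu_bo_ref():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct obj   { int refcount; };
struct entry { struct obj *robj; };
struct list  { unsigned num; struct entry *array; };

static struct list *clone(const struct list *src)
{
        struct list *dst = malloc(sizeof(*dst));

        if (!dst)
                return NULL;
        dst->array = calloc(src->num, sizeof(*dst->array));
        if (!dst->array) {
                free(dst);
                return NULL;
        }
        memcpy(dst->array, src->array, src->num * sizeof(*dst->array));
        dst->num = src->num;
        for (unsigned i = 0; i < dst->num; i++)
                dst->array[i].robj->refcount++; /* amdgpu_bo_ref() analogue */
        return dst;
}

int main(void)
{
        struct obj o = { .refcount = 1 };
        struct entry e = { .robj = &o };
        struct list src = { .num = 1, .array = &e };
        struct list *dst = clone(&src);

        printf("refcount after clone: %d\n", o.refcount);
        free(dst->array);
        free(dst);
        return 0;
}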
static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id)
{
struct amdgpu_bo_list *list;

View File

@ -0,0 +1,838 @@
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*
*/
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <drm/drmP.h>
#include <linux/firmware.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "cgs_linux.h"
#include "atom.h"
#include "amdgpu_ucode.h"
struct amdgpu_cgs_device {
struct cgs_device base;
struct amdgpu_device *adev;
};
#define CGS_FUNC_ADEV \
struct amdgpu_device *adev = \
((struct amdgpu_cgs_device *)cgs_device)->adev
static int amdgpu_cgs_gpu_mem_info(void *cgs_device, enum cgs_gpu_mem_type type,
uint64_t *mc_start, uint64_t *mc_size,
uint64_t *mem_size)
{
CGS_FUNC_ADEV;
switch(type) {
case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
case CGS_GPU_MEM_TYPE__VISIBLE_FB:
*mc_start = 0;
*mc_size = adev->mc.visible_vram_size;
*mem_size = adev->mc.visible_vram_size - adev->vram_pin_size;
break;
case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
*mc_start = adev->mc.visible_vram_size;
*mc_size = adev->mc.real_vram_size - adev->mc.visible_vram_size;
*mem_size = *mc_size;
break;
case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
*mc_start = adev->mc.gtt_start;
*mc_size = adev->mc.gtt_size;
*mem_size = adev->mc.gtt_size - adev->gart_pin_size;
break;
default:
return -EINVAL;
}
return 0;
}
static int amdgpu_cgs_gmap_kmem(void *cgs_device, void *kmem,
uint64_t size,
uint64_t min_offset, uint64_t max_offset,
cgs_handle_t *kmem_handle, uint64_t *mcaddr)
{
CGS_FUNC_ADEV;
int ret;
struct amdgpu_bo *bo;
struct page *kmem_page = vmalloc_to_page(kmem);
int npages = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;
struct sg_table *sg = drm_prime_pages_to_sg(&kmem_page, npages);
ret = amdgpu_bo_create(adev, size, PAGE_SIZE, false,
AMDGPU_GEM_DOMAIN_GTT, 0, sg, &bo);
if (ret)
return ret;
ret = amdgpu_bo_reserve(bo, false);
if (unlikely(ret != 0))
return ret;
/* pin buffer into GTT */
ret = amdgpu_bo_pin_restricted(bo, AMDGPU_GEM_DOMAIN_GTT,
min_offset, max_offset, mcaddr);
amdgpu_bo_unreserve(bo);
*kmem_handle = (cgs_handle_t)bo;
return ret;
}
static int amdgpu_cgs_gunmap_kmem(void *cgs_device, cgs_handle_t kmem_handle)
{
struct amdgpu_bo *obj = (struct amdgpu_bo *)kmem_handle;
if (obj) {
int r = amdgpu_bo_reserve(obj, false);
if (likely(r == 0)) {
amdgpu_bo_unpin(obj);
amdgpu_bo_unreserve(obj);
}
amdgpu_bo_unref(&obj);
}
return 0;
}
static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device,
enum cgs_gpu_mem_type type,
uint64_t size, uint64_t align,
uint64_t min_offset, uint64_t max_offset,
cgs_handle_t *handle)
{
CGS_FUNC_ADEV;
uint16_t flags = 0;
int ret = 0;
uint32_t domain = 0;
struct amdgpu_bo *obj;
struct ttm_placement placement;
struct ttm_place place;
if (min_offset > max_offset) {
BUG_ON(1);
return -EINVAL;
}
/* fail if the alignment is not a power of 2 */
if (((align != 1) && (align & (align - 1)))
|| size == 0 || align == 0)
return -EINVAL;
switch(type) {
case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
case CGS_GPU_MEM_TYPE__VISIBLE_FB:
flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
domain = AMDGPU_GEM_DOMAIN_VRAM;
if (max_offset > adev->mc.real_vram_size)
return -EINVAL;
place.fpfn = min_offset >> PAGE_SHIFT;
place.lpfn = max_offset >> PAGE_SHIFT;
place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_VRAM;
break;
case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
domain = AMDGPU_GEM_DOMAIN_VRAM;
if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
place.fpfn =
max(min_offset, adev->mc.visible_vram_size) >> PAGE_SHIFT;
place.lpfn =
min(max_offset, adev->mc.real_vram_size) >> PAGE_SHIFT;
place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_VRAM;
}
break;
case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
domain = AMDGPU_GEM_DOMAIN_GTT;
place.fpfn = min_offset >> PAGE_SHIFT;
place.lpfn = max_offset >> PAGE_SHIFT;
place.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
break;
case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
domain = AMDGPU_GEM_DOMAIN_GTT;
place.fpfn = min_offset >> PAGE_SHIFT;
place.lpfn = max_offset >> PAGE_SHIFT;
place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
TTM_PL_FLAG_UNCACHED;
break;
default:
return -EINVAL;
}
*handle = 0;
placement.placement = &place;
placement.num_placement = 1;
placement.busy_placement = &place;
placement.num_busy_placement = 1;
ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
true, domain, flags,
NULL, &placement, &obj);
if (ret) {
DRM_ERROR("(%d) bo create failed\n", ret);
return ret;
}
*handle = (cgs_handle_t)obj;
return ret;
}
static int amdgpu_cgs_import_gpu_mem(void *cgs_device, int dmabuf_fd,
cgs_handle_t *handle)
{
CGS_FUNC_ADEV;
int r;
uint32_t dma_handle;
struct drm_gem_object *obj;
struct amdgpu_bo *bo;
struct drm_device *dev = adev->ddev;
struct drm_file *file_priv = NULL, *priv;
mutex_lock(&dev->struct_mutex);
list_for_each_entry(priv, &dev->filelist, lhead) {
rcu_read_lock();
if (priv->pid == get_pid(task_pid(current)))
file_priv = priv;
rcu_read_unlock();
if (file_priv)
break;
}
mutex_unlock(&dev->struct_mutex);
r = dev->driver->prime_fd_to_handle(dev,
file_priv, dmabuf_fd,
&dma_handle);
spin_lock(&file_priv->table_lock);
/* Check if we currently have a reference on the object */
obj = idr_find(&file_priv->object_idr, dma_handle);
if (obj == NULL) {
spin_unlock(&file_priv->table_lock);
return -EINVAL;
}
spin_unlock(&file_priv->table_lock);
bo = gem_to_amdgpu_bo(obj);
*handle = (cgs_handle_t)bo;
return 0;
}
static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle)
{
struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
if (obj) {
int r = amdgpu_bo_reserve(obj, false);
if (likely(r == 0)) {
amdgpu_bo_kunmap(obj);
amdgpu_bo_unpin(obj);
amdgpu_bo_unreserve(obj);
}
amdgpu_bo_unref(&obj);
}
return 0;
}
static int amdgpu_cgs_gmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
uint64_t *mcaddr)
{
int r;
u64 min_offset, max_offset;
struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
WARN_ON_ONCE(obj->placement.num_placement > 1);
min_offset = obj->placements[0].fpfn << PAGE_SHIFT;
max_offset = obj->placements[0].lpfn << PAGE_SHIFT;
r = amdgpu_bo_reserve(obj, false);
if (unlikely(r != 0))
return r;
r = amdgpu_bo_pin_restricted(obj, AMDGPU_GEM_DOMAIN_GTT,
min_offset, max_offset, mcaddr);
amdgpu_bo_unreserve(obj);
return r;
}
static int amdgpu_cgs_gunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
{
int r;
struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
r = amdgpu_bo_reserve(obj, false);
if (unlikely(r != 0))
return r;
r = amdgpu_bo_unpin(obj);
amdgpu_bo_unreserve(obj);
return r;
}
static int amdgpu_cgs_kmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
void **map)
{
int r;
struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
r = amdgpu_bo_reserve(obj, false);
if (unlikely(r != 0))
return r;
r = amdgpu_bo_kmap(obj, map);
amdgpu_bo_unreserve(obj);
return r;
}
static int amdgpu_cgs_kunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
{
int r;
struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
r = amdgpu_bo_reserve(obj, false);
if (unlikely(r != 0))
return r;
amdgpu_bo_kunmap(obj);
amdgpu_bo_unreserve(obj);
return r;
}
static uint32_t amdgpu_cgs_read_register(void *cgs_device, unsigned offset)
{
CGS_FUNC_ADEV;
return RREG32(offset);
}
static void amdgpu_cgs_write_register(void *cgs_device, unsigned offset,
uint32_t value)
{
CGS_FUNC_ADEV;
WREG32(offset, value);
}
static uint32_t amdgpu_cgs_read_ind_register(void *cgs_device,
enum cgs_ind_reg space,
unsigned index)
{
CGS_FUNC_ADEV;
switch (space) {
case CGS_IND_REG__MMIO:
return RREG32_IDX(index);
case CGS_IND_REG__PCIE:
return RREG32_PCIE(index);
case CGS_IND_REG__SMC:
return RREG32_SMC(index);
case CGS_IND_REG__UVD_CTX:
return RREG32_UVD_CTX(index);
case CGS_IND_REG__DIDT:
return RREG32_DIDT(index);
case CGS_IND_REG__AUDIO_ENDPT:
DRM_ERROR("audio endpt register access not implemented.\n");
return 0;
}
WARN(1, "Invalid indirect register space");
return 0;
}
static void amdgpu_cgs_write_ind_register(void *cgs_device,
enum cgs_ind_reg space,
unsigned index, uint32_t value)
{
CGS_FUNC_ADEV;
switch (space) {
case CGS_IND_REG__MMIO:
return WREG32_IDX(index, value);
case CGS_IND_REG__PCIE:
return WREG32_PCIE(index, value);
case CGS_IND_REG__SMC:
return WREG32_SMC(index, value);
case CGS_IND_REG__UVD_CTX:
return WREG32_UVD_CTX(index, value);
case CGS_IND_REG__DIDT:
return WREG32_DIDT(index, value);
case CGS_IND_REG__AUDIO_ENDPT:
DRM_ERROR("audio endpt register access not implemented.\n");
return;
}
WARN(1, "Invalid indirect register space");
}
static uint8_t amdgpu_cgs_read_pci_config_byte(void *cgs_device, unsigned addr)
{
CGS_FUNC_ADEV;
uint8_t val;
int ret = pci_read_config_byte(adev->pdev, addr, &val);
if (WARN(ret, "pci_read_config_byte error"))
return 0;
return val;
}
static uint16_t amdgpu_cgs_read_pci_config_word(void *cgs_device, unsigned addr)
{
CGS_FUNC_ADEV;
uint16_t val;
int ret = pci_read_config_word(adev->pdev, addr, &val);
if (WARN(ret, "pci_read_config_word error"))
return 0;
return val;
}
static uint32_t amdgpu_cgs_read_pci_config_dword(void *cgs_device,
unsigned addr)
{
CGS_FUNC_ADEV;
uint32_t val;
int ret = pci_read_config_dword(adev->pdev, addr, &val);
if (WARN(ret, "pci_read_config_dword error"))
return 0;
return val;
}
static void amdgpu_cgs_write_pci_config_byte(void *cgs_device, unsigned addr,
uint8_t value)
{
CGS_FUNC_ADEV;
int ret = pci_write_config_byte(adev->pdev, addr, value);
WARN(ret, "pci_write_config_byte error");
}
static void amdgpu_cgs_write_pci_config_word(void *cgs_device, unsigned addr,
uint16_t value)
{
CGS_FUNC_ADEV;
int ret = pci_write_config_word(adev->pdev, addr, value);
WARN(ret, "pci_write_config_word error");
}
static void amdgpu_cgs_write_pci_config_dword(void *cgs_device, unsigned addr,
uint32_t value)
{
CGS_FUNC_ADEV;
int ret = pci_write_config_dword(adev->pdev, addr, value);
WARN(ret, "pci_write_config_dword error");
}
static const void *amdgpu_cgs_atom_get_data_table(void *cgs_device,
unsigned table, uint16_t *size,
uint8_t *frev, uint8_t *crev)
{
CGS_FUNC_ADEV;
uint16_t data_start;
if (amdgpu_atom_parse_data_header(
adev->mode_info.atom_context, table, size,
frev, crev, &data_start))
return (uint8_t*)adev->mode_info.atom_context->bios +
data_start;
return NULL;
}
static int amdgpu_cgs_atom_get_cmd_table_revs(void *cgs_device, unsigned table,
uint8_t *frev, uint8_t *crev)
{
CGS_FUNC_ADEV;
if (amdgpu_atom_parse_cmd_header(
adev->mode_info.atom_context, table,
frev, crev))
return 0;
return -EINVAL;
}
static int amdgpu_cgs_atom_exec_cmd_table(void *cgs_device, unsigned table,
void *args)
{
CGS_FUNC_ADEV;
return amdgpu_atom_execute_table(
adev->mode_info.atom_context, table, args);
}
static int amdgpu_cgs_create_pm_request(void *cgs_device, cgs_handle_t *request)
{
/* TODO */
return 0;
}
static int amdgpu_cgs_destroy_pm_request(void *cgs_device, cgs_handle_t request)
{
/* TODO */
return 0;
}
static int amdgpu_cgs_set_pm_request(void *cgs_device, cgs_handle_t request,
int active)
{
/* TODO */
return 0;
}
static int amdgpu_cgs_pm_request_clock(void *cgs_device, cgs_handle_t request,
enum cgs_clock clock, unsigned freq)
{
/* TODO */
return 0;
}
static int amdgpu_cgs_pm_request_engine(void *cgs_device, cgs_handle_t request,
enum cgs_engine engine, int powered)
{
/* TODO */
return 0;
}
static int amdgpu_cgs_pm_query_clock_limits(void *cgs_device,
enum cgs_clock clock,
struct cgs_clock_limits *limits)
{
/* TODO */
return 0;
}
static int amdgpu_cgs_set_camera_voltages(void *cgs_device, uint32_t mask,
const uint32_t *voltages)
{
DRM_ERROR("not implemented");
return -EPERM;
}
struct cgs_irq_params {
unsigned src_id;
cgs_irq_source_set_func_t set;
cgs_irq_handler_func_t handler;
void *private_data;
};
static int cgs_set_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
{
struct cgs_irq_params *irq_params =
(struct cgs_irq_params *)src->data;
if (!irq_params)
return -EINVAL;
if (!irq_params->set)
return -EINVAL;
return irq_params->set(irq_params->private_data,
irq_params->src_id,
type,
(int)state);
}
static int cgs_process_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
struct cgs_irq_params *irq_params =
(struct cgs_irq_params *)source->data;
if (!irq_params)
return -EINVAL;
if (!irq_params->handler)
return -EINVAL;
return irq_params->handler(irq_params->private_data,
irq_params->src_id,
entry->iv_entry);
}
static const struct amdgpu_irq_src_funcs cgs_irq_funcs = {
.set = cgs_set_irq_state,
.process = cgs_process_irq,
};
static int amdgpu_cgs_add_irq_source(void *cgs_device, unsigned src_id,
unsigned num_types,
cgs_irq_source_set_func_t set,
cgs_irq_handler_func_t handler,
void *private_data)
{
CGS_FUNC_ADEV;
int ret = 0;
struct cgs_irq_params *irq_params;
struct amdgpu_irq_src *source =
kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
if (!source)
return -ENOMEM;
irq_params =
kzalloc(sizeof(struct cgs_irq_params), GFP_KERNEL);
if (!irq_params) {
kfree(source);
return -ENOMEM;
}
source->num_types = num_types;
source->funcs = &cgs_irq_funcs;
irq_params->src_id = src_id;
irq_params->set = set;
irq_params->handler = handler;
irq_params->private_data = private_data;
source->data = (void *)irq_params;
ret = amdgpu_irq_add_id(adev, src_id, source);
if (ret) {
kfree(irq_params);
kfree(source);
}
return ret;
}
static int amdgpu_cgs_irq_get(void *cgs_device, unsigned src_id, unsigned type)
{
CGS_FUNC_ADEV;
return amdgpu_irq_get(adev, adev->irq.sources[src_id], type);
}
static int amdgpu_cgs_irq_put(void *cgs_device, unsigned src_id, unsigned type)
{
CGS_FUNC_ADEV;
return amdgpu_irq_put(adev, adev->irq.sources[src_id], type);
}
int amdgpu_cgs_set_clockgating_state(void *cgs_device,
enum amd_ip_block_type block_type,
enum amd_clockgating_state state)
{
CGS_FUNC_ADEV;
int i, r = -1;
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_block_status[i].valid)
continue;
if (adev->ip_blocks[i].type == block_type) {
r = adev->ip_blocks[i].funcs->set_clockgating_state(
(void *)adev,
state);
break;
}
}
return r;
}
int amdgpu_cgs_set_powergating_state(void *cgs_device,
enum amd_ip_block_type block_type,
enum amd_powergating_state state)
{
CGS_FUNC_ADEV;
int i, r = -1;
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_block_status[i].valid)
continue;
if (adev->ip_blocks[i].type == block_type) {
r = adev->ip_blocks[i].funcs->set_powergating_state(
(void *)adev,
state);
break;
}
}
return r;
}
static uint32_t fw_type_convert(void *cgs_device, uint32_t fw_type)
{
CGS_FUNC_ADEV;
enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;
switch (fw_type) {
case CGS_UCODE_ID_SDMA0:
result = AMDGPU_UCODE_ID_SDMA0;
break;
case CGS_UCODE_ID_SDMA1:
result = AMDGPU_UCODE_ID_SDMA1;
break;
case CGS_UCODE_ID_CP_CE:
result = AMDGPU_UCODE_ID_CP_CE;
break;
case CGS_UCODE_ID_CP_PFP:
result = AMDGPU_UCODE_ID_CP_PFP;
break;
case CGS_UCODE_ID_CP_ME:
result = AMDGPU_UCODE_ID_CP_ME;
break;
case CGS_UCODE_ID_CP_MEC:
case CGS_UCODE_ID_CP_MEC_JT1:
result = AMDGPU_UCODE_ID_CP_MEC1;
break;
case CGS_UCODE_ID_CP_MEC_JT2:
if (adev->asic_type == CHIP_TONGA)
result = AMDGPU_UCODE_ID_CP_MEC2;
else if (adev->asic_type == CHIP_CARRIZO)
result = AMDGPU_UCODE_ID_CP_MEC1;
break;
case CGS_UCODE_ID_RLC_G:
result = AMDGPU_UCODE_ID_RLC_G;
break;
default:
DRM_ERROR("Firmware type not supported\n");
}
return result;
}
static int amdgpu_cgs_get_firmware_info(void *cgs_device,
enum cgs_ucode_id type,
struct cgs_firmware_info *info)
{
CGS_FUNC_ADEV;
if (CGS_UCODE_ID_SMU != type) {
uint64_t gpu_addr;
uint32_t data_size;
const struct gfx_firmware_header_v1_0 *header;
enum AMDGPU_UCODE_ID id;
struct amdgpu_firmware_info *ucode;
id = fw_type_convert(cgs_device, type);
ucode = &adev->firmware.ucode[id];
if (ucode->fw == NULL)
return -EINVAL;
gpu_addr = ucode->mc_addr;
header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
data_size = le32_to_cpu(header->header.ucode_size_bytes);
if ((type == CGS_UCODE_ID_CP_MEC_JT1) ||
(type == CGS_UCODE_ID_CP_MEC_JT2)) {
gpu_addr += le32_to_cpu(header->jt_offset) << 2;
data_size = le32_to_cpu(header->jt_size) << 2;
}
info->mc_addr = gpu_addr;
info->image_size = data_size;
info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
} else {
char fw_name[30] = {0};
int err = 0;
uint32_t ucode_size;
uint32_t ucode_start_address;
const uint8_t *src;
const struct smc_firmware_header_v1_0 *hdr;
switch (adev->asic_type) {
case CHIP_TONGA:
strcpy(fw_name, "amdgpu/tonga_smc.bin");
break;
default:
DRM_ERROR("SMC firmware not supported\n");
return -EINVAL;
}
err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
if (err) {
DRM_ERROR("Failed to request firmware\n");
return err;
}
err = amdgpu_ucode_validate(adev->pm.fw);
if (err) {
DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
release_firmware(adev->pm.fw);
adev->pm.fw = NULL;
return err;
}
hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
src = (const uint8_t *)(adev->pm.fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
info->version = adev->pm.fw_version;
info->image_size = ucode_size;
info->kptr = (void *)src;
}
return 0;
}
static const struct cgs_ops amdgpu_cgs_ops = {
amdgpu_cgs_gpu_mem_info,
amdgpu_cgs_gmap_kmem,
amdgpu_cgs_gunmap_kmem,
amdgpu_cgs_alloc_gpu_mem,
amdgpu_cgs_free_gpu_mem,
amdgpu_cgs_gmap_gpu_mem,
amdgpu_cgs_gunmap_gpu_mem,
amdgpu_cgs_kmap_gpu_mem,
amdgpu_cgs_kunmap_gpu_mem,
amdgpu_cgs_read_register,
amdgpu_cgs_write_register,
amdgpu_cgs_read_ind_register,
amdgpu_cgs_write_ind_register,
amdgpu_cgs_read_pci_config_byte,
amdgpu_cgs_read_pci_config_word,
amdgpu_cgs_read_pci_config_dword,
amdgpu_cgs_write_pci_config_byte,
amdgpu_cgs_write_pci_config_word,
amdgpu_cgs_write_pci_config_dword,
amdgpu_cgs_atom_get_data_table,
amdgpu_cgs_atom_get_cmd_table_revs,
amdgpu_cgs_atom_exec_cmd_table,
amdgpu_cgs_create_pm_request,
amdgpu_cgs_destroy_pm_request,
amdgpu_cgs_set_pm_request,
amdgpu_cgs_pm_request_clock,
amdgpu_cgs_pm_request_engine,
amdgpu_cgs_pm_query_clock_limits,
amdgpu_cgs_set_camera_voltages,
amdgpu_cgs_get_firmware_info,
amdgpu_cgs_set_powergating_state,
amdgpu_cgs_set_clockgating_state
};
static const struct cgs_os_ops amdgpu_cgs_os_ops = {
amdgpu_cgs_import_gpu_mem,
amdgpu_cgs_add_irq_source,
amdgpu_cgs_irq_get,
amdgpu_cgs_irq_put
};
void *amdgpu_cgs_create_device(struct amdgpu_device *adev)
{
struct amdgpu_cgs_device *cgs_device =
kmalloc(sizeof(*cgs_device), GFP_KERNEL);
if (!cgs_device) {
DRM_ERROR("Couldn't allocate CGS device structure\n");
return NULL;
}
cgs_device->base.ops = &amdgpu_cgs_ops;
cgs_device->base.os_ops = &amdgpu_cgs_os_ops;
cgs_device->adev = adev;
return cgs_device;
}
void amdgpu_cgs_destroy_device(void *cgs_device)
{
kfree(cgs_device);
}
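The whole file hangs off one indirection trick: callers hold an opaque cgs_device whose first member is the ops-bearing base struct, and CGS_FUNC_ADEV casts back to the wrapper to recover the amdgpu_device. A miniature, runnable version of that pattern:

#include <stdio.h>

struct cgs_ops  { unsigned (*read_reg)(void *cgs, unsigned off); };
struct cgs_base { const struct cgs_ops *ops; };

struct my_dev     { unsigned regs[16]; };
struct my_cgs_dev { struct cgs_base base; struct my_dev *dev; };

/* What CGS_FUNC_ADEV does: cast the opaque handle back to the wrapper
 * laid out with the base struct first, then reach the private device. */
static unsigned my_read_reg(void *cgs, unsigned off)
{
        struct my_dev *dev = ((struct my_cgs_dev *)cgs)->dev;

        return dev->regs[off];
}

static const struct cgs_ops my_ops = { .read_reg = my_read_reg };

int main(void)
{
        struct my_dev dev = { .regs = { [3] = 0xdead } };
        struct my_cgs_dev cgs = { .base.ops = &my_ops, .dev = &dev };

        printf("%#x\n", cgs.base.ops->read_reg(&cgs, 3));
        return 0;
}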

View File

@ -126,12 +126,54 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
return 0;
}
static void amdgpu_job_work_func(struct work_struct *work)
{
struct amdgpu_cs_parser *sched_job =
container_of(work, struct amdgpu_cs_parser,
job_work);
mutex_lock(&sched_job->job_lock);
if (sched_job->free_job)
sched_job->free_job(sched_job);
mutex_unlock(&sched_job->job_lock);
/* after processing job, free memory */
fence_put(&sched_job->s_fence->base);
kfree(sched_job);
}
struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
struct drm_file *filp,
struct amdgpu_ctx *ctx,
struct amdgpu_ib *ibs,
uint32_t num_ibs)
{
struct amdgpu_cs_parser *parser;
int i;
parser = kzalloc(sizeof(struct amdgpu_cs_parser), GFP_KERNEL);
if (!parser)
return NULL;
parser->adev = adev;
parser->filp = filp;
parser->ctx = ctx;
parser->ibs = ibs;
parser->num_ibs = num_ibs;
if (amdgpu_enable_scheduler) {
mutex_init(&parser->job_lock);
INIT_WORK(&parser->job_work, amdgpu_job_work_func);
}
for (i = 0; i < num_ibs; i++)
ibs[i].ctx = ctx;
return parser;
}
int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
{
union drm_amdgpu_cs *cs = data;
uint64_t *chunk_array_user;
uint64_t *chunk_array = NULL;
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct amdgpu_bo_list *bo_list = NULL;
unsigned size, i;
int r = 0;
@ -143,17 +185,30 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
r = -EINVAL;
goto out;
}
p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
if (!amdgpu_enable_scheduler)
p->bo_list = bo_list;
else {
if (bo_list && !bo_list->has_userptr) {
p->bo_list = amdgpu_bo_list_clone(bo_list);
amdgpu_bo_list_put(bo_list);
if (!p->bo_list)
return -ENOMEM;
} else if (bo_list && bo_list->has_userptr)
p->bo_list = bo_list;
else
p->bo_list = NULL;
}
/* get chunks */
INIT_LIST_HEAD(&p->validated);
chunk_array = kcalloc(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
if (chunk_array == NULL) {
r = -ENOMEM;
goto out;
}
chunk_array_user = (uint64_t *)(unsigned long)(cs->in.chunks);
chunk_array_user = (uint64_t __user *)(cs->in.chunks);
if (copy_from_user(chunk_array, chunk_array_user,
sizeof(uint64_t)*cs->in.num_chunks)) {
r = -EFAULT;
@ -161,7 +216,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
}
p->nchunks = cs->in.num_chunks;
p->chunks = kcalloc(p->nchunks, sizeof(struct amdgpu_cs_chunk),
p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
GFP_KERNEL);
if (p->chunks == NULL) {
r = -ENOMEM;
@ -173,7 +228,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
struct drm_amdgpu_cs_chunk user_chunk;
uint32_t __user *cdata;
chunk_ptr = (void __user *)(unsigned long)chunk_array[i];
chunk_ptr = (void __user *)chunk_array[i];
if (copy_from_user(&user_chunk, chunk_ptr,
sizeof(struct drm_amdgpu_cs_chunk))) {
r = -EFAULT;
@ -183,7 +238,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
p->chunks[i].length_dw = user_chunk.length_dw;
size = p->chunks[i].length_dw;
cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
cdata = (void __user *)user_chunk.chunk_data;
p->chunks[i].user_ptr = cdata;
p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
@ -235,11 +290,10 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
}
}
p->ibs = kcalloc(p->num_ibs, sizeof(struct amdgpu_ib), GFP_KERNEL);
if (!p->ibs) {
p->ibs = kmalloc_array(p->num_ibs, sizeof(struct amdgpu_ib), GFP_KERNEL);
if (!p->ibs)
r = -ENOMEM;
goto out;
}
out:
kfree(chunk_array);
@ -415,18 +469,8 @@ static int cmp_size_smaller_first(void *priv, struct list_head *a,
return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
}
/**
* cs_parser_fini() - clean parser states
* @parser: parser structure holding parsing context.
* @error: error number
*
* If error is set then unvalidate buffer, otherwise just free memory
* used by parsing context.
**/
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int error, bool backoff)
{
unsigned i;
if (!error) {
/* Sort the buffer list from the smallest to largest buffer,
* which affects the order of buffers in the LRU list.
@ -447,11 +491,19 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
ttm_eu_backoff_reservation(&parser->ticket,
&parser->validated);
}
}
static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser)
{
unsigned i;
if (parser->ctx)
amdgpu_ctx_put(parser->ctx);
if (parser->bo_list)
amdgpu_bo_list_put(parser->bo_list);
if (parser->bo_list) {
if (amdgpu_enable_scheduler && !parser->bo_list->has_userptr)
amdgpu_bo_list_free(parser->bo_list);
else
amdgpu_bo_list_put(parser->bo_list);
}
drm_free_large(parser->vm_bos);
for (i = 0; i < parser->nchunks; i++)
drm_free_large(parser->chunks[i].kdata);
@ -462,6 +514,29 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
kfree(parser->ibs);
if (parser->uf.bo)
drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
if (!amdgpu_enable_scheduler)
kfree(parser);
}
/**
* cs_parser_fini() - clean parser states
* @parser: parser structure holding parsing context.
* @error: error number
*
* If error is set then unvalidate buffer, otherwise just free memory
* used by parsing context.
**/
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
{
amdgpu_cs_parser_fini_early(parser, error, backoff);
amdgpu_cs_parser_fini_late(parser);
}
static int amdgpu_cs_parser_free_job(struct amdgpu_cs_parser *sched_job)
{
amdgpu_cs_parser_fini_late(sched_job);
return 0;
}
static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
@ -476,12 +551,18 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
if (r)
return r;
r = amdgpu_sync_fence(adev, &p->ibs[0].sync, vm->page_directory_fence);
if (r)
return r;
r = amdgpu_vm_clear_freed(adev, vm);
if (r)
return r;
if (p->bo_list) {
for (i = 0; i < p->bo_list->num_entries; i++) {
struct fence *f;
/* ignore duplicates */
bo = p->bo_list->array[i].robj;
if (!bo)
@ -495,7 +576,10 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
if (r)
return r;
amdgpu_sync_fence(&p->ibs[0].sync, bo_va->last_pt_update);
f = bo_va->last_pt_update;
r = amdgpu_sync_fence(adev, &p->ibs[0].sync, f);
if (r)
return r;
}
}
@ -529,9 +613,9 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
goto out;
}
amdgpu_cs_sync_rings(parser);
r = amdgpu_ib_schedule(adev, parser->num_ibs, parser->ibs,
parser->filp);
if (!amdgpu_enable_scheduler)
r = amdgpu_ib_schedule(adev, parser->num_ibs, parser->ibs,
parser->filp);
out:
mutex_unlock(&vm->mutex);
@ -650,7 +734,6 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
ib->oa_size = amdgpu_bo_size(oa);
}
}
/* wrap the last IB with user fence */
if (parser->uf.bo) {
struct amdgpu_ib *ib = &parser->ibs[parser->num_ibs - 1];
@ -693,9 +776,9 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
sizeof(struct drm_amdgpu_cs_chunk_dep);
for (j = 0; j < num_deps; ++j) {
struct amdgpu_fence *fence;
struct amdgpu_ring *ring;
struct amdgpu_ctx *ctx;
struct fence *fence;
r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
deps[j].ip_instance,
@ -707,50 +790,34 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
if (ctx == NULL)
return -EINVAL;
r = amdgpu_fence_recreate(ring, p->filp,
deps[j].handle,
&fence);
if (r) {
fence = amdgpu_ctx_get_fence(ctx, ring,
deps[j].handle);
if (IS_ERR(fence)) {
r = PTR_ERR(fence);
amdgpu_ctx_put(ctx);
return r;
}
amdgpu_sync_fence(&ib->sync, fence);
amdgpu_fence_unref(&fence);
amdgpu_ctx_put(ctx);
} else if (fence) {
r = amdgpu_sync_fence(adev, &ib->sync, fence);
fence_put(fence);
amdgpu_ctx_put(ctx);
if (r)
return r;
}
}
}
return 0;
}
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
static int amdgpu_cs_parser_prepare_job(struct amdgpu_cs_parser *sched_job)
{
struct amdgpu_device *adev = dev->dev_private;
union drm_amdgpu_cs *cs = data;
struct amdgpu_cs_parser parser;
int r, i;
struct amdgpu_cs_parser *parser = sched_job;
struct amdgpu_device *adev = sched_job->adev;
bool reserved_buffers = false;
down_read(&adev->exclusive_lock);
if (!adev->accel_working) {
up_read(&adev->exclusive_lock);
return -EBUSY;
}
/* initialize parser */
memset(&parser, 0, sizeof(struct amdgpu_cs_parser));
parser.filp = filp;
parser.adev = adev;
r = amdgpu_cs_parser_init(&parser, data);
if (r) {
DRM_ERROR("Failed to initialize parser !\n");
amdgpu_cs_parser_fini(&parser, r, false);
up_read(&adev->exclusive_lock);
r = amdgpu_cs_handle_lockup(adev, r);
return r;
}
r = amdgpu_cs_parser_relocs(&parser);
r = amdgpu_cs_parser_relocs(parser);
if (r) {
if (r != -ERESTARTSYS) {
if (r == -ENOMEM)
@ -762,30 +829,114 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (!r) {
reserved_buffers = true;
r = amdgpu_cs_ib_fill(adev, &parser);
r = amdgpu_cs_ib_fill(adev, parser);
}
if (!r) {
r = amdgpu_cs_dependencies(adev, parser);
if (r)
DRM_ERROR("Failed in the dependencies handling %d!\n", r);
}
if (r) {
amdgpu_cs_parser_fini(parser, r, reserved_buffers);
return r;
}
if (!r)
r = amdgpu_cs_dependencies(adev, &parser);
for (i = 0; i < parser->num_ibs; i++)
trace_amdgpu_cs(parser, i);
r = amdgpu_cs_ib_vm_chunk(adev, parser);
return r;
}
static struct amdgpu_ring *amdgpu_cs_parser_get_ring(
struct amdgpu_device *adev,
struct amdgpu_cs_parser *parser)
{
int i, r;
struct amdgpu_cs_chunk *chunk;
struct drm_amdgpu_cs_chunk_ib *chunk_ib;
struct amdgpu_ring *ring;
for (i = 0; i < parser->nchunks; i++) {
chunk = &parser->chunks[i];
chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;
if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
continue;
r = amdgpu_cs_get_ring(adev, chunk_ib->ip_type,
chunk_ib->ip_instance, chunk_ib->ring,
&ring);
if (r)
return NULL;
break;
}
return ring;
}
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
struct amdgpu_device *adev = dev->dev_private;
union drm_amdgpu_cs *cs = data;
struct amdgpu_cs_parser *parser;
int r;
down_read(&adev->exclusive_lock);
if (!adev->accel_working) {
up_read(&adev->exclusive_lock);
return -EBUSY;
}
parser = amdgpu_cs_parser_create(adev, filp, NULL, NULL, 0);
if (!parser)
return -ENOMEM;
r = amdgpu_cs_parser_init(parser, data);
if (r) {
amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
DRM_ERROR("Failed to initialize parser !\n");
amdgpu_cs_parser_fini(parser, r, false);
up_read(&adev->exclusive_lock);
r = amdgpu_cs_handle_lockup(adev, r);
return r;
}
for (i = 0; i < parser.num_ibs; i++)
trace_amdgpu_cs(&parser, i);
if (amdgpu_enable_scheduler && parser->num_ibs) {
struct amdgpu_ring * ring =
amdgpu_cs_parser_get_ring(adev, parser);
r = amdgpu_cs_parser_prepare_job(parser);
if (r)
goto out;
parser->ring = ring;
parser->free_job = amdgpu_cs_parser_free_job;
mutex_lock(&parser->job_lock);
r = amd_sched_push_job(ring->scheduler,
&parser->ctx->rings[ring->idx].entity,
parser,
&parser->s_fence);
if (r) {
mutex_unlock(&parser->job_lock);
goto out;
}
parser->ibs[parser->num_ibs - 1].sequence =
amdgpu_ctx_add_fence(parser->ctx, ring,
&parser->s_fence->base,
parser->s_fence->v_seq);
cs->out.handle = parser->s_fence->v_seq;
list_sort(NULL, &parser->validated, cmp_size_smaller_first);
ttm_eu_fence_buffer_objects(&parser->ticket,
&parser->validated,
&parser->s_fence->base);
r = amdgpu_cs_ib_vm_chunk(adev, &parser);
if (r) {
goto out;
mutex_unlock(&parser->job_lock);
up_read(&adev->exclusive_lock);
return 0;
}
r = amdgpu_cs_parser_prepare_job(parser);
if (r)
goto out;
cs->out.handle = parser.ibs[parser.num_ibs - 1].fence->seq;
cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence;
out:
amdgpu_cs_parser_fini(&parser, r, true);
amdgpu_cs_parser_fini(parser, r, true);
up_read(&adev->exclusive_lock);
r = amdgpu_cs_handle_lockup(adev, r);
return r;
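Either branch of the ioctl now hands userspace a plain sequence number as the CS handle: the scheduler path reports the scheduler fence's v_seq before the job has actually run, the direct path the last IB's sequence after submission. A deliberately simplified toy model of that handle semantic:

#include <stdio.h>

static unsigned long long next_seq = 1;

/* Whether a job runs immediately or is queued to a scheduler thread,
 * the caller gets back the same kind of sequence handle to wait on. */
static unsigned long long submit(int via_scheduler)
{
        unsigned long long seq = next_seq++;

        if (via_scheduler) {
                /* queued: executed later by the scheduler */
        } else {
                /* direct: executed before returning */
        }
        return seq;
}

int main(void)
{
        unsigned long long a = submit(1);
        unsigned long long b = submit(0);

        printf("handles: %llu %llu\n", a, b);
        return 0;
}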
@ -806,30 +957,29 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
union drm_amdgpu_wait_cs *wait = data;
struct amdgpu_device *adev = dev->dev_private;
unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
struct amdgpu_fence *fence = NULL;
struct amdgpu_ring *ring = NULL;
struct amdgpu_ctx *ctx;
struct fence *fence;
long r;
r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
wait->in.ring, &ring);
if (r)
return r;
ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
if (ctx == NULL)
return -EINVAL;
r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
wait->in.ring, &ring);
if (r) {
amdgpu_ctx_put(ctx);
return r;
}
fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);
if (IS_ERR(fence))
r = PTR_ERR(fence);
else if (fence) {
r = fence_wait_timeout(fence, true, timeout);
fence_put(fence);
} else
r = 1;
r = amdgpu_fence_recreate(ring, filp, wait->in.handle, &fence);
if (r) {
amdgpu_ctx_put(ctx);
return r;
}
r = fence_wait_timeout(&fence->base, true, timeout);
amdgpu_fence_unref(&fence);
amdgpu_ctx_put(ctx);
if (r < 0)
return r;
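In outline, the reworked wait ioctl resolves the context first and then turns the user handle into a struct fence through the per-context window (an annotated summary of the code above, not new driver code):
	/* amdgpu_cs_wait_ioctl, new flow:
	 *   ctx   = amdgpu_ctx_get(fpriv, wait->in.ctx_id);    take a context ref
	 *   fence = amdgpu_ctx_get_fence(ctx, ring, handle);
	 *     IS_ERR(fence) -> handle was never issued, r = PTR_ERR(fence)
	 *     NULL          -> fence aged out of the window, treated as long
	 *                      signaled, r = 1
	 *     else          -> r = fence_wait_timeout(fence, true, timeout);
	 *                      fence_put(fence);
	 *   amdgpu_ctx_put(ctx);                               drop the context ref
	 */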
@@ -864,7 +1014,16 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
if (!reloc->bo_va)
continue;
list_for_each_entry(mapping, &reloc->bo_va->mappings, list) {
list_for_each_entry(mapping, &reloc->bo_va->valids, list) {
if (mapping->it.start > addr ||
addr > mapping->it.last)
continue;
*bo = reloc->bo_va->bo;
return mapping;
}
list_for_each_entry(mapping, &reloc->bo_va->invalids, list) {
if (mapping->it.start > addr ||
addr > mapping->it.last)
continue;


@@ -25,54 +25,107 @@
#include <drm/drmP.h>
#include "amdgpu.h"
static void amdgpu_ctx_do_release(struct kref *ref)
int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
struct amdgpu_ctx *ctx)
{
struct amdgpu_ctx *ctx;
struct amdgpu_ctx_mgr *mgr;
unsigned i, j;
int r;
ctx = container_of(ref, struct amdgpu_ctx, refcount);
mgr = &ctx->fpriv->ctx_mgr;
memset(ctx, 0, sizeof(*ctx));
ctx->adev = adev;
kref_init(&ctx->refcount);
spin_lock_init(&ctx->ring_lock);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
ctx->rings[i].sequence = 1;
idr_remove(&mgr->ctx_handles, ctx->id);
kfree(ctx);
if (amdgpu_enable_scheduler) {
/* create context entity for each ring */
for (i = 0; i < adev->num_rings; i++) {
struct amd_sched_rq *rq;
if (kernel)
rq = &adev->rings[i]->scheduler->kernel_rq;
else
rq = &adev->rings[i]->scheduler->sched_rq;
r = amd_sched_entity_init(adev->rings[i]->scheduler,
&ctx->rings[i].entity,
rq, amdgpu_sched_jobs);
if (r)
break;
}
if (i < adev->num_rings) {
for (j = 0; j < i; j++)
amd_sched_entity_fini(adev->rings[j]->scheduler,
&ctx->rings[j].entity);
kfree(ctx);
return r;
}
}
return 0;
}
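The entity setup above is the standard partial-initialization unwind: if the entity for ring i fails, only rings 0..i-1 are torn down before bailing out. The idiom in isolation (init_one()/fini_one() are illustrative placeholders, not driver functions):
	int init_all(unsigned n)
	{
		unsigned i, j;
		int r = 0;

		for (i = 0; i < n; i++) {
			r = init_one(i);
			if (r)
				break;
		}
		if (i < n) {
			/* unwind only the entries that came up successfully */
			for (j = 0; j < i; j++)
				fini_one(j);
			return r;
		}
		return 0;
	}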
int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, uint32_t *id, uint32_t flags)
void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
{
struct amdgpu_device *adev = ctx->adev;
unsigned i, j;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
for (j = 0; j < AMDGPU_CTX_MAX_CS_PENDING; ++j)
fence_put(ctx->rings[i].fences[j]);
if (amdgpu_enable_scheduler) {
for (i = 0; i < adev->num_rings; i++)
amd_sched_entity_fini(adev->rings[i]->scheduler,
&ctx->rings[i].entity);
}
}
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
struct amdgpu_fpriv *fpriv,
uint32_t *id)
{
int r;
struct amdgpu_ctx *ctx;
struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
struct amdgpu_ctx *ctx;
int r;
ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
mutex_lock(&mgr->lock);
r = idr_alloc(&mgr->ctx_handles, ctx, 0, 0, GFP_KERNEL);
r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
if (r < 0) {
mutex_unlock(&mgr->lock);
kfree(ctx);
return r;
}
*id = (uint32_t)r;
memset(ctx, 0, sizeof(*ctx));
ctx->id = *id;
ctx->fpriv = fpriv;
kref_init(&ctx->refcount);
r = amdgpu_ctx_init(adev, false, ctx);
mutex_unlock(&mgr->lock);
return 0;
return r;
}
int amdgpu_ctx_free(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, uint32_t id)
static void amdgpu_ctx_do_release(struct kref *ref)
{
struct amdgpu_ctx *ctx;
ctx = container_of(ref, struct amdgpu_ctx, refcount);
amdgpu_ctx_fini(ctx);
kfree(ctx);
}
static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
struct amdgpu_ctx *ctx;
mutex_lock(&mgr->lock);
ctx = idr_find(&mgr->ctx_handles, id);
if (ctx) {
idr_remove(&mgr->ctx_handles, id);
kref_put(&ctx->refcount, amdgpu_ctx_do_release);
mutex_unlock(&mgr->lock);
return 0;
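Context lifetime here is reference-counted: kref_init() at allocation creates the reference owned by the handle table, amdgpu_ctx_get() takes one more per user, and amdgpu_ctx_do_release() runs only when the last reference drops. A sketch of the pattern (error handling omitted):
	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	kref_init(&ctx->refcount);                       /* ref = 1, owned by the idr table */

	kref_get(&ctx->refcount);                        /* amdgpu_ctx_get(): per-user ref */
	kref_put(&ctx->refcount, amdgpu_ctx_do_release); /* amdgpu_ctx_put() */

	idr_remove(&mgr->ctx_handles, id);               /* amdgpu_ctx_free(): drop the   */
	kref_put(&ctx->refcount, amdgpu_ctx_do_release); /* table ref; last put frees ctx */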
@@ -86,9 +139,13 @@ static int amdgpu_ctx_query(struct amdgpu_device *adev,
union drm_amdgpu_ctx_out *out)
{
struct amdgpu_ctx *ctx;
struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
struct amdgpu_ctx_mgr *mgr;
unsigned reset_counter;
if (!fpriv)
return -EINVAL;
mgr = &fpriv->ctx_mgr;
mutex_lock(&mgr->lock);
ctx = idr_find(&mgr->ctx_handles, id);
if (!ctx) {
@@ -97,8 +154,8 @@ static int amdgpu_ctx_query(struct amdgpu_device *adev,
}
/* TODO: these two are always zero */
out->state.flags = ctx->state.flags;
out->state.hangs = ctx->state.hangs;
out->state.flags = 0x0;
out->state.hangs = 0x0;
/* determine if a GPU reset has occurred since the last call */
reset_counter = atomic_read(&adev->gpu_reset_counter);
@@ -113,28 +170,11 @@ static int amdgpu_ctx_query(struct amdgpu_device *adev,
return 0;
}
void amdgpu_ctx_fini(struct amdgpu_fpriv *fpriv)
{
struct idr *idp;
struct amdgpu_ctx *ctx;
uint32_t id;
struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
idp = &mgr->ctx_handles;
idr_for_each_entry(idp,ctx,id) {
if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
DRM_ERROR("ctx (id=%ul) is still alive\n",ctx->id);
}
mutex_destroy(&mgr->lock);
}
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
int r;
uint32_t id;
uint32_t flags;
union drm_amdgpu_ctx *args = data;
struct amdgpu_device *adev = dev->dev_private;
@@ -142,15 +182,14 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
r = 0;
id = args->in.ctx_id;
flags = args->in.flags;
switch (args->in.op) {
case AMDGPU_CTX_OP_ALLOC_CTX:
r = amdgpu_ctx_alloc(adev, fpriv, &id, flags);
r = amdgpu_ctx_alloc(adev, fpriv, &id);
args->out.alloc.ctx_id = id;
break;
case AMDGPU_CTX_OP_FREE_CTX:
r = amdgpu_ctx_free(adev, fpriv, id);
r = amdgpu_ctx_free(fpriv, id);
break;
case AMDGPU_CTX_OP_QUERY_STATE:
r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
@@ -165,7 +204,12 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
struct amdgpu_ctx *ctx;
struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
struct amdgpu_ctx_mgr *mgr;
if (!fpriv)
return NULL;
mgr = &fpriv->ctx_mgr;
mutex_lock(&mgr->lock);
ctx = idr_find(&mgr->ctx_handles, id);
@@ -177,17 +221,96 @@ struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
struct amdgpu_fpriv *fpriv;
struct amdgpu_ctx_mgr *mgr;
if (ctx == NULL)
return -EINVAL;
fpriv = ctx->fpriv;
mgr = &fpriv->ctx_mgr;
mutex_lock(&mgr->lock);
kref_put(&ctx->refcount, amdgpu_ctx_do_release);
mutex_unlock(&mgr->lock);
return 0;
}
uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
struct fence *fence, uint64_t queued_seq)
{
struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
uint64_t seq = 0;
unsigned idx = 0;
struct fence *other = NULL;
if (amdgpu_enable_scheduler)
seq = queued_seq;
else
seq = cring->sequence;
idx = seq % AMDGPU_CTX_MAX_CS_PENDING;
other = cring->fences[idx];
if (other) {
signed long r;
r = fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
if (r < 0)
DRM_ERROR("Error (%ld) waiting for fence!\n", r);
}
fence_get(fence);
spin_lock(&ctx->ring_lock);
cring->fences[idx] = fence;
if (!amdgpu_enable_scheduler)
cring->sequence++;
spin_unlock(&ctx->ring_lock);
fence_put(other);
return seq;
}
struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
struct amdgpu_ring *ring, uint64_t seq)
{
struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
struct fence *fence;
uint64_t queued_seq;
spin_lock(&ctx->ring_lock);
if (amdgpu_enable_scheduler)
queued_seq = amd_sched_next_queued_seq(&cring->entity);
else
queued_seq = cring->sequence;
if (seq >= queued_seq) {
spin_unlock(&ctx->ring_lock);
return ERR_PTR(-EINVAL);
}
if (seq + AMDGPU_CTX_MAX_CS_PENDING < queued_seq) {
spin_unlock(&ctx->ring_lock);
return NULL;
}
fence = fence_get(cring->fences[seq % AMDGPU_CTX_MAX_CS_PENDING]);
spin_unlock(&ctx->ring_lock);
return fence;
}
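Together, amdgpu_ctx_add_fence() and amdgpu_ctx_get_fence() keep a fixed-size sliding window of the last AMDGPU_CTX_MAX_CS_PENDING fences per ring, indexed by sequence number modulo the window size; storing into an occupied slot first waits out its old fence, which throttles a context to one window of outstanding submissions. A self-contained sketch of the same bookkeeping (all names illustrative, locking omitted):
	#define WINDOW 16                  /* stand-in for AMDGPU_CTX_MAX_CS_PENDING */

	struct seq_window {
		void *slot[WINDOW];        /* fences of the last WINDOW submissions */
		unsigned long long next;   /* sequence number of the next submission */
	};

	unsigned long long window_store(struct seq_window *w, void *fence)
	{
		unsigned idx = w->next % WINDOW;

		if (w->slot[idx])
			wait_and_drop(w->slot[idx]); /* recycle the slot */
		w->slot[idx] = fence;
		return w->next++;
	}

	void *window_lookup(struct seq_window *w, unsigned long long seq)
	{
		if (seq >= w->next)
			return invalid_ptr();        /* never issued: -EINVAL */
		if (seq + WINDOW < w->next)
			return NULL;                 /* aged out: long signaled */
		return w->slot[seq % WINDOW];        /* still tracked */
	}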
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
mutex_init(&mgr->lock);
idr_init(&mgr->ctx_handles);
}
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
struct amdgpu_ctx *ctx;
struct idr *idp;
uint32_t id;
idp = &mgr->ctx_handles;
idr_for_each_entry(idp, ctx, id) {
if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
DRM_ERROR("ctx %p is still alive\n", ctx);
}
idr_destroy(&mgr->ctx_handles);
mutex_destroy(&mgr->lock);
}
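For reference, the manager is the standard idr handle-table lifecycle, with every lookup and mutation serialized by mgr->lock; idr_alloc() now starts at 1, so handle 0 is never handed out. In outline (calls exactly as used above):
	mutex_init(&mgr->lock);                                    /* amdgpu_ctx_mgr_init */
	idr_init(&mgr->ctx_handles);

	id  = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL); /* alloc, id >= 1 */
	ctx = idr_find(&mgr->ctx_handles, id);                     /* lookup */
	idr_remove(&mgr->ctx_handles, id);                         /* release the handle */

	idr_destroy(&mgr->ctx_handles);                            /* amdgpu_ctx_mgr_fini */
	mutex_destroy(&mgr->lock);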


@@ -55,6 +55,7 @@ static const char *amdgpu_asic_name[] = {
"MULLINS",
"TOPAZ",
"TONGA",
"FIJI",
"CARRIZO",
"LAST",
};
@@ -63,7 +64,7 @@ bool amdgpu_device_is_px(struct drm_device *dev)
{
struct amdgpu_device *adev = dev->dev_private;
if (adev->flags & AMDGPU_IS_PX)
if (adev->flags & AMD_IS_PX)
return true;
return false;
}
@@ -1160,6 +1161,7 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_TOPAZ:
case CHIP_TONGA:
case CHIP_FIJI:
case CHIP_CARRIZO:
if (adev->asic_type == CHIP_CARRIZO)
adev->family = AMDGPU_FAMILY_CZ;
@@ -1377,7 +1379,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->ddev = ddev;
adev->pdev = pdev;
adev->flags = flags;
adev->asic_type = flags & AMDGPU_ASIC_MASK;
adev->asic_type = flags & AMD_ASIC_MASK;
adev->is_atom_bios = false;
adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
adev->mc.gtt_size = 512 * 1024 * 1024;
@@ -1523,6 +1525,11 @@ int amdgpu_device_init(struct amdgpu_device *adev,
return r;
}
r = amdgpu_ctx_init(adev, true, &adev->kernel_ctx);
if (r) {
dev_err(adev->dev, "failed to create kernel context (%d).\n", r);
return r;
}
r = amdgpu_ib_ring_tests(adev);
if (r)
DRM_ERROR("ib ring test failed (%d).\n", r);
@@ -1584,6 +1591,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
adev->shutdown = true;
/* evict vram memory */
amdgpu_bo_evict_vram(adev);
amdgpu_ctx_fini(&adev->kernel_ctx);
amdgpu_ib_pool_fini(adev);
amdgpu_fence_driver_fini(adev);
amdgpu_fbdev_fini(adev);
@@ -1627,8 +1635,7 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
struct amdgpu_device *adev;
struct drm_crtc *crtc;
struct drm_connector *connector;
int i, r;
bool force_completion = false;
int r;
if (dev == NULL || dev->dev_private == NULL) {
return -ENODEV;
@@ -1667,21 +1674,7 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
/* evict vram memory */
amdgpu_bo_evict_vram(adev);
/* wait for gpu to finish processing current batch */
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
if (!ring)
continue;
r = amdgpu_fence_wait_empty(ring);
if (r) {
/* delay GPU reset to resume */
force_completion = true;
}
}
if (force_completion) {
amdgpu_fence_driver_force_completion(adev);
}
amdgpu_fence_driver_suspend(adev);
r = amdgpu_suspend(adev);
@@ -1739,6 +1732,8 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
r = amdgpu_resume(adev);
amdgpu_fence_driver_resume(adev);
r = amdgpu_ib_ring_tests(adev);
if (r)
DRM_ERROR("ib ring test failed (%d).\n", r);


@@ -35,6 +35,36 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
static void amdgpu_flip_wait_fence(struct amdgpu_device *adev,
struct fence **f)
{
struct amdgpu_fence *fence;
long r;
if (*f == NULL)
return;
fence = to_amdgpu_fence(*f);
if (fence) {
r = fence_wait(&fence->base, false);
if (r == -EDEADLK) {
up_read(&adev->exclusive_lock);
r = amdgpu_gpu_reset(adev);
down_read(&adev->exclusive_lock);
}
} else
r = fence_wait(*f, false);
if (r)
DRM_ERROR("failed to wait on page flip fence (%ld)!\n", r);
/* We continue with the page flip even if we failed to wait on
* the fence, otherwise the DRM core and userspace will be
* confused about which BO the CRTC is scanning out
*/
fence_put(*f);
*f = NULL;
}
static void amdgpu_flip_work_func(struct work_struct *__work)
{
@@ -44,34 +74,13 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
struct amdgpu_crtc *amdgpuCrtc = adev->mode_info.crtcs[work->crtc_id];
struct drm_crtc *crtc = &amdgpuCrtc->base;
struct amdgpu_fence *fence;
unsigned long flags;
int r;
unsigned i;
down_read(&adev->exclusive_lock);
if (work->fence) {
fence = to_amdgpu_fence(work->fence);
if (fence) {
r = amdgpu_fence_wait(fence, false);
if (r == -EDEADLK) {
up_read(&adev->exclusive_lock);
r = amdgpu_gpu_reset(adev);
down_read(&adev->exclusive_lock);
}
} else
r = fence_wait(work->fence, false);
if (r)
DRM_ERROR("failed to wait on page flip fence (%d)!\n", r);
/* We continue with the page flip even if we failed to wait on
* the fence, otherwise the DRM core and userspace will be
* confused about which BO the CRTC is scanning out
*/
fence_put(work->fence);
work->fence = NULL;
}
amdgpu_flip_wait_fence(adev, &work->excl);
for (i = 0; i < work->shared_count; ++i)
amdgpu_flip_wait_fence(adev, &work->shared[i]);
/* We borrow the event spin lock for protecting flip_status */
spin_lock_irqsave(&crtc->dev->event_lock, flags);
@@ -108,6 +117,7 @@ static void amdgpu_unpin_work_func(struct work_struct *__work)
DRM_ERROR("failed to reserve buffer after flip\n");
drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
kfree(work->shared);
kfree(work);
}
@@ -127,7 +137,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
unsigned long flags;
u64 tiling_flags;
u64 base;
int r;
int i, r;
work = kzalloc(sizeof *work, GFP_KERNEL);
if (work == NULL)
@@ -167,7 +177,19 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
goto cleanup;
}
work->fence = fence_get(reservation_object_get_excl(new_rbo->tbo.resv));
r = reservation_object_get_fences_rcu(new_rbo->tbo.resv, &work->excl,
&work->shared_count,
&work->shared);
if (unlikely(r != 0)) {
amdgpu_bo_unreserve(new_rbo);
DRM_ERROR("failed to get fences for buffer\n");
goto cleanup;
}
fence_get(work->excl);
for (i = 0; i < work->shared_count; ++i)
fence_get(work->shared[i]);
amdgpu_bo_get_tiling_flags(new_rbo, &tiling_flags);
amdgpu_bo_unreserve(new_rbo);
@@ -212,7 +234,10 @@ pflip_cleanup:
cleanup:
drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
fence_put(work->fence);
fence_put(work->excl);
for (i = 0; i < work->shared_count; ++i)
fence_put(work->shared[i]);
kfree(work->shared);
kfree(work);
return r;


@@ -63,7 +63,7 @@ int amdgpu_disp_priority = 0;
int amdgpu_hw_i2c = 0;
int amdgpu_pcie_gen2 = -1;
int amdgpu_msi = -1;
int amdgpu_lockup_timeout = 10000;
int amdgpu_lockup_timeout = 0;
int amdgpu_dpm = -1;
int amdgpu_smc_load_fw = 1;
int amdgpu_aspm = -1;
@@ -75,6 +75,9 @@ int amdgpu_deep_color = 0;
int amdgpu_vm_size = 8;
int amdgpu_vm_block_size = -1;
int amdgpu_exp_hw_support = 0;
int amdgpu_enable_scheduler = 0;
int amdgpu_sched_jobs = 16;
int amdgpu_sched_hw_submission = 2;
MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -103,7 +106,7 @@ module_param_named(pcie_gen2, amdgpu_pcie_gen2, int, 0444);
MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(msi, amdgpu_msi, int, 0444);
MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (defaul 10000 = 10 seconds, 0 = disable)");
MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 0 = disable)");
module_param_named(lockup_timeout, amdgpu_lockup_timeout, int, 0444);
MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)");
@@ -139,36 +142,45 @@ module_param_named(vm_block_size, amdgpu_vm_block_size, int, 0444);
MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
MODULE_PARM_DESC(enable_scheduler, "enable SW GPU scheduler (1 = enable, 0 = disable (default))");
module_param_named(enable_scheduler, amdgpu_enable_scheduler, int, 0444);
MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 16)");
module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)");
module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
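All three scheduler knobs are declared with permission 0444, readable through sysfs but not writable at runtime, so they are set at module load time, e.g. modprobe amdgpu enable_scheduler=1 sched_jobs=16 sched_hw_submission=2.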
static struct pci_device_id pciidlist[] = {
#ifdef CONFIG_DRM_AMDGPU_CIK
/* Kaveri */
{0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
{0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
{0x1002, 0x1306, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
{0x1002, 0x1307, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
{0x1002, 0x1309, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
{0x1002, 0x130A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
{0x1002, 0x130B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
{0x1002, 0x130C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
{0x1002, 0x130D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
{0x1002, 0x130E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
{0x1002, 0x130F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
{0x1002, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
{0x1002, 0x1311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
{0x1002, 0x1312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
{0x1002, 0x1313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
{0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
{0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
{0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
{0x1002, 0x1318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
{0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
{0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
{0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
{0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
{0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
{0x1002, 0x1306, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
{0x1002, 0x1307, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
{0x1002, 0x1309, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
{0x1002, 0x130A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
{0x1002, 0x130B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
{0x1002, 0x130C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
{0x1002, 0x130D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
{0x1002, 0x130E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
{0x1002, 0x130F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
{0x1002, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
{0x1002, 0x1311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
{0x1002, 0x1312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
{0x1002, 0x1313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
{0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
{0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
{0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
{0x1002, 0x1318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
{0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
{0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
{0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
/* Bonaire */
{0x1002, 0x6640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMDGPU_IS_MOBILITY},
{0x1002, 0x6641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMDGPU_IS_MOBILITY},
{0x1002, 0x6646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMDGPU_IS_MOBILITY},
{0x1002, 0x6647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMDGPU_IS_MOBILITY},
{0x1002, 0x6640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMD_IS_MOBILITY},
{0x1002, 0x6641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMD_IS_MOBILITY},
{0x1002, 0x6646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMD_IS_MOBILITY},
{0x1002, 0x6647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMD_IS_MOBILITY},
{0x1002, 0x6649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE},
{0x1002, 0x6650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE},
{0x1002, 0x6651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE},
@@ -190,39 +202,39 @@ static struct pci_device_id pciidlist[] = {
{0x1002, 0x67BA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
{0x1002, 0x67BE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
/* Kabini */
{0x1002, 0x9830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
{0x1002, 0x9831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
{0x1002, 0x9832, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
{0x1002, 0x9833, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
{0x1002, 0x9834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
{0x1002, 0x9835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
{0x1002, 0x9836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
{0x1002, 0x9837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
{0x1002, 0x9838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
{0x1002, 0x9839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
{0x1002, 0x983a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
{0x1002, 0x983b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
{0x1002, 0x983c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
{0x1002, 0x983d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
{0x1002, 0x983e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
{0x1002, 0x983f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
{0x1002, 0x9830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
{0x1002, 0x9831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
{0x1002, 0x9832, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
{0x1002, 0x9833, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
{0x1002, 0x9834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
{0x1002, 0x9835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
{0x1002, 0x9836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
{0x1002, 0x9837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
{0x1002, 0x9838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
{0x1002, 0x9839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
{0x1002, 0x983a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
{0x1002, 0x983b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
{0x1002, 0x983c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
{0x1002, 0x983d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
{0x1002, 0x983e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
{0x1002, 0x983f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
/* mullins */
{0x1002, 0x9850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
{0x1002, 0x9851, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
{0x1002, 0x9852, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
{0x1002, 0x9853, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
{0x1002, 0x9854, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
{0x1002, 0x9855, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
{0x1002, 0x9856, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
{0x1002, 0x9857, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
{0x1002, 0x9858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
{0x1002, 0x9859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
{0x1002, 0x985A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
{0x1002, 0x985B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
{0x1002, 0x985C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
{0x1002, 0x985D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
{0x1002, 0x985E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
{0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
{0x1002, 0x9850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
{0x1002, 0x9851, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
{0x1002, 0x9852, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
{0x1002, 0x9853, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
{0x1002, 0x9854, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
{0x1002, 0x9855, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
{0x1002, 0x9856, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
{0x1002, 0x9857, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
{0x1002, 0x9858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
{0x1002, 0x9859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
{0x1002, 0x985A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
{0x1002, 0x985B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
{0x1002, 0x985C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
{0x1002, 0x985D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
{0x1002, 0x985E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
{0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
#endif
/* topaz */
{0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
@@ -240,12 +252,14 @@ static struct pci_device_id pciidlist[] = {
{0x1002, 0x6930, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
{0x1002, 0x6938, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
{0x1002, 0x6939, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
/* fiji */
{0x1002, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_FIJI},
/* carrizo */
{0x1002, 0x9870, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMDGPU_IS_APU},
{0x1002, 0x9874, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMDGPU_IS_APU},
{0x1002, 0x9875, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMDGPU_IS_APU},
{0x1002, 0x9876, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMDGPU_IS_APU},
{0x1002, 0x9877, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMDGPU_IS_APU},
{0x1002, 0x9870, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
{0x1002, 0x9874, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
{0x1002, 0x9875, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
{0x1002, 0x9876, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
{0x1002, 0x9877, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
{0, 0, 0}
};
@@ -281,7 +295,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
unsigned long flags = ent->driver_data;
int ret;
if ((flags & AMDGPU_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) {
if ((flags & AMD_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) {
DRM_INFO("This hardware requires experimental hardware support.\n"
"See modparam exp_hw_support\n");
return -ENODEV;


@@ -31,7 +31,7 @@
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include "amdgpu_family.h"
#include "amd_shared.h"
/* General customization:
*/


@@ -53,9 +53,9 @@ static struct fb_ops amdgpufb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
.fb_imageblit = drm_fb_helper_cfb_imageblit,
.fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
.fb_setcmap = drm_fb_helper_setcmap,
@@ -179,7 +179,6 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
struct drm_mode_fb_cmd2 mode_cmd;
struct drm_gem_object *gobj = NULL;
struct amdgpu_bo *rbo = NULL;
struct device *device = &adev->pdev->dev;
int ret;
unsigned long tmp;
@@ -201,9 +200,9 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
rbo = gem_to_amdgpu_bo(gobj);
/* okay we have an object now allocate the framebuffer */
info = framebuffer_alloc(0, device);
if (info == NULL) {
ret = -ENOMEM;
info = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(info)) {
ret = PTR_ERR(info);
goto out_unref;
}
@@ -212,14 +211,13 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
ret = amdgpu_framebuffer_init(adev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
if (ret) {
DRM_ERROR("failed to initialize framebuffer %d\n", ret);
goto out_unref;
goto out_destroy_fbi;
}
fb = &rfbdev->rfb.base;
/* setup helper */
rfbdev->helper.fb = fb;
rfbdev->helper.fbdev = info;
memset_io(rbo->kptr, 0x0, amdgpu_bo_size(rbo));
@@ -239,11 +237,6 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
/* setup aperture base/size for vesafb takeover */
info->apertures = alloc_apertures(1);
if (!info->apertures) {
ret = -ENOMEM;
goto out_unref;
}
info->apertures->ranges[0].base = adev->ddev->mode_config.fb_base;
info->apertures->ranges[0].size = adev->mc.aper_size;
@@ -251,13 +244,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
if (info->screen_base == NULL) {
ret = -ENOSPC;
goto out_unref;
}
ret = fb_alloc_cmap(&info->cmap, 256, 0);
if (ret) {
ret = -ENOMEM;
goto out_unref;
goto out_destroy_fbi;
}
DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
@@ -269,6 +256,8 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
vga_switcheroo_client_fb_set(adev->ddev->pdev, info);
return 0;
out_destroy_fbi:
drm_fb_helper_release_fbi(helper);
out_unref:
if (rbo) {
@@ -290,17 +279,10 @@ void amdgpu_fb_output_poll_changed(struct amdgpu_device *adev)
static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfbdev)
{
struct fb_info *info;
struct amdgpu_framebuffer *rfb = &rfbdev->rfb;
if (rfbdev->helper.fbdev) {
info = rfbdev->helper.fbdev;
unregister_framebuffer(info);
if (info->cmap.len)
fb_dealloc_cmap(&info->cmap);
framebuffer_release(info);
}
drm_fb_helper_unregister_fbi(&rfbdev->helper);
drm_fb_helper_release_fbi(&rfbdev->helper);
if (rfb->obj) {
amdgpufb_destroy_pinned_object(rfb->obj);
@@ -395,7 +377,8 @@ void amdgpu_fbdev_fini(struct amdgpu_device *adev)
void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state)
{
if (adev->mode_info.rfbdev)
fb_set_suspend(adev->mode_info.rfbdev->helper.fbdev, state);
drm_fb_helper_set_suspend(&adev->mode_info.rfbdev->helper,
state);
}
int amdgpu_fbdev_total_size(struct amdgpu_device *adev)


@@ -126,7 +126,8 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
(*fence)->ring = ring;
(*fence)->owner = owner;
fence_init(&(*fence)->base, &amdgpu_fence_ops,
&adev->fence_queue.lock, adev->fence_context + ring->idx,
&ring->fence_drv.fence_queue.lock,
adev->fence_context + ring->idx,
(*fence)->seq);
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
(*fence)->seq,
@@ -135,38 +136,6 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
return 0;
}
/**
* amdgpu_fence_recreate - recreate a fence from an user fence
*
* @ring: ring the fence is associated with
* @owner: creator of the fence
* @seq: user fence sequence number
* @fence: resulting amdgpu fence object
*
* Recreates a fence command from the user fence sequence number (all asics).
* Returns 0 on success, -ENOMEM on failure.
*/
int amdgpu_fence_recreate(struct amdgpu_ring *ring, void *owner,
uint64_t seq, struct amdgpu_fence **fence)
{
struct amdgpu_device *adev = ring->adev;
if (seq > ring->fence_drv.sync_seq[ring->idx])
return -EINVAL;
*fence = kmalloc(sizeof(struct amdgpu_fence), GFP_KERNEL);
if ((*fence) == NULL)
return -ENOMEM;
(*fence)->seq = seq;
(*fence)->ring = ring;
(*fence)->owner = owner;
fence_init(&(*fence)->base, &amdgpu_fence_ops,
&adev->fence_queue.lock, adev->fence_context + ring->idx,
(*fence)->seq);
return 0;
}
/**
* amdgpu_fence_check_signaled - callback from fence_queue
*
@@ -196,9 +165,7 @@ static int amdgpu_fence_check_signaled(wait_queue_t *wait, unsigned mode, int fl
else
FENCE_TRACE(&fence->base, "was already signaled\n");
amdgpu_irq_put(adev, fence->ring->fence_drv.irq_src,
fence->ring->fence_drv.irq_type);
__remove_wait_queue(&adev->fence_queue, &fence->fence_wake);
__remove_wait_queue(&fence->ring->fence_drv.fence_queue, &fence->fence_wake);
fence_put(&fence->base);
} else
FENCE_TRACE(&fence->base, "pending\n");
@@ -299,14 +266,9 @@ static void amdgpu_fence_check_lockup(struct work_struct *work)
return;
}
if (fence_drv->delayed_irq && ring->adev->ddev->irq_enabled) {
fence_drv->delayed_irq = false;
amdgpu_irq_update(ring->adev, fence_drv->irq_src,
fence_drv->irq_type);
if (amdgpu_fence_activity(ring)) {
wake_up_all(&ring->fence_drv.fence_queue);
}
if (amdgpu_fence_activity(ring))
wake_up_all(&ring->adev->fence_queue);
else if (amdgpu_ring_is_lockup(ring)) {
/* good news we believe it's a lockup */
dev_warn(ring->adev->dev, "GPU lockup (current fence id "
@@ -316,7 +278,7 @@ static void amdgpu_fence_check_lockup(struct work_struct *work)
/* remember that we need a reset */
ring->adev->needs_reset = true;
wake_up_all(&ring->adev->fence_queue);
wake_up_all(&ring->fence_drv.fence_queue);
}
up_read(&ring->adev->exclusive_lock);
}
@@ -332,62 +294,8 @@ static void amdgpu_fence_check_lockup(struct work_struct *work)
*/
void amdgpu_fence_process(struct amdgpu_ring *ring)
{
uint64_t seq, last_seq, last_emitted;
unsigned count_loop = 0;
bool wake = false;
/* Note there is a scenario here for an infinite loop but it's
* very unlikely to happen. For it to happen, the current polling
* process need to be interrupted by another process and another
* process needs to update the last_seq btw the atomic read and
* xchg of the current process.
*
* More over for this to go in infinite loop there need to be
* continuously new fence signaled ie amdgpu_fence_read needs
* to return a different value each time for both the currently
* polling process and the other process that xchg the last_seq
* btw atomic read and xchg of the current process. And the
* value the other process set as last seq must be higher than
* the seq value we just read. Which means that current process
* need to be interrupted after amdgpu_fence_read and before
* atomic xchg.
*
* To be even more safe we count the number of time we loop and
* we bail after 10 loop just accepting the fact that we might
* have temporarly set the last_seq not to the true real last
* seq but to an older one.
*/
last_seq = atomic64_read(&ring->fence_drv.last_seq);
do {
last_emitted = ring->fence_drv.sync_seq[ring->idx];
seq = amdgpu_fence_read(ring);
seq |= last_seq & 0xffffffff00000000LL;
if (seq < last_seq) {
seq &= 0xffffffff;
seq |= last_emitted & 0xffffffff00000000LL;
}
if (seq <= last_seq || seq > last_emitted) {
break;
}
/* If we loop over we don't want to return without
* checking if a fence is signaled as it means that the
* seq we just read is different from the previous on.
*/
wake = true;
last_seq = seq;
if ((count_loop++) > 10) {
/* We looped over too many time leave with the
* fact that we might have set an older fence
* seq then the current real last seq as signaled
* by the hw.
*/
break;
}
} while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);
if (wake)
wake_up_all(&ring->adev->fence_queue);
if (amdgpu_fence_activity(ring))
wake_up_all(&ring->fence_drv.fence_queue);
}
/**
@@ -447,284 +355,49 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
{
struct amdgpu_fence *fence = to_amdgpu_fence(f);
struct amdgpu_ring *ring = fence->ring;
struct amdgpu_device *adev = ring->adev;
if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
return false;
if (down_read_trylock(&adev->exclusive_lock)) {
amdgpu_irq_get(adev, ring->fence_drv.irq_src,
ring->fence_drv.irq_type);
if (amdgpu_fence_activity(ring))
wake_up_all_locked(&adev->fence_queue);
/* did fence get signaled after we enabled the sw irq? */
if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq) {
amdgpu_irq_put(adev, ring->fence_drv.irq_src,
ring->fence_drv.irq_type);
up_read(&adev->exclusive_lock);
return false;
}
up_read(&adev->exclusive_lock);
} else {
/* we're probably in a lockup, lets not fiddle too much */
if (amdgpu_irq_get_delayed(adev, ring->fence_drv.irq_src,
ring->fence_drv.irq_type))
ring->fence_drv.delayed_irq = true;
amdgpu_fence_schedule_check(ring);
}
fence->fence_wake.flags = 0;
fence->fence_wake.private = NULL;
fence->fence_wake.func = amdgpu_fence_check_signaled;
__add_wait_queue(&adev->fence_queue, &fence->fence_wake);
__add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake);
fence_get(f);
FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
return true;
}
/**
* amdgpu_fence_signaled - check if a fence has signaled
/*
* amdgpu_fence_ring_wait_seq - wait for the seq of a specific ring to signal
* @ring: ring to wait on for the seq number
* @seq: seq number to wait for
*
* @fence: amdgpu fence object
*
* Check if the requested fence has signaled (all asics).
* Returns true if the fence has signaled or false if it has not.
* return value:
* 0: seq signaled, and GPU did not hang
* -EDEADLK: GPU hang detected
* -EINVAL: some parameter is not valid
*/
bool amdgpu_fence_signaled(struct amdgpu_fence *fence)
static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq)
{
if (!fence)
return true;
struct amdgpu_device *adev = ring->adev;
bool signaled = false;
if (amdgpu_fence_seq_signaled(fence->ring, fence->seq)) {
if (!fence_signal(&fence->base))
FENCE_TRACE(&fence->base, "signaled from amdgpu_fence_signaled\n");
return true;
}
BUG_ON(!ring);
if (seq > ring->fence_drv.sync_seq[ring->idx])
return -EINVAL;
return false;
}
if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
return 0;
/**
* amdgpu_fence_any_seq_signaled - check if any sequence number is signaled
*
* @adev: amdgpu device pointer
* @seq: sequence numbers
*
* Check if the last signaled fence sequnce number is >= the requested
* sequence number (all asics).
* Returns true if any has signaled (current value is >= requested value)
* or false if it has not. Helper function for amdgpu_fence_wait_seq.
*/
static bool amdgpu_fence_any_seq_signaled(struct amdgpu_device *adev, u64 *seq)
{
unsigned i;
wait_event(ring->fence_drv.fence_queue, (
(signaled = amdgpu_fence_seq_signaled(ring, seq))
|| adev->needs_reset));
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
if (!adev->rings[i] || !seq[i])
continue;
if (amdgpu_fence_seq_signaled(adev->rings[i], seq[i]))
return true;
}
return false;
}
/**
* amdgpu_fence_wait_seq_timeout - wait for a specific sequence numbers
*
* @adev: amdgpu device pointer
* @target_seq: sequence number(s) we want to wait for
* @intr: use interruptable sleep
* @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
*
* Wait for the requested sequence number(s) to be written by any ring
* (all asics). Sequnce number array is indexed by ring id.
* @intr selects whether to use interruptable (true) or non-interruptable
* (false) sleep when waiting for the sequence number. Helper function
* for amdgpu_fence_wait_*().
* Returns remaining time if the sequence number has passed, 0 when
* the wait timeout, or an error for all other cases.
* -EDEADLK is returned when a GPU lockup has been detected.
*/
static long amdgpu_fence_wait_seq_timeout(struct amdgpu_device *adev,
u64 *target_seq, bool intr,
long timeout)
{
uint64_t last_seq[AMDGPU_MAX_RINGS];
bool signaled;
int i;
long r;
if (timeout == 0) {
return amdgpu_fence_any_seq_signaled(adev, target_seq);
}
while (!amdgpu_fence_any_seq_signaled(adev, target_seq)) {
/* Save current sequence values, used to check for GPU lockups */
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
if (!ring || !target_seq[i])
continue;
last_seq[i] = atomic64_read(&ring->fence_drv.last_seq);
trace_amdgpu_fence_wait_begin(adev->ddev, i, target_seq[i]);
amdgpu_irq_get(adev, ring->fence_drv.irq_src,
ring->fence_drv.irq_type);
}
if (intr) {
r = wait_event_interruptible_timeout(adev->fence_queue, (
(signaled = amdgpu_fence_any_seq_signaled(adev, target_seq))
|| adev->needs_reset), AMDGPU_FENCE_JIFFIES_TIMEOUT);
} else {
r = wait_event_timeout(adev->fence_queue, (
(signaled = amdgpu_fence_any_seq_signaled(adev, target_seq))
|| adev->needs_reset), AMDGPU_FENCE_JIFFIES_TIMEOUT);
}
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
if (!ring || !target_seq[i])
continue;
amdgpu_irq_put(adev, ring->fence_drv.irq_src,
ring->fence_drv.irq_type);
trace_amdgpu_fence_wait_end(adev->ddev, i, target_seq[i]);
}
if (unlikely(r < 0))
return r;
if (unlikely(!signaled)) {
if (adev->needs_reset)
return -EDEADLK;
/* we were interrupted for some reason and fence
* isn't signaled yet, resume waiting */
if (r)
continue;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
if (!ring || !target_seq[i])
continue;
if (last_seq[i] != atomic64_read(&ring->fence_drv.last_seq))
break;
}
if (i != AMDGPU_MAX_RINGS)
continue;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
if (!adev->rings[i] || !target_seq[i])
continue;
if (amdgpu_ring_is_lockup(adev->rings[i]))
break;
}
if (i < AMDGPU_MAX_RINGS) {
/* good news we believe it's a lockup */
dev_warn(adev->dev, "GPU lockup (waiting for "
"0x%016llx last fence id 0x%016llx on"
" ring %d)\n",
target_seq[i], last_seq[i], i);
/* remember that we need an reset */
adev->needs_reset = true;
wake_up_all(&adev->fence_queue);
return -EDEADLK;
}
if (timeout < MAX_SCHEDULE_TIMEOUT) {
timeout -= AMDGPU_FENCE_JIFFIES_TIMEOUT;
if (timeout <= 0) {
return 0;
}
}
}
}
return timeout;
}
/**
* amdgpu_fence_wait - wait for a fence to signal
*
* @fence: amdgpu fence object
* @intr: use interruptable sleep
*
* Wait for the requested fence to signal (all asics).
* @intr selects whether to use interruptable (true) or non-interruptable
* (false) sleep when waiting for the fence.
* Returns 0 if the fence has passed, error for all other cases.
*/
int amdgpu_fence_wait(struct amdgpu_fence *fence, bool intr)
{
uint64_t seq[AMDGPU_MAX_RINGS] = {};
long r;
seq[fence->ring->idx] = fence->seq;
r = amdgpu_fence_wait_seq_timeout(fence->ring->adev, seq, intr, MAX_SCHEDULE_TIMEOUT);
if (r < 0) {
return r;
}
r = fence_signal(&fence->base);
if (!r)
FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
return 0;
}
/**
* amdgpu_fence_wait_any - wait for a fence to signal on any ring
*
* @adev: amdgpu device pointer
* @fences: amdgpu fence object(s)
* @intr: use interruptable sleep
*
* Wait for any requested fence to signal (all asics). Fence
* array is indexed by ring id. @intr selects whether to use
* interruptable (true) or non-interruptable (false) sleep when
* waiting for the fences. Used by the suballocator.
* Returns 0 if any fence has passed, error for all other cases.
*/
int amdgpu_fence_wait_any(struct amdgpu_device *adev,
struct amdgpu_fence **fences,
bool intr)
{
uint64_t seq[AMDGPU_MAX_RINGS];
unsigned i, num_rings = 0;
long r;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
seq[i] = 0;
if (!fences[i]) {
continue;
}
seq[i] = fences[i]->seq;
++num_rings;
}
/* nothing to wait for ? */
if (num_rings == 0)
return -ENOENT;
r = amdgpu_fence_wait_seq_timeout(adev, seq, intr, MAX_SCHEDULE_TIMEOUT);
if (r < 0) {
return r;
}
return 0;
if (signaled)
return 0;
else
return -EDEADLK;
}
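Note the structural change: the helper sleeps on the ring's own fence_drv.fence_queue rather than the old device-global queue, so fence activity on one ring no longer wakes waiters on every other ring. Schematic pairing per ring:
	/* producer (IRQ path):  amdgpu_fence_process(ring) ->
	 *                       wake_up_all(&ring->fence_drv.fence_queue);
	 * consumer (waiter):    wait_event(ring->fence_drv.fence_queue,
	 *                                  seq_signaled || adev->needs_reset);
	 */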
/**
@@ -739,19 +412,12 @@ int amdgpu_fence_wait_any(struct amdgpu_device *adev,
*/
int amdgpu_fence_wait_next(struct amdgpu_ring *ring)
{
uint64_t seq[AMDGPU_MAX_RINGS] = {};
long r;
uint64_t seq = atomic64_read(&ring->fence_drv.last_seq) + 1ULL;
seq[ring->idx] = atomic64_read(&ring->fence_drv.last_seq) + 1ULL;
if (seq[ring->idx] >= ring->fence_drv.sync_seq[ring->idx]) {
/* nothing to wait for, last_seq is
already the last emited fence */
if (seq >= ring->fence_drv.sync_seq[ring->idx])
return -ENOENT;
}
r = amdgpu_fence_wait_seq_timeout(ring->adev, seq, false, MAX_SCHEDULE_TIMEOUT);
if (r < 0)
return r;
return 0;
return amdgpu_fence_ring_wait_seq(ring, seq);
}
/**
@@ -766,23 +432,12 @@ int amdgpu_fence_wait_next(struct amdgpu_ring *ring)
*/
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
uint64_t seq[AMDGPU_MAX_RINGS] = {};
long r;
uint64_t seq = ring->fence_drv.sync_seq[ring->idx];
seq[ring->idx] = ring->fence_drv.sync_seq[ring->idx];
if (!seq[ring->idx])
if (!seq)
return 0;
r = amdgpu_fence_wait_seq_timeout(adev, seq, false, MAX_SCHEDULE_TIMEOUT);
if (r < 0) {
if (r == -EDEADLK)
return -EDEADLK;
dev_err(adev->dev, "error waiting for ring[%d] to become idle (%ld)\n",
ring->idx, r);
}
return 0;
return amdgpu_fence_ring_wait_seq(ring, seq);
}
/**
@@ -933,9 +588,12 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
}
amdgpu_fence_write(ring, atomic64_read(&ring->fence_drv.last_seq));
ring->fence_drv.initialized = true;
amdgpu_irq_get(adev, irq_src, irq_type);
ring->fence_drv.irq_src = irq_src;
ring->fence_drv.irq_type = irq_type;
ring->fence_drv.initialized = true;
dev_info(adev->dev, "fence driver on ring %d uses gpu addr 0x%016llx, "
"cpu addr 0x%p\n", ring->idx,
ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
@@ -966,6 +624,16 @@ void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
INIT_DELAYED_WORK(&ring->fence_drv.lockup_work,
amdgpu_fence_check_lockup);
ring->fence_drv.ring = ring;
if (amdgpu_enable_scheduler) {
ring->scheduler = amd_sched_create((void *)ring->adev,
&amdgpu_sched_ops,
ring->idx, 5, 0,
amdgpu_sched_hw_submission);
if (!ring->scheduler)
DRM_ERROR("Failed to create scheduler on ring %d.\n",
ring->idx);
}
}
/**
@@ -982,7 +650,6 @@ void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
*/
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
init_waitqueue_head(&adev->fence_queue);
if (amdgpu_debugfs_fence_init(adev))
dev_err(adev->dev, "fence debugfs file creation failed\n");
@@ -1011,12 +678,77 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
/* no need to trigger GPU reset as we are unloading */
amdgpu_fence_driver_force_completion(adev);
}
wake_up_all(&adev->fence_queue);
wake_up_all(&ring->fence_drv.fence_queue);
amdgpu_irq_put(adev, ring->fence_drv.irq_src,
ring->fence_drv.irq_type);
if (ring->scheduler)
amd_sched_destroy(ring->scheduler);
ring->fence_drv.initialized = false;
}
mutex_unlock(&adev->ring_lock);
}
/**
* amdgpu_fence_driver_suspend - suspend the fence driver
* for all possible rings.
*
* @adev: amdgpu device pointer
*
* Suspend the fence driver for all possible rings (all asics).
*/
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
int i, r;
mutex_lock(&adev->ring_lock);
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
if (!ring || !ring->fence_drv.initialized)
continue;
/* wait for gpu to finish processing current batch */
r = amdgpu_fence_wait_empty(ring);
if (r) {
/* delay GPU reset to resume */
amdgpu_fence_driver_force_completion(adev);
}
/* disable the interrupt */
amdgpu_irq_put(adev, ring->fence_drv.irq_src,
ring->fence_drv.irq_type);
}
mutex_unlock(&adev->ring_lock);
}
/**
* amdgpu_fence_driver_resume - resume the fence driver
* for all possible rings.
*
* @adev: amdgpu device pointer
*
* Resume the fence driver for all possible rings (all asics).
* Not all asics have all rings, so each asic will only
* start the fence driver on the rings it has using
* amdgpu_fence_driver_start_ring().
*/
void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
{
int i;
mutex_lock(&adev->ring_lock);
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
if (!ring || !ring->fence_drv.initialized)
continue;
/* enable the interrupt */
amdgpu_irq_get(adev, ring->fence_drv.irq_src,
ring->fence_drv.irq_type);
}
mutex_unlock(&adev->ring_lock);
}
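With this pair, suspend and resume are symmetric around the fence interrupt reference; the drain logic that used to live in amdgpu_suspend_kms() (see the amdgpu_device.c hunk above) now sits next to its resume counterpart. In outline:
	/* amdgpu_fence_driver_suspend(), per initialized ring:
	 *   amdgpu_fence_wait_empty(ring);            drain in-flight fences
	 *   (on failure: amdgpu_fence_driver_force_completion(adev))
	 *   amdgpu_irq_put(adev, irq_src, irq_type);  disable the fence irq
	 *
	 * amdgpu_fence_driver_resume(), per initialized ring:
	 *   amdgpu_irq_get(adev, irq_src, irq_type);  re-enable the fence irq
	 */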
/**
* amdgpu_fence_driver_force_completion - force all fence waiter to complete
*
@@ -1104,6 +836,22 @@ static inline bool amdgpu_test_signaled(struct amdgpu_fence *fence)
return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
}
static inline bool amdgpu_test_signaled_any(struct amdgpu_fence **fences)
{
int idx;
struct amdgpu_fence *fence;
idx = 0;
for (idx = 0; idx < AMDGPU_MAX_RINGS; ++idx) {
fence = fences[idx];
if (fence) {
if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
return true;
}
}
return false;
}
struct amdgpu_wait_cb {
struct fence_cb base;
struct task_struct *task;
@@ -1119,14 +867,35 @@ static void amdgpu_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
static signed long amdgpu_fence_default_wait(struct fence *f, bool intr,
signed long t)
{
struct amdgpu_fence *array[AMDGPU_MAX_RINGS];
struct amdgpu_fence *fence = to_amdgpu_fence(f);
struct amdgpu_device *adev = fence->ring->adev;
struct amdgpu_wait_cb cb;
cb.task = current;
memset(&array[0], 0, sizeof(array));
array[0] = fence;
if (fence_add_callback(f, &cb.base, amdgpu_fence_wait_cb))
return t;
return amdgpu_fence_wait_any(adev, array, intr, t);
}
/* wait until any fence in array signaled */
signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
struct amdgpu_fence **array, bool intr, signed long t)
{
long idx = 0;
struct amdgpu_wait_cb cb[AMDGPU_MAX_RINGS];
struct amdgpu_fence *fence;
BUG_ON(!array);
for (idx = 0; idx < AMDGPU_MAX_RINGS; ++idx) {
fence = array[idx];
if (fence) {
cb[idx].task = current;
if (fence_add_callback(&fence->base,
&cb[idx].base, amdgpu_fence_wait_cb))
return t; /* return if fence is already signaled */
}
}
while (t > 0) {
if (intr)
@@ -1135,10 +904,10 @@ static signed long amdgpu_fence_default_wait(struct fence *f, bool intr,
set_current_state(TASK_UNINTERRUPTIBLE);
/*
* amdgpu_test_signaled must be called after
* amdgpu_test_signaled_any must be called after
* set_current_state to prevent a race with wake_up_process
*/
if (amdgpu_test_signaled(fence))
if (amdgpu_test_signaled_any(array))
break;
if (adev->needs_reset) {
@@ -1153,7 +922,13 @@ static signed long amdgpu_fence_default_wait(struct fence *f, bool intr,
}
__set_current_state(TASK_RUNNING);
fence_remove_callback(f, &cb.base);
idx = 0;
for (idx = 0; idx < AMDGPU_MAX_RINGS; ++idx) {
fence = array[idx];
if (fence)
fence_remove_callback(&fence->base, &cb[idx].base);
}
return t;
}
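The ordering comment above is the load-bearing part of this loop: the signaled test must come after set_current_state(), or a wake-up arriving between the test and the sleep would be lost. The generic shape of the idiom (condition() stands in for amdgpu_test_signaled_any()):
	while (t > 0) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (condition())      /* checked only after the state change */
			break;
		t = schedule_timeout(t);
	}
	__set_current_state(TASK_RUNNING);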

Some files were not shown because too many files have changed in this diff.