Merge remote-tracking branch 'net-next/master' into mac80211-next

Merge net-next, which pulled in net, so I can merge a few more
patches that would otherwise conflict.

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Johannes Berg 2018-10-08 09:48:31 +02:00
commit 188de5dd80
916 changed files with 28873 additions and 19444 deletions


@@ -1,4 +1,4 @@
-Device-Tree bindings for input/gpio_keys.c keyboard driver
+Device-Tree bindings for input/keyboard/gpio_keys.c keyboard driver

 Required properties:
 - compatible = "gpio-keys";


@@ -41,3 +41,19 @@ Example:
 		compatible = "mscc,ocelot-cpu-syscon", "syscon";
 		reg = <0x70000000 0x2c>;
 	};
+
+o HSIO regs:
+
+The SoC has a few registers (HSIO) handling miscellaneous functionalities:
+configuration and status of PLL5, RCOMP, SyncE, SerDes configurations and
+status, SerDes muxing and a thermal sensor.
+
+Required properties:
+- compatible: Should be "mscc,ocelot-hsio", "syscon", "simple-mfd"
+- reg : Should contain registers location and length
+
+Example:
+	syscon@10d0000 {
+		compatible = "mscc,ocelot-hsio", "syscon", "simple-mfd";
+		reg = <0x10d0000 0x10000>;
+	};
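
Because the HSIO block is exposed as a syscon/simple-mfd, child drivers reach
these registers through a regmap rather than ioremapping them directly. The
following is a minimal sketch of that lookup (not part of this commit; the
register offset HSIO_PLL5G_STATUS0 is a hypothetical placeholder):

	#include <linux/err.h>
	#include <linux/mfd/syscon.h>
	#include <linux/platform_device.h>
	#include <linux/regmap.h>

	static int example_probe(struct platform_device *pdev)
	{
	    struct regmap *hsio;
	    u32 status;

	    /* the child node sits under the "mscc,ocelot-hsio" parent */
	    hsio = syscon_node_to_regmap(pdev->dev.parent->of_node);
	    if (IS_ERR(hsio))
	        return PTR_ERR(hsio);

	    /* HSIO_PLL5G_STATUS0 stands in for a real register offset */
	    return regmap_read(hsio, HSIO_PLL5G_STATUS0, &status);
	}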


@@ -12,7 +12,6 @@ Required properties:
   - "sys"
   - "rew"
   - "qs"
-  - "hsio"
   - "qsys"
   - "ana"
   - "portX" with X from 0 to the number of last port index available on that
@@ -45,7 +44,6 @@ Example:
 		reg = <0x1010000 0x10000>,
 		      <0x1030000 0x10000>,
 		      <0x1080000 0x100>,
-		      <0x10d0000 0x10000>,
 		      <0x11e0000 0x100>,
 		      <0x11f0000 0x100>,
 		      <0x1200000 0x100>,
@@ -59,10 +57,9 @@ Example:
 		      <0x1280000 0x100>,
 		      <0x1800000 0x80000>,
 		      <0x1880000 0x10000>;
-		reg-names = "sys", "rew", "qs", "hsio", "port0",
-			    "port1", "port2", "port3", "port4", "port5",
-			    "port6", "port7", "port8", "port9", "port10",
-			    "qsys", "ana";
+		reg-names = "sys", "rew", "qs", "port0", "port1", "port2",
+			    "port3", "port4", "port5", "port6", "port7",
+			    "port8", "port9", "port10", "qsys", "ana";
 		interrupts = <21 22>;
 		interrupt-names = "xtr", "inj";


@@ -0,0 +1,43 @@
+Microsemi Ocelot SerDes muxing driver
+-------------------------------------
+
+On Microsemi Ocelot, there is a handful of registers in HSIO address
+space for setting up the SerDes to switch port muxing.
+
+A SerDes X can be "muxed" to work with switch port Y or Z for example.
+One specific SerDes can also be used as a PCIe interface.
+
+Hence, a SerDes represents an interface, be it an Ethernet or a PCIe one.
+
+There are two kinds of SerDes: SERDES1G supports 10/100Mbps in
+half/full-duplex and 1000Mbps in full-duplex mode while SERDES6G supports
+10/100Mbps in half/full-duplex and 1000/2500Mbps in full-duplex mode.
+
+Also, SERDES6G number (aka "macro") 0 is the only interface supporting
+QSGMII.
+
+This is a child of the HSIO syscon ("mscc,ocelot-hsio", see
+Documentation/devicetree/bindings/mips/mscc.txt) on the Microsemi Ocelot.
+
+Required properties:
+
+- compatible: should be "mscc,vsc7514-serdes"
+- #phy-cells : from the generic phy bindings, must be 2.
+	       The first number defines the input port to use for a given
+	       SerDes macro. The second defines the macro to use. They are
+	       defined in dt-bindings/phy/phy-ocelot-serdes.h
+
+Example:
+
+	serdes: serdes {
+		compatible = "mscc,vsc7514-serdes";
+		#phy-cells = <2>;
+	};
+
+	ethernet {
+		port1 {
+			phy-handle = <&phy_foo>;
+			/* Link SERDES1G_5 to port1 */
+			phys = <&serdes 1 SERDES1G_5>;
+		};
+	};
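
On the consumer side, a switch-port driver resolves the two-cell specifier
above through the generic PHY framework. A minimal sketch, assuming the
standard PHY consumer API (port lookup and cleanup elided):

	#include <linux/err.h>
	#include <linux/of.h>
	#include <linux/phy/phy.h>

	/* resolve "phys = <&serdes 1 SERDES1G_5>" on a port node */
	static int port_attach_serdes(struct device *dev,
	                              struct device_node *portnp)
	{
	    struct phy *serdes;
	    int err;

	    serdes = devm_of_phy_get_by_index(dev, portnp, 0);
	    if (IS_ERR(serdes))
	        return PTR_ERR(serdes);

	    err = phy_init(serdes);    /* programs the requested muxing */
	    if (err)
	        return err;

	    return phy_power_on(serdes);
	}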


@@ -15,7 +15,8 @@ than x86. Check the v86d documentation for a list of currently supported
 arches.

 v86d source code can be downloaded from the following website:
-  http://dev.gentoo.org/~spock/projects/uvesafb
+
+  https://github.com/mjanusz/v86d

 Please refer to the v86d documentation for detailed configuration and
 installation instructions.
@@ -177,7 +178,7 @@ from the Video BIOS if you set pixclock to 0 in fb_var_screeninfo.
 --
 Michal Januszewski <spock@gentoo.org>
-Last updated: 2009-03-30
+Last updated: 2017-10-10

 Documentation of the uvesafb options is loosely based on vesafb.txt.


@@ -0,0 +1,18 @@
+enable_sriov		[DEVICE, GENERIC]
+			Configuration mode: Permanent
+
+ignore_ari		[DEVICE, GENERIC]
+			Configuration mode: Permanent
+
+msix_vec_per_pf_max	[DEVICE, GENERIC]
+			Configuration mode: Permanent
+
+msix_vec_per_pf_min	[DEVICE, GENERIC]
+			Configuration mode: Permanent
+
+gre_ver_check		[DEVICE, DRIVER-SPECIFIC]
+			Generic Routing Encapsulation (GRE) version check will
+			be enabled in the device. If disabled, the device skips
+			version checking for incoming packets.
+			Type: Boolean
+			Configuration mode: Permanent


@@ -0,0 +1,42 @@
+Devlink configuration parameters
+================================
+Following is the list of configuration parameters exposed via the devlink
+interface. Each parameter is a device-level parameter and can be either
+generic or driver-specific.
+
+Note that the driver-specific files should list the generic params they
+support, along with the supported config modes.
+
+Each parameter can be set in different configuration modes:
+	runtime		- set while the driver is running, no reset required.
+	driverinit	- applied while the driver initializes, requires a
+			  driver restart via the devlink reload command.
+	permanent	- written to the device's non-volatile memory, hard
+			  reset required.
+
+Following is the list of parameters:
+====================================
+enable_sriov		[DEVICE, GENERIC]
+			Enable Single Root I/O Virtualisation (SRIOV) in
+			the device.
+			Type: Boolean
+
+ignore_ari		[DEVICE, GENERIC]
+			Ignore Alternative Routing-ID Interpretation (ARI)
+			capability. If enabled, the adapter will ignore the
+			ARI capability even when the platform has enabled
+			support for it, and will create the same number of
+			partitions as when the platform does not support ARI.
+			Type: Boolean
+
+msix_vec_per_pf_max	[DEVICE, GENERIC]
+			Provides the maximum number of MSIX interrupts that
+			a device can create. The value is the same across all
+			physical functions (PFs) in the device.
+			Type: u32
+
+msix_vec_per_pf_min	[DEVICE, GENERIC]
+			Provides the minimum number of MSIX interrupts
+			required for device initialization. The value is the
+			same across all physical functions (PFs) in the
+			device.
+			Type: u32
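
On the driver side, generic parameters such as the ones above are declared
with the devlink param helpers and registered once the devlink instance
exists. A minimal sketch, assuming the 4.19-era devlink API (the NULL
get/set/validate callbacks are a simplification; real drivers supply them):

	#include <net/devlink.h>

	static const struct devlink_param my_params[] = {
	    /* generic "enable_sriov" knob, NVM-backed (permanent cmode) */
	    DEVLINK_PARAM_GENERIC(ENABLE_SRIOV,
	                          BIT(DEVLINK_PARAM_CMODE_PERMANENT),
	                          NULL, NULL, NULL),
	    /* generic "ignore_ari" knob */
	    DEVLINK_PARAM_GENERIC(IGNORE_ARI,
	                          BIT(DEVLINK_PARAM_CMODE_PERMANENT),
	                          NULL, NULL, NULL),
	};

	/* called from probe, after devlink_register() */
	static int my_register_params(struct devlink *devlink)
	{
	    return devlink_params_register(devlink, my_params,
	                                   ARRAY_SIZE(my_params));
	}

From userspace the same knobs are then driven with the devlink tool, e.g.
"devlink dev param set pci/0000:01:00.0 name enable_sriov value true
cmode permanent".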


@@ -425,7 +425,7 @@ tcp_mtu_probing - INTEGER
 	  1 - Disabled by default, enabled when an ICMP black hole detected
 	  2 - Always enabled, use initial MSS of tcp_base_mss.

-tcp_probe_interval - INTEGER
+tcp_probe_interval - UNSIGNED INTEGER
 	Controls how often to start TCP Packetization-Layer Path MTU
 	Discovery reprobe. The default is reprobing every 10 minutes as
 	per RFC4821.


@@ -1069,6 +1069,31 @@ The kernel interface functions are as follows:

      This function may transmit a PING ACK.

+ (*) Get reply timestamp.
+
+	bool rxrpc_kernel_get_reply_time(struct socket *sock,
+					 struct rxrpc_call *call,
+					 ktime_t *_ts)
+
+     This allows the timestamp on the first DATA packet of the reply of a
+     client call to be queried, provided that it is still in the Rx ring.  If
+     successful, the timestamp will be stored into *_ts and true will be
+     returned; false will be returned otherwise.
+
+ (*) Get remote client epoch.
+
+	u32 rxrpc_kernel_get_epoch(struct socket *sock,
+				   struct rxrpc_call *call)
+
+     This allows the epoch that's contained in packets of an incoming client
+     call to be queried.  This value is returned.  The function is always
+     successful if the call is still in progress.  It shouldn't be called once
+     the call has expired.  Note that calling this on a local client call only
+     returns the local epoch.
+
+     This value can be used to determine if the remote client has been
+     restarted, as it shouldn't change otherwise.
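
Taken together, a kernel user of rxrpc (e.g. an AFS-like filesystem) might
combine the two calls roughly as follows; this is a sketch built only from
the signatures above, with all call management elided:

	#include <net/af_rxrpc.h>

	static void example_check_reply(struct socket *sock,
	                                struct rxrpc_call *call,
	                                u32 *last_epoch)
	{
	    ktime_t ts;
	    u32 epoch;

	    /* timestamp of the reply's first DATA packet, if still queued */
	    if (rxrpc_kernel_get_reply_time(sock, call, &ts))
	        pr_info("reply arrived at %lld ns\n", ktime_to_ns(ts));

	    /* a changed epoch suggests the remote client restarted */
	    epoch = rxrpc_kernel_get_epoch(sock, call);
	    if (*last_epoch && epoch != *last_epoch)
	        pr_warn("peer epoch changed: possible restart\n");
	    *last_epoch = epoch;
	}
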
 =======================
 CONFIGURABLE PARAMETERS


@@ -324,7 +324,6 @@ F: Documentation/ABI/testing/sysfs-bus-acpi
 F: Documentation/ABI/testing/configfs-acpi
 F: drivers/pci/*acpi*
 F: drivers/pci/*/*acpi*
-F: drivers/pci/*/*/*acpi*
 F: tools/power/acpi/

 ACPI APEI
@@ -1251,7 +1250,7 @@ N: meson
 ARM/Annapurna Labs ALPINE ARCHITECTURE
 M: Tsahee Zidenberg <tsahee@annapurnalabs.com>
-M: Antoine Tenart <antoine.tenart@free-electrons.com>
+M: Antoine Tenart <antoine.tenart@bootlin.com>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S: Maintained
 F: arch/arm/mach-alpine/
@@ -2956,7 +2955,6 @@ F: include/linux/bcm963xx_tag.h
 BROADCOM BNX2 GIGABIT ETHERNET DRIVER
 M: Rasesh Mody <rasesh.mody@cavium.com>
-M: Harish Patil <harish.patil@cavium.com>
 M: Dept-GELinuxNICDev@cavium.com
 L: netdev@vger.kernel.org
 S: Supported
@@ -2977,6 +2975,7 @@ F: drivers/scsi/bnx2i/
 BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
 M: Ariel Elior <ariel.elior@cavium.com>
+M: Sudarsana Kalluru <sudarsana.kalluru@cavium.com>
 M: everest-linux-l2@cavium.com
 L: netdev@vger.kernel.org
 S: Supported
@@ -5470,7 +5469,8 @@ S: Odd Fixes
 F: drivers/net/ethernet/agere/

 ETHERNET BRIDGE
-M: Stephen Hemminger <stephen@networkplumber.org>
+M: Roopa Prabhu <roopa@cumulusnetworks.com>
+M: Nikolay Aleksandrov <nikolay@cumulusnetworks.com>
 L: bridge@lists.linux-foundation.org (moderated for non-subscribers)
 L: netdev@vger.kernel.org
 W: http://www.linuxfoundation.org/en/Net:Bridge
@@ -8190,7 +8190,7 @@ S: Maintained
 F: net/dsa/tag_gswip.c
 F: drivers/net/ethernet/lantiq_xrx200.c
 F: drivers/net/dsa/lantiq_pce.h
-F: drivers/net/dsa/intel_gswip.c
+F: drivers/net/dsa/lantiq_gswip.c

 LANTIQ MIPS ARCHITECTURE
 M: John Crispin <john@phrozen.org>
@@ -8607,7 +8606,6 @@ F: include/linux/spinlock*.h
 F: arch/*/include/asm/spinlock*.h
 F: include/linux/rwlock*.h
 F: include/linux/mutex*.h
-F: arch/*/include/asm/mutex*.h
 F: include/linux/rwsem*.h
 F: arch/*/include/asm/rwsem.h
 F: include/linux/seqlock.h
@@ -8753,7 +8752,7 @@ M: Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 L: netdev@vger.kernel.org
 S: Maintained
 F: drivers/net/dsa/mv88e6xxx/
-F: linux/platform_data/mv88e6xxx.h
+F: include/linux/platform_data/mv88e6xxx.h
 F: Documentation/devicetree/bindings/net/dsa/marvell.txt

 MARVELL ARMADA DRM SUPPORT
@@ -9725,13 +9724,6 @@ Q: http://patchwork.linuxtv.org/project/linux-media/list/
 S: Maintained
 F: drivers/media/dvb-frontends/mn88473*

-PCI DRIVER FOR MOBIVEIL PCIE IP
-M: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
-L: linux-pci@vger.kernel.org
-S: Supported
-F: Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
-F: drivers/pci/controller/pcie-mobiveil.c
-
 MODULE SUPPORT
 M: Jessica Yu <jeyu@kernel.org>
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/jeyu/linux.git modules-next
@@ -10958,7 +10950,7 @@ M: Willy Tarreau <willy@haproxy.com>
 M: Ksenija Stanojevic <ksenija.stanojevic@gmail.com>
 S: Odd Fixes
 F: Documentation/auxdisplay/lcd-panel-cgram.txt
-F: drivers/misc/panel.c
+F: drivers/auxdisplay/panel.c

 PARALLEL PORT SUBSYSTEM
 M: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
@@ -11146,6 +11138,13 @@ F: include/uapi/linux/switchtec_ioctl.h
 F: include/linux/switchtec.h
 F: drivers/ntb/hw/mscc/

+PCI DRIVER FOR MOBIVEIL PCIE IP
+M: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
+L: linux-pci@vger.kernel.org
+S: Supported
+F: Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
+F: drivers/pci/controller/pcie-mobiveil.c
+
 PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support)
 M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 M: Jason Cooper <jason@lakedaemon.net>
@@ -11212,8 +11211,14 @@ F: tools/pci/
 PCI ENHANCED ERROR HANDLING (EEH) FOR POWERPC
 M: Russell Currey <ruscur@russell.cc>
+M: Sam Bobroff <sbobroff@linux.ibm.com>
+M: Oliver O'Halloran <oohall@gmail.com>
 L: linuxppc-dev@lists.ozlabs.org
 S: Supported
+F: Documentation/PCI/pci-error-recovery.txt
+F: drivers/pci/pcie/aer.c
+F: drivers/pci/pcie/dpc.c
+F: drivers/pci/pcie/err.c
 F: Documentation/powerpc/eeh-pci-error-recovery.txt
 F: arch/powerpc/kernel/eeh*.c
 F: arch/powerpc/platforms/*/eeh*.c
@@ -11982,7 +11987,7 @@ F: Documentation/scsi/LICENSE.qla4xxx
 F: drivers/scsi/qla4xxx/

 QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
-M: Harish Patil <harish.patil@cavium.com>
+M: Shahed Shaikh <Shahed.Shaikh@cavium.com>
 M: Manish Chopra <manish.chopra@cavium.com>
 M: Dept-GELinuxNICDev@cavium.com
 L: netdev@vger.kernel.org
@@ -11990,7 +11995,6 @@ S: Supported
 F: drivers/net/ethernet/qlogic/qlcnic/

 QLOGIC QLGE 10Gb ETHERNET DRIVER
-M: Harish Patil <harish.patil@cavium.com>
 M: Manish Chopra <manish.chopra@cavium.com>
 M: Dept-GELinuxNICDev@cavium.com
 L: netdev@vger.kernel.org
@@ -15398,7 +15402,7 @@ S: Maintained
 UVESAFB DRIVER
 M: Michal Januszewski <spock@gentoo.org>
 L: linux-fbdev@vger.kernel.org
-W: http://dev.gentoo.org/~spock/projects/uvesafb/
+W: https://github.com/mjanusz/v86d
 S: Maintained
 F: Documentation/fb/uvesafb.txt
 F: drivers/video/fbdev/uvesafb.*


@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 19
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Merciless Moray

 # *DOCUMENTATION*


@@ -11,6 +11,7 @@
 #include "sama5d2-pinfunc.h"
 #include <dt-bindings/mfd/atmel-flexcom.h>
 #include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/at91.h>

 / {
 	model = "Atmel SAMA5D2 PTC EK";
@@ -299,6 +300,7 @@
 			 <PIN_PA30__NWE_NANDWE>,
 			 <PIN_PB2__NRD_NANDOE>;
 			bias-pull-up;
+			atmel,drive-strength = <ATMEL_PIO_DRVSTR_ME>;
 		};

 		ale_cle_rdy_cs {


@@ -106,21 +106,23 @@
 	global_timer: timer@1e200 {
 		compatible = "arm,cortex-a9-global-timer";
 		reg = <0x1e200 0x20>;
-		interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
 		clocks = <&axi_clk>;
 	};

 	local_timer: local-timer@1e600 {
 		compatible = "arm,cortex-a9-twd-timer";
 		reg = <0x1e600 0x20>;
-		interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) |
+					  IRQ_TYPE_EDGE_RISING)>;
 		clocks = <&axi_clk>;
 	};

 	twd_watchdog: watchdog@1e620 {
 		compatible = "arm,cortex-a9-twd-wdt";
 		reg = <0x1e620 0x20>;
-		interrupts = <GIC_PPI 14 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) |
+					  IRQ_TYPE_LEVEL_HIGH)>;
 	};

 	armpll: armpll {
@@ -158,7 +160,7 @@
 	serial0: serial@600 {
 		compatible = "brcm,bcm6345-uart";
 		reg = <0x600 0x1b>;
-		interrupts = <GIC_SPI 32 0>;
+		interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&periph_clk>;
 		clock-names = "periph";
 		status = "disabled";
@@ -167,7 +169,7 @@
 	serial1: serial@620 {
 		compatible = "brcm,bcm6345-uart";
 		reg = <0x620 0x1b>;
-		interrupts = <GIC_SPI 33 0>;
+		interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&periph_clk>;
 		clock-names = "periph";
 		status = "disabled";
@@ -180,7 +182,7 @@
 		reg = <0x2000 0x600>, <0xf0 0x10>;
 		reg-names = "nand", "nand-int-base";
 		status = "disabled";
-		interrupts = <GIC_SPI 38 0>;
+		interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
 		interrupt-names = "nand";
 	};


@@ -1078,8 +1078,8 @@
 			interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&rcc SPI6_K>;
 			resets = <&rcc SPI6_R>;
-			dmas = <&mdma1 34 0x0 0x40008 0x0 0x0 0>,
-			       <&mdma1 35 0x0 0x40002 0x0 0x0 0>;
+			dmas = <&mdma1 34 0x0 0x40008 0x0 0x0>,
+			       <&mdma1 35 0x0 0x40002 0x0 0x0>;
 			dma-names = "rx", "tx";
 			status = "disabled";
 		};


@@ -800,8 +800,7 @@
 		};

 		hdmi_phy: hdmi-phy@1ef0000 {
-			compatible = "allwinner,sun8i-r40-hdmi-phy",
-				     "allwinner,sun50i-a64-hdmi-phy";
+			compatible = "allwinner,sun8i-r40-hdmi-phy";
 			reg = <0x01ef0000 0x10000>;
 			clocks = <&ccu CLK_BUS_HDMI1>, <&ccu CLK_HDMI_SLOW>,
 				 <&ccu 7>, <&ccu 16>;


@@ -473,7 +473,7 @@ void pci_ioremap_set_mem_type(int mem_type)

 int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
 {
-	BUG_ON(offset + SZ_64K > IO_SPACE_LIMIT);
+	BUG_ON(offset + SZ_64K - 1 > IO_SPACE_LIMIT);

 	return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
 				  PCI_IO_VIRT_BASE + offset + SZ_64K,


@@ -413,3 +413,4 @@
 396	common	pkey_free		sys_pkey_free
 397	common	statx			sys_statx
 398	common	rseq			sys_rseq
+399	common	io_pgetevents		sys_io_pgetevents


@@ -57,6 +57,45 @@ static u64 core_reg_offset_from_id(u64 id)
 	return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
 }

+static int validate_core_offset(const struct kvm_one_reg *reg)
+{
+	u64 off = core_reg_offset_from_id(reg->id);
+	int size;
+
+	switch (off) {
+	case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
+	     KVM_REG_ARM_CORE_REG(regs.regs[30]):
+	case KVM_REG_ARM_CORE_REG(regs.sp):
+	case KVM_REG_ARM_CORE_REG(regs.pc):
+	case KVM_REG_ARM_CORE_REG(regs.pstate):
+	case KVM_REG_ARM_CORE_REG(sp_el1):
+	case KVM_REG_ARM_CORE_REG(elr_el1):
+	case KVM_REG_ARM_CORE_REG(spsr[0]) ...
+	     KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
+		size = sizeof(__u64);
+		break;
+
+	case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
+	     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
+		size = sizeof(__uint128_t);
+		break;
+
+	case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
+	case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
+		size = sizeof(__u32);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	if (KVM_REG_SIZE(reg->id) == size &&
+	    IS_ALIGNED(off, size / sizeof(__u32)))
+		return 0;
+
+	return -EINVAL;
+}
+
 static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 {
 	/*
@@ -76,6 +115,9 @@ static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
 		return -ENOENT;

+	if (validate_core_offset(reg))
+		return -EINVAL;
+
 	if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
 		return -EFAULT;

@@ -98,6 +140,9 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
 		return -ENOENT;

+	if (validate_core_offset(reg))
+		return -EINVAL;
+
 	if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
 		return -EINVAL;

@@ -107,17 +152,25 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	}

 	if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
-		u32 mode = (*(u32 *)valp) & PSR_AA32_MODE_MASK;
+		u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;
 		switch (mode) {
 		case PSR_AA32_MODE_USR:
+			if (!system_supports_32bit_el0())
+				return -EINVAL;
+			break;
 		case PSR_AA32_MODE_FIQ:
 		case PSR_AA32_MODE_IRQ:
 		case PSR_AA32_MODE_SVC:
 		case PSR_AA32_MODE_ABT:
 		case PSR_AA32_MODE_UND:
+			if (!vcpu_el1_is_32bit(vcpu))
+				return -EINVAL;
+			break;
 		case PSR_MODE_EL0t:
 		case PSR_MODE_EL1t:
 		case PSR_MODE_EL1h:
+			if (vcpu_el1_is_32bit(vcpu))
+				return -EINVAL;
+			break;
 		default:
 			err = -EINVAL;
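
For context, the offsets being validated here arrive from userspace through
the ONE_REG interface. A hedged userspace sketch (assuming the uapi macros
from <asm/kvm.h>; error handling omitted) of the kind of access the new
check vets:

	#include <stddef.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* write the vcpu's PC; size and alignment must now match */
	static int set_guest_pc(int vcpu_fd, __u64 pc)
	{
	    struct kvm_one_reg reg = {
	        .id   = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE |
	                KVM_REG_ARM_CORE_REG(regs.pc),
	        .addr = (__u64)&pc,
	    };

	    return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
	}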


@@ -117,11 +117,14 @@ static pte_t get_clear_flush(struct mm_struct *mm,
 		/*
 		 * If HW_AFDBM is enabled, then the HW could turn on
-		 * the dirty bit for any page in the set, so check
-		 * them all. All hugetlb entries are already young.
+		 * the dirty or accessed bit for any page in the set,
+		 * so check them all.
 		 */
 		if (pte_dirty(pte))
 			orig_pte = pte_mkdirty(orig_pte);
+
+		if (pte_young(pte))
+			orig_pte = pte_mkyoung(orig_pte);
 	}

 	if (valid) {
@@ -320,11 +323,40 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
 	return get_clear_flush(mm, addr, ptep, pgsize, ncontig);
 }

+/*
+ * huge_ptep_set_access_flags will update access flags (dirty, accessed)
+ * and write permission.
+ *
+ * For a contiguous huge pte range we need to check whether or not write
+ * permission has to change only on the first pte in the set. Then for
+ * all the contiguous ptes we need to check whether or not there is a
+ * discrepancy between dirty or young.
+ */
+static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig)
+{
+	int i;
+
+	if (pte_write(pte) != pte_write(huge_ptep_get(ptep)))
+		return 1;
+
+	for (i = 0; i < ncontig; i++) {
+		pte_t orig_pte = huge_ptep_get(ptep + i);
+
+		if (pte_dirty(pte) != pte_dirty(orig_pte))
+			return 1;
+
+		if (pte_young(pte) != pte_young(orig_pte))
+			return 1;
+	}
+
+	return 0;
+}
+
 int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 			       unsigned long addr, pte_t *ptep,
 			       pte_t pte, int dirty)
 {
-	int ncontig, i, changed = 0;
+	int ncontig, i;
 	size_t pgsize = 0;
 	unsigned long pfn = pte_pfn(pte), dpfn;
 	pgprot_t hugeprot;
@@ -336,19 +368,23 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 	ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize);
 	dpfn = pgsize >> PAGE_SHIFT;

-	orig_pte = get_clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);
-	if (!pte_same(orig_pte, pte))
-		changed = 1;
+	if (!__cont_access_flags_changed(ptep, pte, ncontig))
+		return 0;

-	/* Make sure we don't lose the dirty state */
+	orig_pte = get_clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);
+
+	/* Make sure we don't lose the dirty or young state */
 	if (pte_dirty(orig_pte))
 		pte = pte_mkdirty(pte);

+	if (pte_young(orig_pte))
+		pte = pte_mkyoung(pte);
+
 	hugeprot = pte_pgprot(pte);
 	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
 		set_pte_at(vma->vm_mm, addr, ptep, pfn_pte(pfn, hugeprot));

-	return changed;
+	return 1;
 }

 void huge_ptep_set_wrprotect(struct mm_struct *mm,


@@ -107,7 +107,6 @@
 			reg = <0x1010000 0x10000>,
 			      <0x1030000 0x10000>,
 			      <0x1080000 0x100>,
-			      <0x10d0000 0x10000>,
 			      <0x11e0000 0x100>,
 			      <0x11f0000 0x100>,
 			      <0x1200000 0x100>,
@@ -121,10 +120,10 @@
 			      <0x1280000 0x100>,
 			      <0x1800000 0x80000>,
 			      <0x1880000 0x10000>;
-			reg-names = "sys", "rew", "qs", "hsio", "port0",
-				    "port1", "port2", "port3", "port4", "port5",
-				    "port6", "port7", "port8", "port9", "port10",
-				    "qsys", "ana";
+			reg-names = "sys", "rew", "qs", "port0", "port1",
+				    "port2", "port3", "port4", "port5", "port6",
+				    "port7", "port8", "port9", "port10", "qsys",
+				    "ana";
 			interrupts = <21 22>;
 			interrupt-names = "xtr", "inj";
@@ -231,5 +230,15 @@
 			pinctrl-0 = <&miim1>;
 			status = "disabled";
 		};
+
+		hsio: syscon@10d0000 {
+			compatible = "mscc,ocelot-hsio", "syscon", "simple-mfd";
+			reg = <0x10d0000 0x10000>;
+
+			serdes: serdes {
+				compatible = "mscc,vsc7514-serdes";
+				#phy-cells = <2>;
+			};
+		};
 	};
 };


@@ -9,6 +9,7 @@ extern void ppc_printk_progress(char *s, unsigned short hex);

 extern unsigned int rtas_data;
 extern unsigned long long memory_limit;
+extern bool init_mem_is_free;
 extern unsigned long klimit;
 extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);


@@ -1314,9 +1314,7 @@ EXC_REAL_BEGIN(denorm_exception_hv, 0x1500, 0x100)

 #ifdef CONFIG_PPC_DENORMALISATION
 	mfspr	r10,SPRN_HSRR1
-	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
 	andis.	r10,r10,(HSRR1_DENORM)@h /* denorm? */
-	addi	r11,r11,-4		/* HSRR0 is next instruction */
 	bne+	denorm_assist
 #endif

@@ -1382,6 +1380,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
  */
 	XVCPSGNDP32(32)
 denorm_done:
+	mfspr	r11,SPRN_HSRR0
+	subi	r11,r11,4
 	mtspr	SPRN_HSRR0,r11
 	mtcrf	0x80,r9
 	ld	r9,PACA_EXGEN+EX_R9(r13)


@@ -176,13 +176,27 @@ _GLOBAL(tm_reclaim)
 	std	r1, PACATMSCRATCH(r13)
 	ld	r1, PACAR1(r13)

-	/* Store the PPR in r11 and reset to decent value */
 	std	r11, GPR11(r1)			/* Temporary stash */

+	/*
+	 * Move the saved user r1 to the kernel stack in case PACATMSCRATCH is
+	 * clobbered by an exception once we turn on MSR_RI below.
+	 */
+	ld	r11, PACATMSCRATCH(r13)
+	std	r11, GPR1(r1)
+
+	/*
+	 * Store r13 away so we can free up the scratch SPR for the SLB fault
+	 * handler (needed once we start accessing the thread_struct).
+	 */
+	GET_SCRATCH0(r11)
+	std	r11, GPR13(r1)
+
 	/* Reset MSR RI so we can take SLB faults again */
 	li	r11, MSR_RI
 	mtmsrd	r11, 1

+	/* Store the PPR in r11 and reset to decent value */
 	mfspr	r11, SPRN_PPR
 	HMT_MEDIUM

@@ -207,11 +221,11 @@ _GLOBAL(tm_reclaim)
 	SAVE_GPR(8, r7)				/* user r8 */
 	SAVE_GPR(9, r7)				/* user r9 */
 	SAVE_GPR(10, r7)			/* user r10 */
-	ld	r3, PACATMSCRATCH(r13)		/* user r1 */
+	ld	r3, GPR1(r1)			/* user r1 */
 	ld	r4, GPR7(r1)			/* user r7 */
 	ld	r5, GPR11(r1)			/* user r11 */
 	ld	r6, GPR12(r1)			/* user r12 */
-	GET_SCRATCH0(8)				/* user r13 */
+	ld	r8, GPR13(r1)			/* user r13 */

 	std	r3, GPR1(r7)
 	std	r4, GPR7(r7)
 	std	r5, GPR11(r7)


@@ -646,6 +646,16 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	 */
 	local_irq_disable();
 	ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
+	/*
+	 * If the PTE disappeared temporarily due to a THP
+	 * collapse, just return and let the guest try again.
+	 */
+	if (!ptep) {
+		local_irq_enable();
+		if (page)
+			put_page(page);
+		return RESUME_GUEST;
+	}
 	pte = *ptep;
 	local_irq_enable();


@@ -443,6 +443,9 @@ _GLOBAL(csum_ipv6_magic)
 	addc	r0, r8, r9
 	ld	r10, 0(r4)
 	ld	r11, 8(r4)
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+	rotldi	r5, r5, 8
+#endif
 	adde	r0, r0, r10
 	add	r5, r5, r7
 	adde	r0, r0, r11


@@ -28,6 +28,12 @@ static int __patch_instruction(unsigned int *exec_addr, unsigned int instr,
 {
 	int err;

+	/* Make sure we aren't patching a freed init section */
+	if (init_mem_is_free && init_section_contains(exec_addr, 4)) {
+		pr_debug("Skipping init section patching addr: 0x%px\n", exec_addr);
+		return 0;
+	}
+
 	__put_user_size(instr, patch_addr, 4, err);
 	if (err)
 		return err;


@@ -63,6 +63,7 @@
 #endif

 unsigned long long memory_limit;
+bool init_mem_is_free;

 #ifdef CONFIG_HIGHMEM
 pte_t *kmap_pte;
@@ -396,6 +397,7 @@ void free_initmem(void)
 {
 	ppc_md.progress = ppc_printk_progress;
 	mark_initmem_nx();
+	init_mem_is_free = true;
 	free_initmem_default(POISON_FREE_INITMEM);
 }


@@ -1204,7 +1204,9 @@ int find_and_online_cpu_nid(int cpu)
 	int new_nid;

 	/* Use associativity from first thread for all siblings */
-	vphn_get_associativity(cpu, associativity);
+	if (vphn_get_associativity(cpu, associativity))
+		return cpu_to_node(cpu);
+
 	new_nid = associativity_to_nid(associativity);
 	if (new_nid < 0 || !node_possible(new_nid))
 		new_nid = first_online_node;
@@ -1452,7 +1454,8 @@ static struct timer_list topology_timer;

 static void reset_topology_timer(void)
 {
-	mod_timer(&topology_timer, jiffies + topology_timer_secs * HZ);
+	if (vphn_enabled)
+		mod_timer(&topology_timer, jiffies + topology_timer_secs * HZ);
 }

 #ifdef CONFIG_SMP


@@ -45,7 +45,7 @@ static void scan_pkey_feature(void)
 	 * Since any pkey can be used for data or execute, we will just treat
 	 * all keys as equal and track them as one entity.
 	 */
-	pkeys_total = be32_to_cpu(vals[0]);
+	pkeys_total = vals[0];
 	pkeys_devtree_defined = true;
 }
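
The one-line fix works because of_property_read_u32_array() already converts
each cell from the device tree's big-endian storage into CPU byte order, so
the extra be32_to_cpu() double-swapped the count on little-endian kernels. A
sketch of the correct read pattern (node lookup simplified; an assumption
about the surrounding code, not a copy of it):

	#include <linux/of.h>

	static u32 read_pkeys_total(struct device_node *cpu)
	{
	    u32 vals[2];

	    /* values arrive already in CPU byte order */
	    if (of_property_read_u32_array(cpu, "ibm,processor-storage-keys",
	                                   vals, 2))
	        return 0;

	    return vals[0];    /* no be32_to_cpu() here */
	}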


@@ -276,7 +276,7 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
 	level_shift = entries_shift + 3;
 	level_shift = max_t(unsigned int, level_shift, PAGE_SHIFT);

-	if ((level_shift - 3) * levels + page_shift >= 60)
+	if ((level_shift - 3) * levels + page_shift >= 55)
 		return -EINVAL;

 	/* Allocate TCE table */


@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_PROTOTYPES_H
+
+#include <linux/ftrace.h>
+#include <asm-generic/asm-prototypes.h>
+
+#endif /* _ASM_RISCV_PROTOTYPES_H */


@@ -186,7 +186,7 @@ static void __init setup_bootmem(void)
 	BUG_ON(mem_size == 0);

 	set_max_mapnr(PFN_DOWN(mem_size));
-	max_low_pfn = pfn_base + PFN_DOWN(mem_size);
+	max_low_pfn = memblock_end_of_DRAM();

 #ifdef CONFIG_BLK_DEV_INITRD
 	setup_initrd();


@@ -25,20 +25,6 @@ ENTRY(get_sev_encryption_bit)
 	push	%ebx
 	push	%ecx
 	push	%edx
-	push	%edi
-
-	/*
-	 * RIP-relative addressing is needed to access the encryption bit
-	 * variable. Since we are running in 32-bit mode we need this call/pop
-	 * sequence to get the proper relative addressing.
-	 */
-	call	1f
-1:	popl	%edi
-	subl	$1b, %edi
-
-	movl	enc_bit(%edi), %eax
-	cmpl	$0, %eax
-	jge	.Lsev_exit

 	/* Check if running under a hypervisor */
 	movl	$1, %eax
@@ -69,15 +55,12 @@ ENTRY(get_sev_encryption_bit)

 	movl	%ebx, %eax
 	andl	$0x3f, %eax	/* Return the encryption bit location */
-	movl	%eax, enc_bit(%edi)
 	jmp	.Lsev_exit

 .Lno_sev:
 	xor	%eax, %eax
-	movl	%eax, enc_bit(%edi)

 .Lsev_exit:
-	pop	%edi
 	pop	%edx
 	pop	%ecx
 	pop	%ebx
@@ -113,8 +96,6 @@ ENTRY(set_sev_encryption_mask)
 ENDPROC(set_sev_encryption_mask)

 	.data
-enc_bit:
-	.int	0xffffffff

 #ifdef CONFIG_AMD_MEM_ENCRYPT
 	.balign	8


@@ -68,7 +68,13 @@ $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
 CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
        $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
        -fno-omit-frame-pointer -foptimize-sibling-calls \
-       -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO $(RETPOLINE_VDSO_CFLAGS)
+       -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO
+
+ifdef CONFIG_RETPOLINE
+ifneq ($(RETPOLINE_VDSO_CFLAGS),)
+  CFL += $(RETPOLINE_VDSO_CFLAGS)
+endif
+endif

 $(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)

@@ -138,7 +144,13 @@ KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector)
 KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
 KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
 KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING
-KBUILD_CFLAGS_32 += $(RETPOLINE_VDSO_CFLAGS)
+
+ifdef CONFIG_RETPOLINE
+ifneq ($(RETPOLINE_VDSO_CFLAGS),)
+  KBUILD_CFLAGS_32 += $(RETPOLINE_VDSO_CFLAGS)
+endif
+endif

 $(obj)/vdso32.so.dbg: KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)

 $(obj)/vdso32.so.dbg: FORCE \


@@ -43,8 +43,9 @@ extern u8 hvclock_page
 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
 {
 	long ret;
-	asm("syscall" : "=a" (ret) :
-	    "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
+	asm ("syscall" : "=a" (ret), "=m" (*ts) :
+	     "0" (__NR_clock_gettime), "D" (clock), "S" (ts) :
+	     "memory", "rcx", "r11");
 	return ret;
 }

@@ -52,8 +53,9 @@ notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
 {
 	long ret;

-	asm("syscall" : "=a" (ret) :
-	    "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
+	asm ("syscall" : "=a" (ret), "=m" (*tv), "=m" (*tz) :
+	     "0" (__NR_gettimeofday), "D" (tv), "S" (tz) :
+	     "memory", "rcx", "r11");
 	return ret;
 }

@@ -64,13 +66,13 @@ notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
 {
 	long ret;

-	asm(
+	asm (
 		"mov %%ebx, %%edx \n"
-		"mov %2, %%ebx \n"
+		"mov %[clock], %%ebx \n"
 		"call __kernel_vsyscall \n"
 		"mov %%edx, %%ebx \n"
-		: "=a" (ret)
-		: "0" (__NR_clock_gettime), "g" (clock), "c" (ts)
+		: "=a" (ret), "=m" (*ts)
+		: "0" (__NR_clock_gettime), [clock] "g" (clock), "c" (ts)
 		: "memory", "edx");
 	return ret;
 }
@@ -79,13 +81,13 @@ notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
 {
 	long ret;

-	asm(
+	asm (
 		"mov %%ebx, %%edx \n"
-		"mov %2, %%ebx \n"
+		"mov %[tv], %%ebx \n"
 		"call __kernel_vsyscall \n"
 		"mov %%edx, %%ebx \n"
-		: "=a" (ret)
-		: "0" (__NR_gettimeofday), "g" (tv), "c" (tz)
+		: "=a" (ret), "=m" (*tv), "=m" (*tz)
+		: "0" (__NR_gettimeofday), [tv] "g" (tv), "c" (tz)
 		: "memory", "edx");
 	return ret;
 }
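
The extra clobbers matter because the x86-64 syscall instruction itself
overwrites rcx (with the return RIP) and r11 (with the saved RFLAGS), and the
kernel does not restore either register; the "=m" outputs additionally stop
the compiler from caching or reordering stores to the result buffers around
the asm. Folded into a standalone userspace helper, the corrected 64-bit
fallback amounts to this (a sketch mirroring the change above):

	#include <sys/syscall.h>
	#include <time.h>

	static long fallback_clock_gettime(long clock, struct timespec *ts)
	{
	    long ret;

	    /* rcx/r11 are destroyed by the syscall instruction itself */
	    asm volatile ("syscall"
	                  : "=a" (ret), "=m" (*ts)
	                  : "0" (SYS_clock_gettime), "D" (clock), "S" (ts)
	                  : "memory", "rcx", "r11");
	    return ret;
	}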


@@ -36,6 +36,7 @@

 static int num_counters_llc;
 static int num_counters_nb;
+static bool l3_mask;

 static HLIST_HEAD(uncore_unused_list);

@@ -209,6 +210,13 @@ static int amd_uncore_event_init(struct perf_event *event)
 	hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
 	hwc->idx = -1;

+	/*
+	 * SliceMask and ThreadMask need to be set for certain L3 events in
+	 * Family 17h. For other events, the two fields do not affect the count.
+	 */
+	if (l3_mask)
+		hwc->config |= (AMD64_L3_SLICE_MASK | AMD64_L3_THREAD_MASK);
+
 	if (event->cpu < 0)
 		return -EINVAL;

@@ -525,6 +533,7 @@ static int __init amd_uncore_init(void)
 		amd_llc_pmu.name	  = "amd_l3";
 		format_attr_event_df.show = &event_show_df;
 		format_attr_event_l3.show = &event_show_l3;
+		l3_mask			  = true;
 	} else {
 		num_counters_nb		  = NUM_COUNTERS_NB;
 		num_counters_llc	  = NUM_COUNTERS_L2;
@@ -532,6 +541,7 @@ static int __init amd_uncore_init(void)
 		amd_llc_pmu.name	  = "amd_l2";
 		format_attr_event_df	  = format_attr_event;
 		format_attr_event_l3	  = format_attr_event;
+		l3_mask			  = false;
 	}

 	amd_nb_pmu.attr_groups	= amd_uncore_attr_groups_df;


@@ -3061,7 +3061,7 @@ static struct event_constraint bdx_uncore_pcu_constraints[] = {

 void bdx_uncore_cpu_init(void)
 {
-	int pkg = topology_phys_to_logical_pkg(0);
+	int pkg = topology_phys_to_logical_pkg(boot_cpu_data.phys_proc_id);

 	if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
 		bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
@@ -3931,16 +3931,16 @@ static const struct pci_device_id skx_uncore_pci_ids[] = {
 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
 	},
 	{ /* M3UPI0 Link 0 */
-		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204C),
-		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, SKX_PCI_UNCORE_M3UPI, 0),
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
 	},
 	{ /* M3UPI0 Link 1 */
-		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
-		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 1),
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
 	},
 	{ /* M3UPI1 Link 2 */
-		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204C),
-		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 4, SKX_PCI_UNCORE_M3UPI, 2),
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
 	},
 	{ /* end: all zeroes */ }
 };


@@ -46,6 +46,14 @@
 #define INTEL_ARCH_EVENT_MASK	\
 	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)

+#define AMD64_L3_SLICE_SHIFT	48
+#define AMD64_L3_SLICE_MASK	\
+	((0xFULL) << AMD64_L3_SLICE_SHIFT)
+
+#define AMD64_L3_THREAD_SHIFT	56
+#define AMD64_L3_THREAD_MASK	\
+	((0xFFULL) << AMD64_L3_THREAD_SHIFT)
+
 #define X86_RAW_EVENT_MASK		\
 	(ARCH_PERFMON_EVENTSEL_EVENT |	\
 	 ARCH_PERFMON_EVENTSEL_UMASK |	\


@@ -10,8 +10,13 @@ struct cpumask;
 struct mm_struct;

 #ifdef CONFIG_X86_UV
+#include <linux/efi.h>

 extern enum uv_system_type get_uv_system_type(void);
+static inline bool is_early_uv_system(void)
+{
+	return !((efi.uv_systab == EFI_INVALID_TABLE_ADDR) || !efi.uv_systab);
+}
 extern int is_uv_system(void);
 extern int is_uv_hubless(void);
 extern void uv_cpu_init(void);
@@ -23,6 +28,7 @@ extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 #else	/* X86_UV */

 static inline enum uv_system_type get_uv_system_type(void) { return UV_NONE; }
+static inline bool is_early_uv_system(void)	{ return 0; }
 static inline int is_uv_system(void)	{ return 0; }
 static inline int is_uv_hubless(void)	{ return 0; }
 static inline void uv_cpu_init(void)	{ }


@@ -922,7 +922,7 @@ static void init_amd(struct cpuinfo_x86 *c)
 static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 {
 	/* AMD errata T13 (order #21922) */
-	if ((c->x86 == 6)) {
+	if (c->x86 == 6) {
 		/* Duron Rev A0 */
 		if (c->x86_model == 3 && c->x86_stepping == 0)
 			size = 64;


@@ -26,6 +26,7 @@
 #include <asm/apic.h>
 #include <asm/intel-family.h>
 #include <asm/i8259.h>
+#include <asm/uv/uv.h>

 unsigned int __read_mostly cpu_khz;	/* TSC clocks / usec, not used here */
 EXPORT_SYMBOL(cpu_khz);
@@ -1433,6 +1434,9 @@ void __init tsc_early_init(void)
 {
 	if (!boot_cpu_has(X86_FEATURE_TSC))
 		return;
+	/* Don't change UV TSC multi-chassis synchronization */
+	if (is_early_uv_system())
+		return;
 	if (!determine_cpu_tsc_frequencies(true))
 		return;
 	loops_per_jiffy = get_loops_per_jiffy();


@@ -249,6 +249,17 @@ static u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
  */
 static const u64 shadow_nonpresent_or_rsvd_mask_len = 5;

+/*
+ * In some cases, we need to preserve the GFN of a non-present or reserved
+ * SPTE when we usurp the upper five bits of the physical address space to
+ * defend against L1TF, e.g. for MMIO SPTEs.  To preserve the GFN, we'll
+ * shift bits of the GFN that overlap with shadow_nonpresent_or_rsvd_mask
+ * left into the reserved bits, i.e. the GFN in the SPTE will be split into
+ * high and low parts.  This mask covers the lower bits of the GFN.
+ */
+static u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;
+
 static void mmu_spte_set(u64 *sptep, u64 spte);
 static union kvm_mmu_page_role
 kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
@@ -357,9 +368,7 @@ static bool is_mmio_spte(u64 spte)

 static gfn_t get_mmio_spte_gfn(u64 spte)
 {
-	u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask |
-		   shadow_nonpresent_or_rsvd_mask;
-	u64 gpa = spte & ~mask;
+	u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;

 	gpa |= (spte >> shadow_nonpresent_or_rsvd_mask_len)
 	       & shadow_nonpresent_or_rsvd_mask;
@@ -423,6 +432,8 @@ EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);

 static void kvm_mmu_reset_all_pte_masks(void)
 {
+	u8 low_phys_bits;
+
 	shadow_user_mask = 0;
 	shadow_accessed_mask = 0;
 	shadow_dirty_mask = 0;
@@ -437,12 +448,17 @@ static void kvm_mmu_reset_all_pte_masks(void)
 	 * appropriate mask to guard against L1TF attacks. Otherwise, it is
 	 * assumed that the CPU is not vulnerable to L1TF.
 	 */
+	low_phys_bits = boot_cpu_data.x86_phys_bits;
 	if (boot_cpu_data.x86_phys_bits <
-	    52 - shadow_nonpresent_or_rsvd_mask_len)
+	    52 - shadow_nonpresent_or_rsvd_mask_len) {
 		shadow_nonpresent_or_rsvd_mask =
 			rsvd_bits(boot_cpu_data.x86_phys_bits -
 				  shadow_nonpresent_or_rsvd_mask_len,
 				  boot_cpu_data.x86_phys_bits - 1);
+		low_phys_bits -= shadow_nonpresent_or_rsvd_mask_len;
+	}
+	shadow_nonpresent_or_rsvd_lower_gfn_mask =
+		GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);
 }

 static int is_cpuid_PSE36(void)
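
To make the bit layout concrete, here is a self-contained toy model of the
split (a sketch under assumed values, not kernel code: 46 physical address
bits, 4 KiB pages and the 5-bit reserved span used above); unsplit_gpa()
mirrors get_mmio_spte_gfn():

	#include <stdint.h>

	#define GENMASK_ULL(h, l) \
	    ((~0ULL << (l)) & (~0ULL >> (63 - (h))))

	#define RSVD_LEN   5    /* shadow_nonpresent_or_rsvd_mask_len */
	#define PHYS_BITS  46   /* assumed boot_cpu_data.x86_phys_bits */
	#define RSVD_MASK  GENMASK_ULL(PHYS_BITS - 1, PHYS_BITS - RSVD_LEN)
	#define LOW_MASK   GENMASK_ULL(PHYS_BITS - RSVD_LEN - 1, 12)

	/* encode: GFN bits overlapping the reserved span move up by RSVD_LEN */
	static uint64_t split_gpa(uint64_t gpa)
	{
	    return (gpa & LOW_MASK) | ((gpa & RSVD_MASK) << RSVD_LEN);
	}

	/* decode: reassemble the low part and the shifted high part */
	static uint64_t unsplit_gpa(uint64_t spte)
	{
	    return (spte & LOW_MASK) | ((spte >> RSVD_LEN) & RSVD_MASK);
	}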


@@ -121,7 +121,6 @@ module_param_named(pml, enable_pml, bool, S_IRUGO);

 #define MSR_BITMAP_MODE_X2APIC		1
 #define MSR_BITMAP_MODE_X2APIC_APICV	2
-#define MSR_BITMAP_MODE_LM		4

 #define KVM_VMX_TSC_MULTIPLIER_MAX	0xffffffffffffffffULL

@@ -857,6 +856,7 @@ struct nested_vmx {
 	/* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
 	u64 vmcs01_debugctl;
+	u64 vmcs01_guest_bndcfgs;

 	u16 vpid02;
 	u16 last_vpid;

@@ -2899,8 +2899,7 @@ static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 		vmx->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
 	}
-	if (is_long_mode(&vmx->vcpu))
-		wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+	wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
 #else
 	savesegment(fs, fs_sel);
 	savesegment(gs, gs_sel);

@@ -2951,8 +2950,7 @@ static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
 	vmx->loaded_cpu_state = NULL;

 #ifdef CONFIG_X86_64
-	if (is_long_mode(&vmx->vcpu))
-		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+	rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
 #endif
 	if (host_state->ldt_sel || (host_state->gs_sel & 7)) {
 		kvm_load_ldt(host_state->ldt_sel);

@@ -2980,24 +2978,19 @@ static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
 #ifdef CONFIG_X86_64
 static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx)
 {
-	if (is_long_mode(&vmx->vcpu)) {
-		preempt_disable();
-		if (vmx->loaded_cpu_state)
-			rdmsrl(MSR_KERNEL_GS_BASE,
-			       vmx->msr_guest_kernel_gs_base);
-		preempt_enable();
-	}
+	preempt_disable();
+	if (vmx->loaded_cpu_state)
+		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+	preempt_enable();
 	return vmx->msr_guest_kernel_gs_base;
 }

 static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data)
 {
-	if (is_long_mode(&vmx->vcpu)) {
-		preempt_disable();
-		if (vmx->loaded_cpu_state)
-			wrmsrl(MSR_KERNEL_GS_BASE, data);
-		preempt_enable();
-	}
+	preempt_disable();
+	if (vmx->loaded_cpu_state)
+		wrmsrl(MSR_KERNEL_GS_BASE, data);
+	preempt_enable();
 	vmx->msr_guest_kernel_gs_base = data;
 }
 #endif

@@ -3533,9 +3526,6 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
 		VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
 		VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;

-	if (kvm_mpx_supported())
-		msrs->exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
-
 	/* We support free control of debug control saving. */
 	msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;

@@ -3552,8 +3542,6 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
 		VM_ENTRY_LOAD_IA32_PAT;
 	msrs->entry_ctls_high |=
 		(VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
-	if (kvm_mpx_supported())
-		msrs->entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;

 	/* We support free control of debug control loading. */
 	msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;

@@ -3601,12 +3589,12 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
 		msrs->secondary_ctls_high);
 	msrs->secondary_ctls_low = 0;
 	msrs->secondary_ctls_high &=
-		SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
 		SECONDARY_EXEC_DESC |
 		SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
 		SECONDARY_EXEC_APIC_REGISTER_VIRT |
 		SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
 		SECONDARY_EXEC_WBINVD_EXITING;
+
 	/*
 	 * We can emulate "VMCS shadowing," even if the hardware
 	 * doesn't support it.

@@ -3663,6 +3651,10 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
 		msrs->secondary_ctls_high |=
 			SECONDARY_EXEC_UNRESTRICTED_GUEST;

+	if (flexpriority_enabled)
+		msrs->secondary_ctls_high |=
+			SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+
 	/* miscellaneous data */
 	rdmsr(MSR_IA32_VMX_MISC,
 		msrs->misc_low,

@@ -5073,19 +5065,6 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 	if (!msr)
 		return;

-	/*
-	 * MSR_KERNEL_GS_BASE is not intercepted when the guest is in
-	 * 64-bit mode as a 64-bit kernel may frequently access the
-	 * MSR. This means we need to manually save/restore the MSR
-	 * when switching between guest and host state, but only if
-	 * the guest is in 64-bit mode. Sync our cached value if the
-	 * guest is transitioning to 32-bit mode and the CPU contains
-	 * guest state, i.e. the cache is stale.
-	 */
-#ifdef CONFIG_X86_64
-	if (!(efer & EFER_LMA))
-		(void)vmx_read_guest_kernel_gs_base(vmx);
-#endif
-
 	vcpu->arch.efer = efer;
 	if (efer & EFER_LMA) {
 		vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);

@@ -6078,9 +6057,6 @@ static u8 vmx_msr_bitmap_mode(struct kvm_vcpu *vcpu)
 			mode |= MSR_BITMAP_MODE_X2APIC_APICV;
 	}

-	if (is_long_mode(vcpu))
-		mode |= MSR_BITMAP_MODE_LM;
-
 	return mode;
 }

@@ -6121,9 +6097,6 @@ static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu)
 	if (!changed)
 		return;

-	vmx_set_intercept_for_msr(msr_bitmap, MSR_KERNEL_GS_BASE, MSR_TYPE_RW,
-				  !(mode & MSR_BITMAP_MODE_LM));
-
 	if (changed & (MSR_BITMAP_MODE_X2APIC | MSR_BITMAP_MODE_X2APIC_APICV))
 		vmx_update_msr_bitmap_x2apic(msr_bitmap, mode);

@@ -6189,6 +6162,11 @@ static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
 	nested_mark_vmcs12_pages_dirty(vcpu);
 }

+static u8 vmx_get_rvi(void)
+{
+	return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
+}
+
 static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);

@@ -6201,7 +6179,7 @@ static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
 		WARN_ON_ONCE(!vmx->nested.virtual_apic_page))
 		return false;

-	rvi = vmcs_read16(GUEST_INTR_STATUS) & 0xff;
+	rvi = vmx_get_rvi();

 	vapic_page = kmap(vmx->nested.virtual_apic_page);
 	vppr = *((u32 *)(vapic_page + APIC_PROCPRI));

@@ -10245,15 +10223,16 @@ static void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
 	if (!lapic_in_kernel(vcpu))
 		return;

+	if (!flexpriority_enabled &&
+	    !cpu_has_vmx_virtualize_x2apic_mode())
+		return;
+
 	/* Postpone execution until vmcs01 is the current VMCS. */
 	if (is_guest_mode(vcpu)) {
 		to_vmx(vcpu)->nested.change_vmcs01_virtual_apic_mode = true;
 		return;
 	}

-	if (!cpu_need_tpr_shadow(vcpu))
-		return;
-
 	sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
 	sec_exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
 			      SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);

@@ -10375,6 +10354,14 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
 	return max_irr;
 }

+static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu)
+{
+	u8 rvi = vmx_get_rvi();
+	u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI);
+
+	return ((rvi & 0xf0) > (vppr & 0xf0));
+}
+
 static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
 {
 	if (!kvm_vcpu_apicv_active(vcpu))

@@ -11264,6 +11251,23 @@ static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu)
 #undef cr4_fixed1_update
 }

+static void nested_vmx_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	if (kvm_mpx_supported()) {
+		bool mpx_enabled = guest_cpuid_has(vcpu, X86_FEATURE_MPX);
+
+		if (mpx_enabled) {
+			vmx->nested.msrs.entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
+			vmx->nested.msrs.exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
+		} else {
+			vmx->nested.msrs.entry_ctls_high &= ~VM_ENTRY_LOAD_BNDCFGS;
+			vmx->nested.msrs.exit_ctls_high &= ~VM_EXIT_CLEAR_BNDCFGS;
+		}
+	}
+}
+
 static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);

@@ -11280,8 +11284,10 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
 		to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
 			~FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;

-	if (nested_vmx_allowed(vcpu))
+	if (nested_vmx_allowed(vcpu)) {
 		nested_vmx_cr_fixed1_bits_update(vcpu);
+		nested_vmx_entry_exit_ctls_update(vcpu);
+	}
 }

 static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)

@@ -12049,8 +12055,13 @@ static void prepare_vmcs02_full(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)

 	set_cr4_guest_host_mask(vmx);

-	if (vmx_mpx_supported())
-		vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
+	if (kvm_mpx_supported()) {
+		if (vmx->nested.nested_run_pending &&
+		    (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
+			vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
+		else
+			vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
+	}

 	if (enable_vpid) {
 		if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)

@@ -12595,15 +12606,21 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
 	bool from_vmentry = !!exit_qual;
 	u32 dummy_exit_qual;
-	u32 vmcs01_cpu_exec_ctrl;
+	bool evaluate_pending_interrupts;
 	int r = 0;

-	vmcs01_cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
+	evaluate_pending_interrupts = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
+		(CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING);
+	if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
+		evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);
enter_guest_mode(vcpu); enter_guest_mode(vcpu);
if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
if (kvm_mpx_supported() &&
!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02); vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
vmx_segment_cache_clear(vmx); vmx_segment_cache_clear(vmx);
@ -12643,16 +12660,14 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
* to L1 or delivered directly to L2 (e.g. In case L1 don't * to L1 or delivered directly to L2 (e.g. In case L1 don't
* intercept EXTERNAL_INTERRUPT). * intercept EXTERNAL_INTERRUPT).
* *
* Usually this would be handled by L0 requesting a * Usually this would be handled by the processor noticing an
* IRQ/NMI window by setting VMCS accordingly. However, * IRQ/NMI window request, or checking RVI during evaluation of
* this setting was done on VMCS01 and now VMCS02 is active * pending virtual interrupts. However, this setting was done
* instead. Thus, we force L0 to perform pending event * on VMCS01 and now VMCS02 is active instead. Thus, we force L0
* evaluation by requesting a KVM_REQ_EVENT. * to perform pending event evaluation by requesting a KVM_REQ_EVENT.
*/ */
if (vmcs01_cpu_exec_ctrl & if (unlikely(evaluate_pending_interrupts))
(CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING)) {
kvm_make_request(KVM_REQ_EVENT, vcpu); kvm_make_request(KVM_REQ_EVENT, vcpu);
}
/* /*
* Note no nested_vmx_succeed or nested_vmx_fail here. At this point * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
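
A standalone sketch of the priority test that vmx_get_rvi() and vmx_has_apicv_interrupt() above implement: a virtual interrupt is deliverable only when the 4-bit priority class (high nibble) of the Requesting Virtual Interrupt exceeds that of the Virtual Processor Priority Register. The harness below is illustrative only, not kernel code.

#include <assert.h>
#include <stdint.h>

/* Same comparison as vmx_has_apicv_interrupt(): compare priority
 * classes (bits 7:4), ignoring the sub-class in the low nibble. */
static int apicv_interrupt_deliverable(uint8_t rvi, uint8_t vppr)
{
	return (rvi & 0xf0) > (vppr & 0xf0);
}

int main(void)
{
	assert(apicv_interrupt_deliverable(0x51, 0x40));  /* class 5 > class 4 */
	assert(!apicv_interrupt_deliverable(0x4f, 0x40)); /* equal classes: blocked */
	return 0;
}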

View File

@@ -4698,7 +4698,7 @@ static void kvm_init_msr_list(void)
 		 */
 		switch (msrs_to_save[i]) {
 		case MSR_IA32_BNDCFGS:
-			if (!kvm_x86_ops->mpx_supported())
+			if (!kvm_mpx_supported())
 				continue;
 			break;
 		case MSR_TSC_AUX:

View File

@@ -322,16 +322,11 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
 	/*
 	 * __blk_mq_update_nr_hw_queues will update the nr_hw_queues and
-	 * queue_hw_ctx after freeze the queue. So we could use q_usage_counter
-	 * to avoid race with it. __blk_mq_update_nr_hw_queues will users
-	 * synchronize_rcu to ensure all of the users go out of the critical
-	 * section below and see zeroed q_usage_counter.
+	 * queue_hw_ctx after freeze the queue, so we use q_usage_counter
+	 * to avoid race with it.
 	 */
-	rcu_read_lock();
-	if (percpu_ref_is_zero(&q->q_usage_counter)) {
-		rcu_read_unlock();
+	if (!percpu_ref_tryget(&q->q_usage_counter))
 		return;
-	}
 
 	queue_for_each_hw_ctx(q, hctx, i) {
 		struct blk_mq_tags *tags = hctx->tags;
@@ -347,7 +342,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
 			bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
 		bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
 	}
-	rcu_read_unlock();
+	blk_queue_exit(q);
 }
 
 static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
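
The hunk above replaces an RCU read-side check with percpu_ref_tryget(). A minimal sketch of the resulting pattern, assuming a struct request_queue *q whose q_usage_counter gates teardown (blk_queue_exit() drops the reference internally):

if (!percpu_ref_tryget(&q->q_usage_counter))
	return;		/* queue is draining or gone; nothing safe to iterate */

/* ... walk q->queue_hw_ctx; the reference keeps it stable ... */

blk_queue_exit(q);	/* releases q_usage_counter */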

View File

@@ -1628,7 +1628,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 		BUG_ON(!rq->q);
 		if (rq->mq_ctx != this_ctx) {
 			if (this_ctx) {
-				trace_block_unplug(this_q, depth, from_schedule);
+				trace_block_unplug(this_q, depth, !from_schedule);
 				blk_mq_sched_insert_requests(this_q, this_ctx,
 							     &ctx_list,
 							     from_schedule);
@@ -1648,7 +1648,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 	 * on 'ctx_list'. Do those.
 	 */
 	if (this_ctx) {
-		trace_block_unplug(this_q, depth, from_schedule);
+		trace_block_unplug(this_q, depth, !from_schedule);
 		blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
 					     from_schedule);
 	}

View File

@@ -609,7 +609,7 @@ void elv_drain_elevator(struct request_queue *q)
 	while (e->type->ops.sq.elevator_dispatch_fn(q, 1))
 		;
-	if (q->nr_sorted && printed++ < 10) {
+	if (q->nr_sorted && !blk_queue_is_zoned(q) && printed++ < 10 ) {
 		printk(KERN_ERR "%s: forced dispatching is broken "
 		       "(nr_sorted=%u), please report this\n",
 		       q->elevator->type->elevator_name, q->nr_sorted);

View File

@@ -2689,11 +2689,10 @@ static void ns_poll(struct timer_list *unused)
 	PRINTK("nicstar: Entering ns_poll().\n");
 	for (i = 0; i < num_cards; i++) {
 		card = cards[i];
-		if (spin_is_locked(&card->int_lock)) {
+		if (!spin_trylock_irqsave(&card->int_lock, flags)) {
 			/* Probably it isn't worth spinning */
 			continue;
 		}
-		spin_lock_irqsave(&card->int_lock, flags);
 
 		stat_w = 0;
 		stat_r = readl(card->membase + STAT);
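
The idiom adopted above: spin_is_locked() followed by spin_lock_irqsave() is racy, since the lock can be retaken between the check and the acquisition, while spin_trylock_irqsave() checks and acquires atomically. A sketch of the full pattern, assuming the card's int_lock and a local flags:

unsigned long flags;

if (!spin_trylock_irqsave(&card->int_lock, flags))
	continue;	/* contended: not worth spinning in the poll timer */

/* ... read and acknowledge interrupt status under the lock ... */

spin_unlock_irqrestore(&card->int_lock, flags);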

View File

@@ -1713,8 +1713,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 	dpm_wait_for_subordinate(dev, async);
 
-	if (async_error)
+	if (async_error) {
+		dev->power.direct_complete = false;
 		goto Complete;
+	}
 
 	/*
 	 * If a device configured to wake up the system from sleep states
@@ -1726,6 +1728,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 		pm_wakeup_event(dev, 0);
 
 	if (pm_wakeup_pending()) {
+		dev->power.direct_complete = false;
 		async_error = -EBUSY;
 		goto Complete;
 	}

View File

@@ -2670,8 +2670,8 @@ static void purge_persistent_grants(struct blkfront_info *info)
 			list_del(&gnt_list_entry->node);
 			gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL);
 			rinfo->persistent_gnts_c--;
-			__free_page(gnt_list_entry->page);
-			kfree(gnt_list_entry);
+			gnt_list_entry->gref = GRANT_INVALID_REF;
+			list_add_tail(&gnt_list_entry->node, &rinfo->grants);
 		}
 
 		spin_unlock_irqrestore(&rinfo->ring_lock, flags);

View File

@@ -203,10 +203,11 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
 	{ }	/* Terminating entry */
 };
 
-static inline void ath3k_log_failed_loading(int err, int len, int size)
+static inline void ath3k_log_failed_loading(int err, int len, int size,
+					    int count)
 {
-	BT_ERR("Error in firmware loading err = %d, len = %d, size = %d",
-	       err, len, size);
+	BT_ERR("Firmware loading err = %d, len = %d, size = %d, count = %d",
+	       err, len, size, count);
 }
 
 #define USB_REQ_DFU_DNLOAD	1
@@ -257,7 +258,7 @@ static int ath3k_load_firmware(struct usb_device *udev,
 					&len, 3000);
 
 		if (err || (len != size)) {
-			ath3k_log_failed_loading(err, len, size);
+			ath3k_log_failed_loading(err, len, size, count);
 			goto error;
 		}
 
@@ -356,7 +357,7 @@ static int ath3k_load_fwfile(struct usb_device *udev,
 		err = usb_bulk_msg(udev, pipe, send_buf, size,
 				   &len, 3000);
 		if (err || (len != size)) {
-			ath3k_log_failed_loading(err, len, size);
+			ath3k_log_failed_loading(err, len, size, count);
 			kfree(send_buf);
 			return err;
 		}

View File

@@ -448,7 +448,7 @@ static int bt3c_load_firmware(struct bt3c_info *info,
 {
 	char *ptr = (char *) firmware;
 	char b[9];
-	unsigned int iobase, tmp;
+	unsigned int iobase, tmp, tn;
 	unsigned long size, addr, fcs;
 	int i, err = 0;
@@ -490,7 +490,9 @@ static int bt3c_load_firmware(struct bt3c_info *info,
 		memset(b, 0, sizeof(b));
 		for (tmp = 0, i = 0; i < size; i++) {
 			memcpy(b, ptr + (i * 2) + 2, 2);
-			tmp += simple_strtol(b, NULL, 16);
+			if (kstrtouint(b, 16, &tn))
+				return -EINVAL;
+			tmp += tn;
 		}
 
 		if (((tmp + fcs) & 0xff) != 0xff) {
@@ -505,7 +507,8 @@ static int bt3c_load_firmware(struct bt3c_info *info,
 			memset(b, 0, sizeof(b));
 			for (i = 0; i < (size - 4) / 2; i++) {
 				memcpy(b, ptr + (i * 4) + 12, 4);
-				tmp = simple_strtoul(b, NULL, 16);
+				if (kstrtouint(b, 16, &tmp))
+					return -EINVAL;
 				bt3c_put(iobase, tmp);
 			}
 		}
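
Unlike simple_strtoul(), kstrtouint() rejects malformed input instead of silently parsing a prefix, which is why the conversion above can now fail with -EINVAL. A minimal usage sketch for the hex fields parsed here:

unsigned int val;
int err;

err = kstrtouint(b, 16, &val);	/* b holds NUL-terminated hex digits */
if (err)
	return err;		/* -EINVAL on stray characters, -ERANGE on overflow */
tmp += val;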

View File

@@ -324,6 +324,7 @@ static const struct bcm_subver_table bcm_uart_subver_table[] = {
 	{ 0x4103, "BCM4330B1"	},	/* 002.001.003 */
 	{ 0x410e, "BCM43341B0"	},	/* 002.001.014 */
 	{ 0x4406, "BCM4324B3"	},	/* 002.004.006 */
+	{ 0x6109, "BCM4335C0"	},	/* 003.001.009 */
 	{ 0x610c, "BCM4354"	},	/* 003.001.012 */
 	{ 0x2122, "BCM4343A0"	},	/* 001.001.034 */
 	{ 0x2209, "BCM43430A1"	},	/* 001.002.009 */

View File

@@ -21,8 +21,9 @@
 #include <net/rsi_91x.h>
 #include <net/genetlink.h>
 
-#define RSI_HEADROOM_FOR_BT_HAL	16
+#define RSI_DMA_ALIGN	8
 #define RSI_FRAME_DESC_SIZE	16
+#define RSI_HEADROOM_FOR_BT_HAL	(RSI_FRAME_DESC_SIZE + RSI_DMA_ALIGN)
 
 struct rsi_hci_adapter {
 	void *priv;
@@ -70,6 +71,16 @@ static int rsi_hci_send_pkt(struct hci_dev *hdev, struct sk_buff *skb)
 		bt_cb(new_skb)->pkt_type = hci_skb_pkt_type(skb);
 		kfree_skb(skb);
 		skb = new_skb;
+		if (!IS_ALIGNED((unsigned long)skb->data, RSI_DMA_ALIGN)) {
+			u8 *skb_data = skb->data;
+			int skb_len = skb->len;
+
+			skb_push(skb, RSI_DMA_ALIGN);
+			skb_pull(skb, PTR_ALIGN(skb->data,
+						RSI_DMA_ALIGN) - skb->data);
+			memmove(skb->data, skb_data, skb_len);
+			skb_trim(skb, skb_len);
+		}
 	}
 
 	return h_adapter->proto_ops->coex_send_pkt(h_adapter->priv, skb,
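
The alignment arithmetic in the hunk above, which pushes RSI_DMA_ALIGN bytes of headroom, pulls forward to the next boundary, then memmoves the payload, relies on the fact that rounding a pointer up to an 8-byte boundary consumes at most 7 bytes. An illustrative userspace check (ALIGN_UP stands in for the kernel's PTR_ALIGN):

#include <assert.h>
#include <stdint.h>

#define ALIGN_UP(p, a) (((uintptr_t)(p) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

int main(void)
{
	uint8_t buf[64];
	uint8_t *data = buf + 3;	/* deliberately misaligned payload */
	uint8_t *aligned = (uint8_t *)ALIGN_UP(data, 8);

	assert(((uintptr_t)aligned & 7) == 0);	/* now 8-byte aligned */
	assert(aligned - data < 8);	/* so RSI_DMA_ALIGN headroom suffices */
	return 0;
}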

View File

@@ -138,6 +138,13 @@ static const struct id_table ic_id_table[] = {
 	  .fw_name  = "rtl_bt/rtl8761a_fw.bin",
 	  .cfg_name = "rtl_bt/rtl8761a_config" },
 
+	/* 8822C with USB interface */
+	{ IC_INFO(RTL_ROM_LMP_8822B, 0xc),
+	  .config_needed = false,
+	  .has_rom_version = true,
+	  .fw_name  = "rtl_bt/rtl8822cu_fw.bin",
+	  .cfg_name = "rtl_bt/rtl8822cu_config" },
+
 	/* 8822B */
 	{ IC_INFO(RTL_ROM_LMP_8822B, 0xb),
 	  .config_needed = true,
@@ -206,7 +213,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev,
 				struct btrtl_device_info *btrtl_dev,
 				unsigned char **_buf)
 {
-	const u8 extension_sig[] = { 0x51, 0x04, 0xfd, 0x77 };
+	static const u8 extension_sig[] = { 0x51, 0x04, 0xfd, 0x77 };
 	struct rtl_epatch_header *epatch_info;
 	unsigned char *buf;
 	int i, len;
@@ -228,6 +235,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev,
 		{ RTL_ROM_LMP_8822B, 8 },
 		{ RTL_ROM_LMP_8723B, 9 },	/* 8723D */
 		{ RTL_ROM_LMP_8821A, 10 },	/* 8821C */
+		{ RTL_ROM_LMP_8822B, 13 },	/* 8822C */
 	};
 
 	min_size = sizeof(struct rtl_epatch_header) + sizeof(extension_sig) + 3;

View File

@@ -3096,6 +3096,7 @@ static int btusb_probe(struct usb_interface *intf,
 		hdev->set_diag = btintel_set_diag;
 		hdev->set_bdaddr = btintel_set_bdaddr;
 		set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
+		set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
 		set_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks);
 	}

View File

@@ -167,7 +167,8 @@ struct qca_serdev {
 };
 
 static int qca_power_setup(struct hci_uart *hu, bool on);
-static void qca_power_shutdown(struct hci_dev *hdev);
+static void qca_power_shutdown(struct hci_uart *hu);
+static int qca_power_off(struct hci_dev *hdev);
 
 static void __serial_clock_on(struct tty_struct *tty)
 {
@@ -499,7 +500,6 @@ static int qca_open(struct hci_uart *hu)
 	hu->priv = qca;
 
 	if (hu->serdev) {
-		serdev_device_open(hu->serdev);
 		qcadev = serdev_device_get_drvdata(hu->serdev);
 		if (qcadev->btsoc_type != QCA_WCN3990) {
@@ -609,11 +609,10 @@ static int qca_close(struct hci_uart *hu)
 	if (hu->serdev) {
 		qcadev = serdev_device_get_drvdata(hu->serdev);
 		if (qcadev->btsoc_type == QCA_WCN3990)
-			qca_power_shutdown(hu->hdev);
+			qca_power_shutdown(hu);
 		else
 			gpiod_set_value_cansleep(qcadev->bt_en, 0);
-
-		serdev_device_close(hu->serdev);
 	}
 
 	kfree_skb(qca->rx_skb);
@@ -1101,8 +1100,26 @@ static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
 static int qca_wcn3990_init(struct hci_uart *hu)
 {
 	struct hci_dev *hdev = hu->hdev;
+	struct qca_serdev *qcadev;
 	int ret;
 
+	/* Check for vregs status, may be hci down has turned
+	 * off the voltage regulator.
+	 */
+	qcadev = serdev_device_get_drvdata(hu->serdev);
+	if (!qcadev->bt_power->vregs_on) {
+		serdev_device_close(hu->serdev);
+		ret = qca_power_setup(hu, true);
+		if (ret)
+			return ret;
+
+		ret = serdev_device_open(hu->serdev);
+		if (ret) {
+			bt_dev_err(hu->hdev, "failed to open port");
+			return ret;
+		}
+	}
+
 	/* Forcefully enable wcn3990 to enter in to boot mode. */
 	host_set_baudrate(hu, 2400);
 	ret = qca_send_power_pulse(hdev, QCA_WCN3990_POWEROFF_PULSE);
@@ -1154,6 +1171,12 @@ static int qca_setup(struct hci_uart *hu)
 	if (qcadev->btsoc_type == QCA_WCN3990) {
 		bt_dev_info(hdev, "setting up wcn3990");
+
+		/* Enable NON_PERSISTENT_SETUP QUIRK to ensure to execute
+		 * setup for every hci up.
+		 */
+		set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
+		hu->hdev->shutdown = qca_power_off;
 		ret = qca_wcn3990_init(hu);
 		if (ret)
 			return ret;
@@ -1232,13 +1255,24 @@ static const struct qca_vreg_data qca_soc_data = {
 	.num_vregs = 4,
 };
 
-static void qca_power_shutdown(struct hci_dev *hdev)
+static void qca_power_shutdown(struct hci_uart *hu)
+{
+	struct serdev_device *serdev = hu->serdev;
+	unsigned char cmd = QCA_WCN3990_POWEROFF_PULSE;
+
+	host_set_baudrate(hu, 2400);
+	hci_uart_set_flow_control(hu, true);
+	serdev_device_write_buf(serdev, &cmd, sizeof(cmd));
+	hci_uart_set_flow_control(hu, false);
+	qca_power_setup(hu, false);
+}
+
+static int qca_power_off(struct hci_dev *hdev)
 {
 	struct hci_uart *hu = hci_get_drvdata(hdev);
 
-	host_set_baudrate(hu, 2400);
-	qca_send_power_pulse(hdev, QCA_WCN3990_POWEROFF_PULSE);
-	qca_power_setup(hu, false);
+	qca_power_shutdown(hu);
+	return 0;
 }
 
 static int qca_enable_regulator(struct qca_vreg vregs,
@@ -1413,7 +1447,7 @@ static void qca_serdev_remove(struct serdev_device *serdev)
 	struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
 
 	if (qcadev->btsoc_type == QCA_WCN3990)
-		qca_power_shutdown(qcadev->serdev_hu.hdev);
+		qca_power_shutdown(&qcadev->serdev_hu);
 	else
 		clk_disable_unprepare(qcadev->susclk);

View File

@@ -57,9 +57,10 @@ static inline struct sk_buff *hci_uart_dequeue(struct hci_uart *hu)
 {
 	struct sk_buff *skb = hu->tx_skb;
 
-	if (!skb)
-		skb = hu->proto->dequeue(hu);
-	else
+	if (!skb) {
+		if (test_bit(HCI_UART_PROTO_READY, &hu->flags))
+			skb = hu->proto->dequeue(hu);
+	} else
 		hu->tx_skb = NULL;
 
 	return skb;
@@ -94,7 +95,7 @@ static void hci_uart_write_work(struct work_struct *work)
 			hci_uart_tx_complete(hu, hci_skb_pkt_type(skb));
 			kfree_skb(skb);
 		}
-	} while(test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state));
+	} while (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state));
 
 	clear_bit(HCI_UART_SENDING, &hu->tx_state);
 }
@@ -368,6 +369,7 @@ void hci_uart_unregister_device(struct hci_uart *hu)
 {
 	struct hci_dev *hdev = hu->hdev;
 
+	clear_bit(HCI_UART_PROTO_READY, &hu->flags);
 	hci_unregister_dev(hdev);
 	hci_free_dev(hdev);

View File

@@ -180,26 +180,29 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
 	data->base = of_iomap(node, 0);
 	if (!data->base) {
 		pr_err("Could not map PIT address\n");
-		return -ENXIO;
+		ret = -ENXIO;
+		goto exit;
 	}
 
 	data->mck = of_clk_get(node, 0);
 	if (IS_ERR(data->mck)) {
 		pr_err("Unable to get mck clk\n");
-		return PTR_ERR(data->mck);
+		ret = PTR_ERR(data->mck);
+		goto exit;
 	}
 
 	ret = clk_prepare_enable(data->mck);
 	if (ret) {
 		pr_err("Unable to enable mck\n");
-		return ret;
+		goto exit;
 	}
 
 	/* Get the interrupts property */
 	data->irq = irq_of_parse_and_map(node, 0);
 	if (!data->irq) {
 		pr_err("Unable to get IRQ from DT\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto exit;
 	}
 
 	/*
@@ -227,7 +230,7 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
 	ret = clocksource_register_hz(&data->clksrc, pit_rate);
 	if (ret) {
 		pr_err("Failed to register clocksource\n");
-		return ret;
+		goto exit;
 	}
 
 	/* Set up irq handler */
@@ -236,7 +239,8 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
 			  "at91_tick", data);
 	if (ret) {
 		pr_err("Unable to setup IRQ\n");
-		return ret;
+		clocksource_unregister(&data->clksrc);
+		goto exit;
 	}
 
 	/* Set up and register clockevents */
@@ -254,6 +258,10 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
 	clockevents_register_device(&data->clkevt);
 
 	return 0;
+
+exit:
+	kfree(data);
+	return ret;
 }
 
 TIMER_OF_DECLARE(at91sam926x_pit, "atmel,at91sam9260-pit",
		 at91sam926x_pit_dt_init);
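
The conversion above is the usual single-exit cleanup shape: every failure path funnels through one label that frees what the function allocated. A generic sketch (struct example_data and setup_step() are illustrative placeholders, not from the driver):

#include <linux/slab.h>

struct example_data { int dummy; };	/* stand-in for the driver state */

static int example_init(struct example_data **out)
{
	struct example_data *data;
	int ret;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	ret = setup_step(data);		/* hypothetical helper */
	if (ret)
		goto exit;		/* no early return can leak data now */

	*out = data;
	return 0;

exit:
	kfree(data);
	return ret;
}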

View File

@@ -130,13 +130,17 @@ static int fttmr010_timer_set_next_event(unsigned long cycles,
 	cr &= ~fttmr010->t1_enable_val;
 	writel(cr, fttmr010->base + TIMER_CR);
 
-	/* Setup the match register forward/backward in time */
-	cr = readl(fttmr010->base + TIMER1_COUNT);
-	if (fttmr010->count_down)
-		cr -= cycles;
-	else
-		cr += cycles;
-	writel(cr, fttmr010->base + TIMER1_MATCH1);
+	if (fttmr010->count_down) {
+		/*
+		 * ASPEED Timer Controller will load TIMER1_LOAD register
+		 * into TIMER1_COUNT register when the timer is re-enabled.
+		 */
+		writel(cycles, fttmr010->base + TIMER1_LOAD);
+	} else {
+		/* Setup the match register forward in time */
+		cr = readl(fttmr010->base + TIMER1_COUNT);
+		writel(cr + cycles, fttmr010->base + TIMER1_MATCH1);
+	}
 
 	/* Start */
 	cr = readl(fttmr010->base + TIMER_CR);

View File

@@ -97,6 +97,9 @@ static int __init ti_32k_timer_init(struct device_node *np)
 		return -ENXIO;
 	}
 
+	if (!of_machine_is_compatible("ti,am43"))
+		ti_32k_timer.cs.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
+
 	ti_32k_timer.counter = ti_32k_timer.base;
 
 	/*

View File

@@ -44,7 +44,7 @@ enum _msm8996_version {
 
 struct platform_device *cpufreq_dt_pdev, *kryo_cpufreq_pdev;
 
-static enum _msm8996_version __init qcom_cpufreq_kryo_get_msm_id(void)
+static enum _msm8996_version qcom_cpufreq_kryo_get_msm_id(void)
 {
 	size_t len;
 	u32 *msm_id;
@@ -222,7 +222,7 @@ static int __init qcom_cpufreq_kryo_init(void)
 }
 module_init(qcom_cpufreq_kryo_init);
 
-static void __init qcom_cpufreq_kryo_exit(void)
+static void __exit qcom_cpufreq_kryo_exit(void)
 {
 	platform_device_unregister(kryo_cpufreq_pdev);
 	platform_driver_unregister(&qcom_cpufreq_kryo_driver);

View File

@@ -1553,8 +1553,8 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 	edesc->src_nents = src_nents;
 	edesc->dst_nents = dst_nents;
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
-	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
-			 desc_bytes;
+	edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
+						  desc_bytes);
 	edesc->iv_dir = DMA_TO_DEVICE;
 
 	/* Make sure IV is located in a DMAable area */
@@ -1757,8 +1757,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
 	edesc->src_nents = src_nents;
 	edesc->dst_nents = dst_nents;
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
-	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
-			 desc_bytes;
+	edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
+						  desc_bytes);
 	edesc->iv_dir = DMA_FROM_DEVICE;
 
 	/* Make sure IV is located in a DMAable area */

View File

@@ -367,7 +367,8 @@ static inline void dsgl_walk_init(struct dsgl_walk *walk,
 	walk->to = (struct phys_sge_pairs *)(dsgl + 1);
 }
 
-static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid)
+static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
+				 int pci_chan_id)
 {
 	struct cpl_rx_phys_dsgl *phys_cpl;
@@ -385,6 +386,7 @@ static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid)
 	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
 	phys_cpl->rss_hdr_int.qid = htons(qid);
 	phys_cpl->rss_hdr_int.hash_val = 0;
+	phys_cpl->rss_hdr_int.channel = pci_chan_id;
 }
 
 static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
@@ -718,7 +720,7 @@ static inline void create_wreq(struct chcr_context *ctx,
 		FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
 				!!lcb, ctx->tx_qidx);
 
-	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id,
+	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
 						       qid);
 	chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
 				    ((sizeof(chcr_req->wreq)) >> 4)));
@@ -1339,16 +1341,23 @@ static int chcr_device_init(struct chcr_context *ctx)
 			       adap->vres.ncrypto_fc);
 		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
 		txq_perchan = ntxq / u_ctx->lldi.nchan;
-		rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
-		rxq_idx += id % rxq_perchan;
-		txq_idx = ctx->dev->tx_channel_id * txq_perchan;
-		txq_idx += id % txq_perchan;
 		spin_lock(&ctx->dev->lock_chcr_dev);
-		ctx->rx_qidx = rxq_idx;
-		ctx->tx_qidx = txq_idx;
+		ctx->tx_chan_id = ctx->dev->tx_channel_id;
 		ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
 		ctx->dev->rx_channel_id = 0;
 		spin_unlock(&ctx->dev->lock_chcr_dev);
+		rxq_idx = ctx->tx_chan_id * rxq_perchan;
+		rxq_idx += id % rxq_perchan;
+		txq_idx = ctx->tx_chan_id * txq_perchan;
+		txq_idx += id % txq_perchan;
+		ctx->rx_qidx = rxq_idx;
+		ctx->tx_qidx = txq_idx;
+		/* Channel Id used by SGE to forward packet to Host.
+		 * Same value should be used in cpl_fw6_pld RSS_CH field
+		 * by FW. Driver programs PCI channel ID to be used in fw
+		 * at the time of queue allocation with value "pi->tx_chan"
+		 */
+		ctx->pci_chan_id = txq_idx / txq_perchan;
 	}
 out:
 	return err;
@@ -2503,6 +2512,7 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct dsgl_walk dsgl_walk;
 	unsigned int authsize = crypto_aead_authsize(tfm);
+	struct chcr_context *ctx = a_ctx(tfm);
 	u32 temp;
 
 	dsgl_walk_init(&dsgl_walk, phys_cpl);
@@ -2512,7 +2522,7 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
 	dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
 	temp = req->cryptlen + (reqctx->op ? -authsize : authsize);
 	dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
-	dsgl_walk_end(&dsgl_walk, qid);
+	dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
 }
 
 void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
@@ -2544,6 +2554,8 @@ void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
 			     unsigned short qid)
 {
 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
+	struct chcr_context *ctx = c_ctx(tfm);
 	struct dsgl_walk dsgl_walk;
 
 	dsgl_walk_init(&dsgl_walk, phys_cpl);
@@ -2552,7 +2564,7 @@ void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
 	reqctx->dstsg = dsgl_walk.last_sg;
 	reqctx->dst_ofst = dsgl_walk.last_sg_len;
-	dsgl_walk_end(&dsgl_walk, qid);
+	dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
 }
 
 void chcr_add_hash_src_ent(struct ahash_request *req,

View File

@@ -255,6 +255,8 @@ struct chcr_context {
 	struct chcr_dev *dev;
 	unsigned char tx_qidx;
 	unsigned char rx_qidx;
+	unsigned char tx_chan_id;
+	unsigned char pci_chan_id;
 	struct __crypto_ctx crypto_ctx[0];
 };

View File

@@ -63,7 +63,7 @@ struct dcp {
 	struct dcp_coherent_block	*coh;
 
 	struct completion		completion[DCP_MAX_CHANS];
-	struct mutex			mutex[DCP_MAX_CHANS];
+	spinlock_t			lock[DCP_MAX_CHANS];
 	struct task_struct		*thread[DCP_MAX_CHANS];
 	struct crypto_queue		queue[DCP_MAX_CHANS];
 };
@@ -349,13 +349,20 @@ static int dcp_chan_thread_aes(void *data)
 	int ret;
 
-	do {
-		__set_current_state(TASK_INTERRUPTIBLE);
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
 
-		mutex_lock(&sdcp->mutex[chan]);
+		spin_lock(&sdcp->lock[chan]);
 		backlog = crypto_get_backlog(&sdcp->queue[chan]);
 		arq = crypto_dequeue_request(&sdcp->queue[chan]);
-		mutex_unlock(&sdcp->mutex[chan]);
+		spin_unlock(&sdcp->lock[chan]);
+
+		if (!backlog && !arq) {
+			schedule();
+			continue;
+		}
+
+		set_current_state(TASK_RUNNING);
 
 		if (backlog)
 			backlog->complete(backlog, -EINPROGRESS);
@@ -363,11 +370,8 @@ static int dcp_chan_thread_aes(void *data)
 		if (arq) {
 			ret = mxs_dcp_aes_block_crypt(arq);
 			arq->complete(arq, ret);
-			continue;
 		}
-
-		schedule();
-	} while (!kthread_should_stop());
+	}
 
 	return 0;
 }
@@ -409,9 +413,9 @@ static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
 	rctx->ecb = ecb;
 	actx->chan = DCP_CHAN_CRYPTO;
 
-	mutex_lock(&sdcp->mutex[actx->chan]);
+	spin_lock(&sdcp->lock[actx->chan]);
 	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
-	mutex_unlock(&sdcp->mutex[actx->chan]);
+	spin_unlock(&sdcp->lock[actx->chan]);
 
 	wake_up_process(sdcp->thread[actx->chan]);
 
@@ -640,13 +644,20 @@ static int dcp_chan_thread_sha(void *data)
 	struct ahash_request *req;
 	int ret, fini;
 
-	do {
-		__set_current_state(TASK_INTERRUPTIBLE);
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
 
-		mutex_lock(&sdcp->mutex[chan]);
+		spin_lock(&sdcp->lock[chan]);
 		backlog = crypto_get_backlog(&sdcp->queue[chan]);
 		arq = crypto_dequeue_request(&sdcp->queue[chan]);
-		mutex_unlock(&sdcp->mutex[chan]);
+		spin_unlock(&sdcp->lock[chan]);
+
+		if (!backlog && !arq) {
+			schedule();
+			continue;
+		}
+
+		set_current_state(TASK_RUNNING);
 
 		if (backlog)
 			backlog->complete(backlog, -EINPROGRESS);
@@ -658,12 +669,8 @@ static int dcp_chan_thread_sha(void *data)
 			ret = dcp_sha_req_to_buf(arq);
 			fini = rctx->fini;
 			arq->complete(arq, ret);
-
-			if (!fini)
-				continue;
 		}
-
-		schedule();
-	} while (!kthread_should_stop());
+	}
 
 	return 0;
 }
@@ -721,9 +728,9 @@ static int dcp_sha_update_fx(struct ahash_request *req, int fini)
 		rctx->init = 1;
 	}
 
-	mutex_lock(&sdcp->mutex[actx->chan]);
+	spin_lock(&sdcp->lock[actx->chan]);
 	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
-	mutex_unlock(&sdcp->mutex[actx->chan]);
+	spin_unlock(&sdcp->lock[actx->chan]);
 
 	wake_up_process(sdcp->thread[actx->chan]);
 
 	mutex_unlock(&actx->mutex);
@@ -997,7 +1004,7 @@ static int mxs_dcp_probe(struct platform_device *pdev)
 	platform_set_drvdata(pdev, sdcp);
 
 	for (i = 0; i < DCP_MAX_CHANS; i++) {
-		mutex_init(&sdcp->mutex[i]);
+		spin_lock_init(&sdcp->lock[i]);
 		init_completion(&sdcp->completion[i]);
 		crypto_init_queue(&sdcp->queue[i], 50);
 	}
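
Two things change together above: the mutex becomes a spinlock (taking a sleeping lock after set_current_state(TASK_INTERRUPTIBLE) can clobber the task state and trips the kernel's might_sleep debugging), and the thread now marks itself INTERRUPTIBLE before checking for work, so a wake_up_process() from the enqueue side cannot be lost between the check and schedule(). The bare idiom, with hypothetical have_request()/process_request() placeholders:

#include <linux/kthread.h>
#include <linux/sched.h>

static int example_worker(void *data)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!have_request()) {		/* hypothetical predicate */
			schedule();		/* sleep until wake_up_process() */
			continue;
		}

		set_current_state(TASK_RUNNING);
		process_request();		/* hypothetical work item */
	}
	return 0;
}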

View File

@@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 	switch (ent->device) {
 	case ADF_C3XXX_PCI_DEVICE_ID:
@@ -235,8 +236,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Find and map all the device's BARS */
 	i = 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
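
This same two-line change repeats across the five qat probe functions that follow: for_each_set_bit() walks an unsigned long bitmap, so taking the address of an int and casting it reads past the variable on 64-bit targets. A sketch of the corrected declaration and loop:

unsigned long bar_mask;
unsigned int bar_nr;

bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
	/* map BAR bar_nr ... */
}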

View File

@@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 	switch (ent->device) {
 	case ADF_C3XXXIOV_PCI_DEVICE_ID:
@@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Find and map all the device's BARS */
 	i = 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);

View File

@@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 	switch (ent->device) {
 	case ADF_C62X_PCI_DEVICE_ID:
@@ -235,8 +236,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Find and map all the device's BARS */
 	i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);

View File

@@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 	switch (ent->device) {
 	case ADF_C62XIOV_PCI_DEVICE_ID:
@@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Find and map all the device's BARS */
 	i = 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);

View File

@@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 	switch (ent->device) {
 	case ADF_DH895XCC_PCI_DEVICE_ID:
@@ -237,8 +238,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Find and map all the device's BARS */
 	i = 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);

View File

@@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 	switch (ent->device) {
 	case ADF_DH895XCCIOV_PCI_DEVICE_ID:
@@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Find and map all the device's BARS */
 	i = 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);

View File

@@ -535,6 +535,11 @@ static unsigned long dax_get_unmapped_area(struct file *filp,
 	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
 }
 
+static const struct address_space_operations dev_dax_aops = {
+	.set_page_dirty		= noop_set_page_dirty,
+	.invalidatepage		= noop_invalidatepage,
+};
+
 static int dax_open(struct inode *inode, struct file *filp)
 {
 	struct dax_device *dax_dev = inode_dax(inode);
@@ -544,6 +549,7 @@ static int dax_open(struct inode *inode, struct file *filp)
 	dev_dbg(&dev_dax->dev, "trace\n");
 	inode->i_mapping = __dax_inode->i_mapping;
 	inode->i_mapping->host = __dax_inode;
+	inode->i_mapping->a_ops = &dev_dax_aops;
 	filp->f_mapping = inode->i_mapping;
 	filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
 	filp->private_data = dev_dax;

View File

@@ -571,7 +571,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
 		if (ret)
 			goto out_free_descs;
 		lh->descs[i] = desc;
-		count = i;
+		count = i + 1;
 
 		if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
 			set_bit(FLAG_ACTIVE_LOW, &desc->flags);

View File

@@ -258,6 +258,8 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
 {
 	int i;
 
+	cancel_delayed_work_sync(&adev->vce.idle_work);
+
 	if (adev->vce.vcpu_bo == NULL)
 		return 0;
 
@@ -268,7 +270,6 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
 	if (i == AMDGPU_MAX_VCE_HANDLES)
 		return 0;
 
-	cancel_delayed_work_sync(&adev->vce.idle_work);
 	/* TODO: suspending running encoding sessions isn't supported */
 	return -EINVAL;
 }
} }

View File

@@ -153,11 +153,11 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
 	unsigned size;
 	void *ptr;
 
+	cancel_delayed_work_sync(&adev->vcn.idle_work);
+
 	if (adev->vcn.vcpu_bo == NULL)
 		return 0;
 
-	cancel_delayed_work_sync(&adev->vcn.idle_work);
-
 	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
 	ptr = adev->vcn.cpu_addr;

View File

@@ -358,8 +358,8 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
 					struct queue *q,
 					struct qcm_process_device *qpd)
 {
-	int retval;
 	struct mqd_manager *mqd_mgr;
+	int retval;
 
 	mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
 	if (!mqd_mgr)
@@ -387,8 +387,12 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
 	if (!q->properties.is_active)
 		return 0;
 
-	retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
-				   &q->properties, q->process->mm);
+	if (WARN(q->process->mm != current->mm,
+		 "should only run in user thread"))
+		retval = -EFAULT;
+	else
+		retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
+					   &q->properties, current->mm);
 	if (retval)
 		goto out_uninit_mqd;
@@ -545,9 +549,15 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
 		retval = map_queues_cpsch(dqm);
 	else if (q->properties.is_active &&
 		 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
-		  q->properties.type == KFD_QUEUE_TYPE_SDMA))
-		retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
-					   &q->properties, q->process->mm);
+		  q->properties.type == KFD_QUEUE_TYPE_SDMA)) {
+		if (WARN(q->process->mm != current->mm,
+			 "should only run in user thread"))
+			retval = -EFAULT;
+		else
+			retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd,
+						   q->pipe, q->queue,
+						   &q->properties, current->mm);
+	}
 
 out_unlock:
 	dqm_unlock(dqm);
@@ -653,6 +663,7 @@ out:
 static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
 					  struct qcm_process_device *qpd)
 {
+	struct mm_struct *mm = NULL;
 	struct queue *q;
 	struct mqd_manager *mqd_mgr;
 	struct kfd_process_device *pdd;
@@ -686,6 +697,15 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
 		kfd_flush_tlb(pdd);
 	}
 
+	/* Take a safe reference to the mm_struct, which may otherwise
+	 * disappear even while the kfd_process is still referenced.
+	 */
+	mm = get_task_mm(pdd->process->lead_thread);
+	if (!mm) {
+		retval = -EFAULT;
+		goto out;
+	}
+
 	/* activate all active queues on the qpd */
 	list_for_each_entry(q, &qpd->queues_list, list) {
 		if (!q->properties.is_evicted)
@@ -700,14 +720,15 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
 		q->properties.is_evicted = false;
 		q->properties.is_active = true;
 		retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
-				       q->queue, &q->properties,
-				       q->process->mm);
+				       q->queue, &q->properties, mm);
 		if (retval)
 			goto out;
 		dqm->queue_count++;
 	}
 	qpd->evicted = 0;
 out:
+	if (mm)
+		mmput(mm);
 	dqm_unlock(dqm);
 	return retval;
 }
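
The new code pins the mm explicitly because q->process->mm may be freed while the kfd_process is still referenced; get_task_mm() takes a counted reference that mmput() releases. The bare pattern:

struct mm_struct *mm;

mm = get_task_mm(task);		/* task: the owning thread, held elsewhere */
if (!mm)
	return -EFAULT;		/* process has already lost its address space */

/* ... safe to use mm, e.g. pass it to load_mqd() as above ... */

mmput(mm);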

View File

@ -641,6 +641,87 @@ amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
return NULL; return NULL;
} }
static void emulated_link_detect(struct dc_link *link)
{
struct dc_sink_init_data sink_init_data = { 0 };
struct display_sink_capability sink_caps = { 0 };
enum dc_edid_status edid_status;
struct dc_context *dc_ctx = link->ctx;
struct dc_sink *sink = NULL;
struct dc_sink *prev_sink = NULL;
link->type = dc_connection_none;
prev_sink = link->local_sink;
if (prev_sink != NULL)
dc_sink_retain(prev_sink);
switch (link->connector_signal) {
case SIGNAL_TYPE_HDMI_TYPE_A: {
sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
break;
}
case SIGNAL_TYPE_DVI_SINGLE_LINK: {
sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
break;
}
case SIGNAL_TYPE_DVI_DUAL_LINK: {
sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
break;
}
case SIGNAL_TYPE_LVDS: {
sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
sink_caps.signal = SIGNAL_TYPE_LVDS;
break;
}
case SIGNAL_TYPE_EDP: {
sink_caps.transaction_type =
DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
sink_caps.signal = SIGNAL_TYPE_EDP;
break;
}
case SIGNAL_TYPE_DISPLAY_PORT: {
sink_caps.transaction_type =
DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
break;
}
default:
DC_ERROR("Invalid connector type! signal:%d\n",
link->connector_signal);
return;
}
sink_init_data.link = link;
sink_init_data.sink_signal = sink_caps.signal;
sink = dc_sink_create(&sink_init_data);
if (!sink) {
DC_ERROR("Failed to create sink!\n");
return;
}
link->local_sink = sink;
edid_status = dm_helpers_read_local_edid(
link->ctx,
link,
sink);
if (edid_status != EDID_OK)
DC_ERROR("Failed to read EDID");
}
static int dm_resume(void *handle) static int dm_resume(void *handle)
{ {
struct amdgpu_device *adev = handle; struct amdgpu_device *adev = handle;
@ -654,6 +735,7 @@ static int dm_resume(void *handle)
struct drm_plane *plane; struct drm_plane *plane;
struct drm_plane_state *new_plane_state; struct drm_plane_state *new_plane_state;
struct dm_plane_state *dm_new_plane_state; struct dm_plane_state *dm_new_plane_state;
enum dc_connection_type new_connection_type = dc_connection_none;
int ret; int ret;
int i; int i;
@ -684,7 +766,13 @@ static int dm_resume(void *handle)
continue; continue;
mutex_lock(&aconnector->hpd_lock); mutex_lock(&aconnector->hpd_lock);
dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
DRM_ERROR("KMS: Failed to detect connector\n");
if (aconnector->base.force && new_connection_type == dc_connection_none)
emulated_link_detect(aconnector->dc_link);
else
dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
if (aconnector->fake_enable && aconnector->dc_link->local_sink) if (aconnector->fake_enable && aconnector->dc_link->local_sink)
aconnector->fake_enable = false; aconnector->fake_enable = false;
@ -922,6 +1010,7 @@ static void handle_hpd_irq(void *param)
struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param; struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
struct drm_connector *connector = &aconnector->base; struct drm_connector *connector = &aconnector->base;
struct drm_device *dev = connector->dev; struct drm_device *dev = connector->dev;
enum dc_connection_type new_connection_type = dc_connection_none;
/* In case of failure or MST no need to update connector status or notify the OS /* In case of failure or MST no need to update connector status or notify the OS
* since (for MST case) MST does this in it's own context. * since (for MST case) MST does this in it's own context.
@ -931,7 +1020,21 @@ static void handle_hpd_irq(void *param)
if (aconnector->fake_enable) if (aconnector->fake_enable)
aconnector->fake_enable = false; aconnector->fake_enable = false;
-	if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
+	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
+		DRM_ERROR("KMS: Failed to detect connector\n");
+
+	if (aconnector->base.force && new_connection_type == dc_connection_none) {
+		emulated_link_detect(aconnector->dc_link);
+
+		drm_modeset_lock_all(dev);
+		dm_restore_drm_connector_state(dev, connector);
+		drm_modeset_unlock_all(dev);
+
+		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
+			drm_kms_helper_hotplug_event(dev);
+
+	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
 		amdgpu_dm_update_connector_after_detect(aconnector);
@@ -1031,6 +1134,7 @@ static void handle_hpd_rx_irq(void *param)
 	struct drm_device *dev = connector->dev;
 	struct dc_link *dc_link = aconnector->dc_link;
 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
+	enum dc_connection_type new_connection_type = dc_connection_none;

 	/* TODO:Temporary add mutex to protect hpd interrupt not have a gpio
 	 * conflict, after implement i2c helper, this mutex should be
@@ -1042,7 +1146,24 @@ static void handle_hpd_rx_irq(void *param)
 	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
 	    !is_mst_root_connector) {
 		/* Downstream Port status changed. */
-		if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
+		if (!dc_link_detect_sink(dc_link, &new_connection_type))
+			DRM_ERROR("KMS: Failed to detect connector\n");
+
+		if (aconnector->base.force && new_connection_type == dc_connection_none) {
+			emulated_link_detect(dc_link);
+
+			if (aconnector->fake_enable)
+				aconnector->fake_enable = false;
+
+			amdgpu_dm_update_connector_after_detect(aconnector);
+
+			drm_modeset_lock_all(dev);
+			dm_restore_drm_connector_state(dev, connector);
+			drm_modeset_unlock_all(dev);
+
+			drm_kms_helper_hotplug_event(dev);
+		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
 			if (aconnector->fake_enable)
 				aconnector->fake_enable = false;
@@ -1433,6 +1554,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
 	uint32_t link_cnt;
 	int32_t total_overlay_planes, total_primary_planes;
+	enum dc_connection_type new_connection_type = dc_connection_none;

 	link_cnt = dm->dc->caps.max_links;
 	if (amdgpu_dm_mode_config_init(dm->adev)) {
@@ -1499,7 +1621,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 		link = dc_get_link_at_index(dm->dc, i);

-		if (dc_link_detect(link, DETECT_REASON_BOOT)) {
+		if (!dc_link_detect_sink(link, &new_connection_type))
+			DRM_ERROR("KMS: Failed to detect connector\n");
+
+		if (aconnector->base.force && new_connection_type == dc_connection_none) {
+			emulated_link_detect(link);
+			amdgpu_dm_update_connector_after_detect(aconnector);
+
+		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
 			amdgpu_dm_update_connector_after_detect(aconnector);
 			register_backlight_device(dm, link);
 		}
@@ -2494,7 +2623,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 	if (dm_state && dm_state->freesync_capable)
 		stream->ignore_msa_timing_param = true;
 finish:
-	if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL)
+	if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL && aconnector->base.force != DRM_FORCE_ON)
 		dc_sink_release(sink);

 	return stream;
@@ -4504,12 +4633,18 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 	}
 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

-	/* Signal HW programming completion */
-	drm_atomic_helper_commit_hw_done(state);
-
 	if (wait_for_vblank)
 		drm_atomic_helper_wait_for_flip_done(dev, state);

+	/*
+	 * FIXME:
+	 * Delay hw_done() until flip_done() is signaled. This is to block
+	 * another commit from freeing the CRTC state while we're still
+	 * waiting on flip_done.
+	 */
+	drm_atomic_helper_commit_hw_done(state);
+
 	drm_atomic_helper_cleanup_planes(dev, state);

 	/* Finally, drop a runtime PM reference for each newly disabled CRTC,
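The hunks above apply one pattern at three detect sites: first probe only for sink presence with dc_link_detect_sink(), and if the connector is forced on (e.g. via the video= kernel parameter) while nothing is physically attached, install an emulated sink instead of running the full detect. Condensed control flow, with connector_forced standing in for aconnector->base.force (a sketch, not code from this diff):

	enum dc_connection_type type = dc_connection_none;

	if (!dc_link_detect_sink(link, &type))		/* cheap presence check only */
		DRM_ERROR("KMS: Failed to detect connector\n");

	if (connector_forced && type == dc_connection_none)
		emulated_link_detect(link);		/* fake sink so the forced output lights up */
	else if (dc_link_detect(link, reason))		/* normal full detection */
		amdgpu_dm_update_connector_after_detect(aconnector);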

View File

@@ -195,7 +195,7 @@ static bool program_hpd_filter(
 	return result;
 }

-static bool detect_sink(struct dc_link *link, enum dc_connection_type *type)
+bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type)
 {
 	uint32_t is_hpd_high = 0;
 	struct gpio *hpd_pin;
@@ -604,7 +604,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
 	if (link->connector_signal == SIGNAL_TYPE_VIRTUAL)
 		return false;

-	if (false == detect_sink(link, &new_connection_type)) {
+	if (false == dc_link_detect_sink(link, &new_connection_type)) {
 		BREAK_TO_DEBUGGER();
 		return false;
 	}

View File

@@ -215,6 +215,7 @@ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable);

 bool dc_link_is_dp_sink_present(struct dc_link *link);

+bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type);
 /*
  * DPCD access interfaces
  */

View File

@@ -2560,7 +2560,7 @@ static void pplib_apply_display_requirements(
 	dc->prev_display_config = *pp_display_cfg;
 }

-void dce110_set_bandwidth(
+static void dce110_set_bandwidth(
 	struct dc *dc,
 	struct dc_state *context,
 	bool decrease_allowed)

View File

@@ -68,11 +68,6 @@ void dce110_fill_display_configs(
 	const struct dc_state *context,
 	struct dm_pp_display_configuration *pp_display_cfg);

-void dce110_set_bandwidth(
-	struct dc *dc,
-	struct dc_state *context,
-	bool decrease_allowed);
-
 uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context);

 void dp_receiver_power_ctrl(struct dc_link *link, bool on);

View File

@@ -244,17 +244,6 @@ static void dce120_update_dchub(
 	dh_data->dchub_info_valid = false;
 }

-static void dce120_set_bandwidth(
-	struct dc *dc,
-	struct dc_state *context,
-	bool decrease_allowed)
-{
-	if (context->stream_count <= 0)
-		return;
-
-	dce110_set_bandwidth(dc, context, decrease_allowed);
-}
-
 void dce120_hw_sequencer_construct(struct dc *dc)
 {
 	/* All registers used by dce11.2 match those in dce11 in offset and
@@ -263,6 +252,5 @@ void dce120_hw_sequencer_construct(struct dc *dc)
 	dce110_hw_sequencer_construct(dc);
 	dc->hwss.enable_display_power_gating = dce120_enable_display_power_gating;
 	dc->hwss.update_dchub = dce120_update_dchub;
-	dc->hwss.set_bandwidth = dce120_set_bandwidth;
 }

View File

@@ -754,6 +754,7 @@ static int malidp_bind(struct device *dev)
 	drm->irq_enabled = true;

 	ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
+	drm_crtc_vblank_reset(&malidp->crtc);
 	if (ret < 0) {
 		DRM_ERROR("failed to initialise vblank\n");
 		goto vblank_fail;

View File

@@ -384,7 +384,8 @@ static long malidp500_se_calc_mclk(struct malidp_hw_device *hwdev,

 static int malidp500_enable_memwrite(struct malidp_hw_device *hwdev,
 				     dma_addr_t *addrs, s32 *pitches,
-				     int num_planes, u16 w, u16 h, u32 fmt_id)
+				     int num_planes, u16 w, u16 h, u32 fmt_id,
+				     const s16 *rgb2yuv_coeffs)
 {
 	u32 base = MALIDP500_SE_MEMWRITE_BASE;
 	u32 de_base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK);
@@ -416,6 +417,16 @@ static int malidp500_enable_memwrite(struct malidp_hw_device *hwdev,
 	malidp_hw_write(hwdev, MALIDP_DE_H_ACTIVE(w) | MALIDP_DE_V_ACTIVE(h),
 			MALIDP500_SE_MEMWRITE_OUT_SIZE);

+	if (rgb2yuv_coeffs) {
+		int i;
+
+		for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; i++) {
+			malidp_hw_write(hwdev, rgb2yuv_coeffs[i],
+					MALIDP500_SE_RGB_YUV_COEFFS + i * 4);
+		}
+	}
+
 	malidp_hw_setbits(hwdev, MALIDP_SE_MEMWRITE_EN, MALIDP500_SE_CONTROL);

 	return 0;
@@ -658,7 +669,8 @@ static long malidp550_se_calc_mclk(struct malidp_hw_device *hwdev,

 static int malidp550_enable_memwrite(struct malidp_hw_device *hwdev,
 				     dma_addr_t *addrs, s32 *pitches,
-				     int num_planes, u16 w, u16 h, u32 fmt_id)
+				     int num_planes, u16 w, u16 h, u32 fmt_id,
+				     const s16 *rgb2yuv_coeffs)
 {
 	u32 base = MALIDP550_SE_MEMWRITE_BASE;
 	u32 de_base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK);
@@ -689,6 +701,15 @@ static int malidp550_enable_memwrite(struct malidp_hw_device *hwdev,
 	malidp_hw_setbits(hwdev, MALIDP550_SE_MEMWRITE_ONESHOT | MALIDP_SE_MEMWRITE_EN,
 			  MALIDP550_SE_CONTROL);

+	if (rgb2yuv_coeffs) {
+		int i;
+
+		for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; i++) {
+			malidp_hw_write(hwdev, rgb2yuv_coeffs[i],
+					MALIDP550_SE_RGB_YUV_COEFFS + i * 4);
+		}
+	}
+
 	return 0;
 }

View File

@@ -191,7 +191,8 @@ struct malidp_hw {
 	 * @param fmt_id - internal format ID of output buffer
 	 */
 	int (*enable_memwrite)(struct malidp_hw_device *hwdev, dma_addr_t *addrs,
-			       s32 *pitches, int num_planes, u16 w, u16 h, u32 fmt_id);
+			       s32 *pitches, int num_planes, u16 w, u16 h, u32 fmt_id,
+			       const s16 *rgb2yuv_coeffs);

 	/*
 	 * Disable the writing to memory of the next frame's content.

View File

@@ -26,6 +26,8 @@ struct malidp_mw_connector_state {
 	s32 pitches[2];
 	u8 format;
 	u8 n_planes;
+	bool rgb2yuv_initialized;
+	const s16 *rgb2yuv_coeffs;
 };

 static int malidp_mw_connector_get_modes(struct drm_connector *connector)
@@ -84,7 +86,7 @@ static void malidp_mw_connector_destroy(struct drm_connector *connector)
 static struct drm_connector_state *
 malidp_mw_connector_duplicate_state(struct drm_connector *connector)
 {
-	struct malidp_mw_connector_state *mw_state;
+	struct malidp_mw_connector_state *mw_state, *mw_current_state;

 	if (WARN_ON(!connector->state))
 		return NULL;
@@ -93,7 +95,10 @@ malidp_mw_connector_duplicate_state(struct drm_connector *connector)
 	if (!mw_state)
 		return NULL;

-	/* No need to preserve any of our driver-local data */
+	mw_current_state = to_mw_state(connector->state);
+	mw_state->rgb2yuv_coeffs = mw_current_state->rgb2yuv_coeffs;
+	mw_state->rgb2yuv_initialized = mw_current_state->rgb2yuv_initialized;
+
 	__drm_atomic_helper_connector_duplicate_state(connector, &mw_state->base);

 	return &mw_state->base;
@@ -108,6 +113,13 @@ static const struct drm_connector_funcs malidp_mw_connector_funcs = {
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };

+static const s16 rgb2yuv_coeffs_bt709_limited[MALIDP_COLORADJ_NUM_COEFFS] = {
+	 47,  157,   16,
+	-26,  -87,  112,
+	112, -102,  -10,
+	 16,  128,  128
+};
+
 static int
 malidp_mw_encoder_atomic_check(struct drm_encoder *encoder,
 			       struct drm_crtc_state *crtc_state,
@@ -157,6 +169,9 @@ malidp_mw_encoder_atomic_check(struct drm_encoder *encoder,
 	}
 	mw_state->n_planes = n_planes;

+	if (fb->format->is_yuv)
+		mw_state->rgb2yuv_coeffs = rgb2yuv_coeffs_bt709_limited;
+
 	return 0;
 }

@@ -239,10 +254,12 @@ void malidp_mw_atomic_commit(struct drm_device *drm,

 		drm_writeback_queue_job(mw_conn, conn_state->writeback_job);
 		conn_state->writeback_job = NULL;
 		hwdev->hw->enable_memwrite(hwdev, mw_state->addrs,
 					   mw_state->pitches, mw_state->n_planes,
-					   fb->width, fb->height, mw_state->format);
+					   fb->width, fb->height, mw_state->format,
+					   !mw_state->rgb2yuv_initialized ?
+					   mw_state->rgb2yuv_coeffs : NULL);
+		mw_state->rgb2yuv_initialized = !!mw_state->rgb2yuv_coeffs;
 	} else {
 		DRM_DEV_DEBUG_DRIVER(drm->dev, "Disable memwrite\n");
 		hwdev->hw->disable_memwrite(hwdev);
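The coefficient table added above looks like the BT.709 limited-range RGB-to-YCbCr matrix in Q8 fixed point (each entry is the real coefficient times 256, rounded), followed by the offsets 16, 128, 128. Under that assumption, the computation the hardware is being programmed for would be (illustrative sketch only; rgb2yuv_q8 is not a function in this diff):

	/* needs linux/types.h for s16/u8; c[12] = 3x3 Q8 matrix, then 3 offsets */
	static void rgb2yuv_q8(const s16 c[12], u8 r, u8 g, u8 b, u8 out[3])
	{
		int i;

		for (i = 0; i < 3; i++) {
			int v = (c[3 * i + 0] * r + c[3 * i + 1] * g + c[3 * i + 2] * b) >> 8;

			v += c[9 + i];	/* +16 for Y, +128 for Cb/Cr */
			out[i] = v < 0 ? 0 : (v > 255 ? 255 : v);
		}
	}

A sanity check: for white (255, 255, 255) the Y row gives (((47 + 157 + 16) * 255) >> 8) + 16 = 235, the top of the limited range.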

View File

@@ -205,6 +205,7 @@
 #define MALIDP500_SE_BASE		0x00c00
 #define MALIDP500_SE_CONTROL		0x00c0c
 #define MALIDP500_SE_MEMWRITE_OUT_SIZE	0x00c2c
+#define MALIDP500_SE_RGB_YUV_COEFFS	0x00C74
 #define MALIDP500_SE_MEMWRITE_BASE	0x00e00
 #define MALIDP500_DC_IRQ_BASE		0x00f00
 #define MALIDP500_CONFIG_VALID		0x00f00
@@ -238,6 +239,7 @@
 #define MALIDP550_SE_CONTROL		0x08010
 #define   MALIDP550_SE_MEMWRITE_ONESHOT	(1 << 7)
 #define MALIDP550_SE_MEMWRITE_OUT_SIZE	0x08030
+#define MALIDP550_SE_RGB_YUV_COEFFS	0x08078
 #define MALIDP550_SE_MEMWRITE_BASE	0x08100
 #define MALIDP550_DC_BASE		0x0c000
 #define MALIDP550_DC_CONTROL		0x0c010

View File

@@ -63,20 +63,21 @@ static void drm_client_close(struct drm_client_dev *client)
 EXPORT_SYMBOL(drm_client_close);

 /**
- * drm_client_new - Create a DRM client
+ * drm_client_init - Initialise a DRM client
  * @dev: DRM device
  * @client: DRM client
  * @name: Client name
  * @funcs: DRM client functions (optional)
  *
+ * This initialises the client and opens a &drm_file. Use drm_client_add() to complete the process.
  * The caller needs to hold a reference on @dev before calling this function.
  * The client is freed when the &drm_device is unregistered. See drm_client_release().
  *
  * Returns:
  * Zero on success or negative error code on failure.
  */
-int drm_client_new(struct drm_device *dev, struct drm_client_dev *client,
-		   const char *name, const struct drm_client_funcs *funcs)
+int drm_client_init(struct drm_device *dev, struct drm_client_dev *client,
+		    const char *name, const struct drm_client_funcs *funcs)
 {
 	int ret;
@@ -95,10 +96,6 @@ int drm_client_new(struct drm_device *dev, struct drm_client_dev *client,
 	if (ret)
 		goto err_put_module;

-	mutex_lock(&dev->clientlist_mutex);
-	list_add(&client->list, &dev->clientlist);
-	mutex_unlock(&dev->clientlist_mutex);
-
 	drm_dev_get(dev);

 	return 0;
@@ -109,13 +106,33 @@ err_put_module:

 	return ret;
 }
-EXPORT_SYMBOL(drm_client_new);
+EXPORT_SYMBOL(drm_client_init);
+
+/**
+ * drm_client_add - Add client to the device list
+ * @client: DRM client
+ *
+ * Add the client to the &drm_device client list to activate its callbacks.
+ * @client must be initialized by a call to drm_client_init(). After
+ * drm_client_add() it is no longer permissible to call drm_client_release()
+ * directly (outside the unregister callback), instead cleanup will happen
+ * automatically on driver unload.
+ */
+void drm_client_add(struct drm_client_dev *client)
+{
+	struct drm_device *dev = client->dev;
+
+	mutex_lock(&dev->clientlist_mutex);
+	list_add(&client->list, &dev->clientlist);
+	mutex_unlock(&dev->clientlist_mutex);
+}
+EXPORT_SYMBOL(drm_client_add);

 /**
  * drm_client_release - Release DRM client resources
  * @client: DRM client
  *
- * Releases resources by closing the &drm_file that was opened by drm_client_new().
+ * Releases resources by closing the &drm_file that was opened by drm_client_init().
  * It is called automatically if the &drm_client_funcs.unregister callback is _not_ set.
  *
  * This function should only be called from the unregister callback. An exception
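The split turns client setup into two steps: drm_client_init() can still fail and be unwound with drm_client_release(), while drm_client_add() publishes the client, after which cleanup happens automatically on unload. A hypothetical driver following the new contract, mirroring the fbdev conversions later in this diff:

	static int example_client_setup(struct drm_device *dev,
					struct drm_client_dev *client)
	{
		int ret;

		ret = drm_client_init(dev, client, "example", NULL);
		if (ret)
			return ret;	/* not on any list yet; plain error return */

		/* setup that may still fail can call drm_client_release() here */

		drm_client_add(client);	/* callbacks may fire from this point on */
		return 0;
	}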

View File

@@ -160,7 +160,7 @@ struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,

 	fb_helper = &fbdev_cma->fb_helper;

-	ret = drm_client_new(dev, &fb_helper->client, "fbdev", NULL);
+	ret = drm_client_init(dev, &fb_helper->client, "fbdev", NULL);
 	if (ret)
 		goto err_free;
@@ -169,6 +169,8 @@ struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
 	if (ret)
 		goto err_client_put;

+	drm_client_add(&fb_helper->client);
+
 	return fbdev_cma;

 err_client_put:
View File

@@ -3218,12 +3218,14 @@ int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
 	if (!fb_helper)
 		return -ENOMEM;

-	ret = drm_client_new(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
+	ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
 	if (ret) {
 		kfree(fb_helper);
 		return ret;
 	}

+	drm_client_add(&fb_helper->client);
+
 	fb_helper->preferred_bpp = preferred_bpp;

 	drm_fbdev_client_hotplug(&fb_helper->client);
View File

@@ -566,14 +566,14 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
 	lessee_priv->is_master = 1;
 	lessee_priv->authenticated = 1;

-	/* Hook up the fd */
-	fd_install(fd, lessee_file);
-
 	/* Pass fd back to userspace */
 	DRM_DEBUG_LEASE("Returning fd %d id %d\n", fd, lessee->lessee_id);
 	cl->fd = fd;
 	cl->lessee_id = lessee->lessee_id;

+	/* Hook up the fd */
+	fd_install(fd, lessee_file);
+
 	DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl succeeded\n");
 	return 0;
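The move matters because fd_install() makes the file instantly reachable from userspace: another thread in the same process can start using the fd before the ioctl even returns, so every field userspace might observe has to be written first. The safe ordering, sketched with generic names (not code from this diff):

	int fd = get_unused_fd_flags(O_CLOEXEC);

	if (fd < 0)
		return fd;

	args->fd = fd;		/* finish filling in the reply... */
	args->id = object_id;

	fd_install(fd, file);	/* ...and only then publish the fd */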

View File

@@ -24,7 +24,6 @@
 #include <linux/err.h>
 #include <linux/module.h>

-#include <drm/drm_device.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_panel.h>
@@ -105,13 +104,6 @@ int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector)
 	if (panel->connector)
 		return -EBUSY;

-	panel->link = device_link_add(connector->dev->dev, panel->dev, 0);
-	if (!panel->link) {
-		dev_err(panel->dev, "failed to link panel to %s\n",
-			dev_name(connector->dev->dev));
-		return -EINVAL;
-	}
-
 	panel->connector = connector;
 	panel->drm = connector->dev;
@@ -133,8 +125,6 @@ EXPORT_SYMBOL(drm_panel_attach);
  */
 int drm_panel_detach(struct drm_panel *panel)
 {
-	device_link_del(panel->link);
-
 	panel->connector = NULL;
 	panel->drm = NULL;

View File

@@ -97,6 +97,8 @@ static int drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj,
 {
 	int ret;

+	WARN_ON(*fence);
+
 	*fence = drm_syncobj_fence_get(syncobj);
 	if (*fence)
 		return 1;
@@ -743,6 +745,9 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,

 	if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
 		for (i = 0; i < count; ++i) {
+			if (entries[i].fence)
+				continue;
+
 			drm_syncobj_fence_get_or_add_callback(syncobjs[i],
 							      &entries[i].fence,
 							      &entries[i].syncobj_cb,

View File

@@ -592,8 +592,6 @@ static int etnaviv_pdev_probe(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	struct component_match *match = NULL;

-	dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
-
 	if (!dev->platform_data) {
 		struct device_node *core_node;
@@ -655,13 +653,30 @@ static int __init etnaviv_init(void)
 	for_each_compatible_node(np, NULL, "vivante,gc") {
 		if (!of_device_is_available(np))
 			continue;

-		pdev = platform_device_register_simple("etnaviv", -1,
-						       NULL, 0);
-		if (IS_ERR(pdev)) {
-			ret = PTR_ERR(pdev);
+		pdev = platform_device_alloc("etnaviv", -1);
+		if (!pdev) {
+			ret = -ENOMEM;
+			of_node_put(np);
+			goto unregister_platform_driver;
+		}
+
+		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(40);
+		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+
+		/*
+		 * Apply the same DMA configuration to the virtual etnaviv
+		 * device as the GPU we found. This assumes that all Vivante
+		 * GPUs in the system share the same DMA constraints.
+		 */
+		of_dma_configure(&pdev->dev, np, true);
+
+		ret = platform_device_add(pdev);
+		if (ret) {
+			platform_device_put(pdev);
 			of_node_put(np);
 			goto unregister_platform_driver;
 		}
+
 		etnaviv_drm = pdev;
 		of_node_put(np);
 		break;
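platform_device_register_simple() adds the device before any DMA masks or OF-based DMA setup can be applied, which is why the diff switches to the three-step alloc/configure/add sequence. The general shape (assuming a matching device_node np is in hand, as above):

	pdev = platform_device_alloc("example", -1);	/* not yet visible */
	if (!pdev)
		return -ENOMEM;

	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(40);
	pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
	of_dma_configure(&pdev->dev, np, true);		/* inherit np's DMA setup */

	ret = platform_device_add(pdev);		/* now the device appears */
	if (ret)
		platform_device_put(pdev);		/* frees the allocation */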

View File

@@ -55,37 +55,12 @@ static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
 static inline int __exynos_iommu_create_mapping(struct exynos_drm_private *priv,
 					unsigned long start, unsigned long size)
 {
-	struct iommu_domain *domain;
-	int ret;
-
-	domain = iommu_domain_alloc(priv->dma_dev->bus);
-	if (!domain)
-		return -ENOMEM;
-
-	ret = iommu_get_dma_cookie(domain);
-	if (ret)
-		goto free_domain;
-
-	ret = iommu_dma_init_domain(domain, start, size, NULL);
-	if (ret)
-		goto put_cookie;
-
-	priv->mapping = domain;
+	priv->mapping = iommu_get_domain_for_dev(priv->dma_dev);
 	return 0;
-
-put_cookie:
-	iommu_put_dma_cookie(domain);
-free_domain:
-	iommu_domain_free(domain);
-	return ret;
 }

 static inline void __exynos_iommu_release_mapping(struct exynos_drm_private *priv)
 {
-	struct iommu_domain *domain = priv->mapping;
-
-	iommu_put_dma_cookie(domain);
-	iommu_domain_free(domain);
 	priv->mapping = NULL;
 }
@@ -94,7 +69,9 @@ static inline int __exynos_iommu_attach(struct exynos_drm_private *priv,
 {
 	struct iommu_domain *domain = priv->mapping;

-	return iommu_attach_device(domain, dev);
+	if (dev != priv->dma_dev)
+		return iommu_attach_device(domain, dev);
+	return 0;
 }

 static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
@@ -102,7 +79,8 @@ static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
 {
 	struct iommu_domain *domain = priv->mapping;

-	iommu_detach_device(domain, dev);
+	if (dev != priv->dma_dev)
+		iommu_detach_device(domain, dev);
 }

 #else
 #error Unsupported architecture and IOMMU/DMA-mapping glue code

View File

@@ -191,7 +191,8 @@ static irqreturn_t tda9950_irq(int irq, void *data)
 			break;
 		}
 		/* TDA9950 executes all retries for us */
-		tx_status |= CEC_TX_STATUS_MAX_RETRIES;
+		if (tx_status != CEC_TX_STATUS_OK)
+			tx_status |= CEC_TX_STATUS_MAX_RETRIES;
 		cec_transmit_done(priv->adap, tx_status, arb_lost_cnt,
 				  nack_cnt, 0, err_cnt);
 		break;
@@ -310,7 +311,7 @@ static void tda9950_release(struct tda9950_priv *priv)
 	/* Wait up to .5s for it to signal non-busy */
 	do {
 		csr = tda9950_read(client, REG_CSR);
-		if (!(csr & CSR_BUSY) || --timeout)
+		if (!(csr & CSR_BUSY) || !--timeout)
 			break;
 		msleep(10);
 	} while (1);
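The second hunk is a one-character countdown fix: with the old test, --timeout evaluates non-zero on the very first busy poll, so the loop gave up after a single read instead of polling for the advertised half second (presumably 50 iterations of a 10 ms sleep; the initialisation of timeout is not shown in this hunk). The corrected loop breaks only when the chip reports non-busy or the countdown genuinely expires:

	do {
		csr = tda9950_read(client, REG_CSR);
		if (!(csr & CSR_BUSY) || !--timeout)	/* !--timeout: expired */
			break;
		msleep(10);
	} while (1);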

View File

@@ -232,6 +232,20 @@ static bool compress_init(struct compress *c)
 	return true;
 }

+static void *compress_next_page(struct drm_i915_error_object *dst)
+{
+	unsigned long page;
+
+	if (dst->page_count >= dst->num_pages)
+		return ERR_PTR(-ENOSPC);
+
+	page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
+	if (!page)
+		return ERR_PTR(-ENOMEM);
+
+	return dst->pages[dst->page_count++] = (void *)page;
+}
+
 static int compress_page(struct compress *c,
 			 void *src,
 			 struct drm_i915_error_object *dst)
@@ -245,19 +259,14 @@ static int compress_page(struct compress *c,

 	do {
 		if (zstream->avail_out == 0) {
-			unsigned long page;
+			zstream->next_out = compress_next_page(dst);
+			if (IS_ERR(zstream->next_out))
+				return PTR_ERR(zstream->next_out);

-			page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
-			if (!page)
-				return -ENOMEM;
-
-			dst->pages[dst->page_count++] = (void *)page;
-
-			zstream->next_out = (void *)page;
 			zstream->avail_out = PAGE_SIZE;
 		}

-		if (zlib_deflate(zstream, Z_SYNC_FLUSH) != Z_OK)
+		if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
 			return -EIO;
 	} while (zstream->avail_in);
@@ -268,19 +277,42 @@ static int compress_page(struct compress *c,
 	return 0;
 }

+static int compress_flush(struct compress *c,
+			  struct drm_i915_error_object *dst)
+{
+	struct z_stream_s *zstream = &c->zstream;
+
+	do {
+		switch (zlib_deflate(zstream, Z_FINISH)) {
+		case Z_OK: /* more space requested */
+			zstream->next_out = compress_next_page(dst);
+			if (IS_ERR(zstream->next_out))
+				return PTR_ERR(zstream->next_out);
+
+			zstream->avail_out = PAGE_SIZE;
+			break;
+
+		case Z_STREAM_END:
+			goto end;
+
+		default: /* any error */
+			return -EIO;
+		}
+	} while (1);
+
+end:
+	memset(zstream->next_out, 0, zstream->avail_out);
+	dst->unused = zstream->avail_out;
+	return 0;
+}
+
 static void compress_fini(struct compress *c,
 			  struct drm_i915_error_object *dst)
 {
 	struct z_stream_s *zstream = &c->zstream;

-	if (dst) {
-		zlib_deflate(zstream, Z_FINISH);
-		dst->unused = zstream->avail_out;
-	}
-
 	zlib_deflateEnd(zstream);
 	kfree(zstream->workspace);

 	if (c->tmp)
 		free_page((unsigned long)c->tmp);
 }
@@ -319,6 +351,12 @@ static int compress_page(struct compress *c,
 	return 0;
 }

+static int compress_flush(struct compress *c,
+			  struct drm_i915_error_object *dst)
+{
+	return 0;
+}
+
 static void compress_fini(struct compress *c,
 			  struct drm_i915_error_object *dst)
 {
@@ -917,6 +955,7 @@ i915_error_object_create(struct drm_i915_private *i915,
 	unsigned long num_pages;
 	struct sgt_iter iter;
 	dma_addr_t dma;
+	int ret;

 	if (!vma)
 		return NULL;
@@ -930,6 +969,7 @@ i915_error_object_create(struct drm_i915_private *i915,
 	dst->gtt_offset = vma->node.start;
 	dst->gtt_size = vma->node.size;
+	dst->num_pages = num_pages;
 	dst->page_count = 0;
 	dst->unused = 0;
@@ -938,28 +978,26 @@ i915_error_object_create(struct drm_i915_private *i915,
 		return NULL;
 	}

+	ret = -EINVAL;
 	for_each_sgt_dma(dma, iter, vma->pages) {
 		void __iomem *s;
-		int ret;

 		ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);

 		s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
 		ret = compress_page(&compress, (void __force *)s, dst);
 		io_mapping_unmap_atomic(s);
 		if (ret)
-			goto unwind;
+			break;
 	}
-	goto out;

-unwind:
-	while (dst->page_count--)
-		free_page((unsigned long)dst->pages[dst->page_count]);
-	kfree(dst);
-	dst = NULL;
+	if (ret || compress_flush(&compress, dst)) {
+		while (dst->page_count--)
+			free_page((unsigned long)dst->pages[dst->page_count]);
+		kfree(dst);
+		dst = NULL;
+	}

-out:
 	compress_fini(&compress, dst);
 	ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
 	return dst;
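Two zlib details drive the rework above: feeding pages with Z_SYNC_FLUSH forced a flush point after every page and hurt the compression ratio, so compress_page() now passes Z_NO_FLUSH, and the stream is terminated exactly once by compress_flush(), which loops on Z_FINISH until Z_STREAM_END, growing the output for as long as zlib keeps returning Z_OK. The finish loop reduced to its core (grow_output() is a hypothetical stand-in for compress_next_page() above):

	for (;;) {
		switch (zlib_deflate(zstream, Z_FINISH)) {
		case Z_OK:			/* needs more output space */
			if (grow_output(zstream))
				return -ENOMEM;
			break;
		case Z_STREAM_END:		/* stream fully terminated */
			return 0;
		default:
			return -EIO;
		}
	}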

View File

@@ -135,6 +135,7 @@ struct i915_gpu_state {
 struct drm_i915_error_object {
 	u64 gtt_offset;
 	u64 gtt_size;
+	int num_pages;
 	int page_count;
 	int unused;
 	u32 *pages[0];

View File

@@ -3091,36 +3091,27 @@ gen11_gt_irq_handler(struct drm_i915_private * const i915,
 	spin_unlock(&i915->irq_lock);
 }

-static void
-gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl,
-		      u32 *iir)
+static u32
+gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl)
 {
 	void __iomem * const regs = dev_priv->regs;
+	u32 iir;

 	if (!(master_ctl & GEN11_GU_MISC_IRQ))
-		return;
+		return 0;

-	*iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
-	if (likely(*iir))
-		raw_reg_write(regs, GEN11_GU_MISC_IIR, *iir);
+	iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
+	if (likely(iir))
+		raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
+
+	return iir;
 }

 static void
-gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv,
-			  const u32 master_ctl, const u32 iir)
+gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, const u32 iir)
 {
-	if (!(master_ctl & GEN11_GU_MISC_IRQ))
-		return;
-
-	if (unlikely(!iir)) {
-		DRM_ERROR("GU_MISC iir blank!\n");
-		return;
-	}
-
 	if (iir & GEN11_GU_MISC_GSE)
 		intel_opregion_asle_intr(dev_priv);
+	else
+		DRM_ERROR("Unexpected GU_MISC interrupt 0x%x\n", iir);
 }

 static irqreturn_t gen11_irq_handler(int irq, void *arg)
@@ -3157,12 +3148,12 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
 		enable_rpm_wakeref_asserts(i915);
 	}

-	gen11_gu_misc_irq_ack(i915, master_ctl, &gu_misc_iir);
+	gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);

 	/* Acknowledge and enable interrupts. */
 	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl);

-	gen11_gu_misc_irq_handler(i915, master_ctl, gu_misc_iir);
+	gen11_gu_misc_irq_handler(i915, gu_misc_iir);

 	return IRQ_HANDLED;
 }

Some files were not shown because too many files have changed in this diff.