Merge branch 'perf/urgent' into perf/core

Conflicts:
	kernel/events/hw_breakpoint.c

Signed-off-by: Ingo Molnar <mingo@kernel.org>

commit 2d074918fb

@@ -1,110 +1,139 @@
What:		/sys/class/ata_...
Date:		August 2008
Contact:	Gwendal Grignou <gwendal@google.com>
Description:

		Provide a place in sysfs for storing the ATA topology of the system. This allows
		retrieving various information about ATA objects.
		Provide a place in sysfs for storing the ATA topology of the
		system. This allows retrieving various information about ATA
		objects.

Files under /sys/class/ata_port
-------------------------------

For each port, a directory ataX is created where X is the ata_port_id of
the port. The device parent is the ata host device.
For each port, a directory ataX is created where X is the ata_port_id of the
port. The device parent is the ata host device.

idle_irq (read)

	Number of IRQ received by the port while idle [some ata HBA only].

What:		/sys/class/ata_port/ataX/nr_pmp_links
What:		/sys/class/ata_port/ataX/idle_irq
Date:		May, 2010
KernelVersion:	v2.6.37
Contact:	Gwendal Grignou <gwendal@chromium.org>
Description:
		nr_pmp_links:	(RO) If a SATA Port Multiplier (PM) is
				connected, the number of links behind it.

nr_pmp_links (read)

		idle_irq:	(RO) Number of IRQ received by the port while
				idle [some ata HBA only].

	If a SATA Port Multiplier (PM) is connected, number of link behind it.

What:		/sys/class/ata_port/ataX/port_no
Date:		May, 2013
KernelVersion:	v3.11
Contact:	Gwendal Grignou <gwendal@chromium.org>
Description:
		(RO) Host local port number. While registering host controller,
		port numbers are tracked based upon number of ports available on
		the controller. This attribute is needed by udev for composing
		persistent links in /dev/disk/by-path.

Files under /sys/class/ata_link
-------------------------------

Behind each port, there is a ata_link. If there is a SATA PM in the
topology, 15 ata_link objects are created.
Behind each port, there is a ata_link. If there is a SATA PM in the topology, 15
ata_link objects are created.

If a link is behind a port, the directory name is linkX, where X is
ata_port_id of the port.
If a link is behind a PM, its name is linkX.Y where X is ata_port_id
of the parent port and Y the PM port.
If a link is behind a port, the directory name is linkX, where X is ata_port_id
of the port. If a link is behind a PM, its name is linkX.Y where X is
ata_port_id of the parent port and Y the PM port.

hw_sata_spd_limit

	Maximum speed supported by the connected SATA device.

What:		/sys/class/ata_link/linkX[.Y]/hw_sata_spd_limit
What:		/sys/class/ata_link/linkX[.Y]/sata_spd_limit
What:		/sys/class/ata_link/linkX[.Y]/sata_spd
Date:		May, 2010
KernelVersion:	v2.6.37
Contact:	Gwendal Grignou <gwendal@chromium.org>
Description:
		hw_sata_spd_limit:	(RO) Maximum speed supported by the
					connected SATA device.

sata_spd_limit

		sata_spd_limit:		(RO) Maximum speed imposed by libata.

	Maximum speed imposed by libata.

		sata_spd:		(RO) Current speed of the link
					eg. 1.5, 3 Gbps etc.

sata_spd

	Current speed of the link [1.5, 3Gps,...].

Files under /sys/class/ata_device
---------------------------------

Behind each link, up to two ata device are created.
The name of the directory is devX[.Y].Z where:
- X is ata_port_id of the port where the device is connected,
- Y the port of the PM if any, and
- Z the device id: for PATA, there is usually 2 devices [0,1],
  only 1 for SATA.
Behind each link, up to two ata devices are created.
The name of the directory is devX[.Y].Z where:
- X is ata_port_id of the port where the device is connected,
- Y the port of the PM if any, and
- Z the device id: for PATA, there is usually 2 devices [0,1], only 1 for SATA.

class

	Device class. Can be "ata" for disk, "atapi" for packet device,
	"pmp" for PM, or "none" if no device was found behind the link.

dma_mode

What:		/sys/class/ata_device/devX[.Y].Z/spdn_cnt
What:		/sys/class/ata_device/devX[.Y].Z/gscr
What:		/sys/class/ata_device/devX[.Y].Z/ering
What:		/sys/class/ata_device/devX[.Y].Z/id
What:		/sys/class/ata_device/devX[.Y].Z/pio_mode
What:		/sys/class/ata_device/devX[.Y].Z/xfer_mode
What:		/sys/class/ata_device/devX[.Y].Z/dma_mode
What:		/sys/class/ata_device/devX[.Y].Z/class
Date:		May, 2010
KernelVersion:	v2.6.37
Contact:	Gwendal Grignou <gwendal@chromium.org>
Description:
		spdn_cnt:	(RO) Number of times libata decided to lower the
				speed of link due to errors.

	Transfer modes supported by the device when in DMA mode.
	Mostly used by PATA device.

		gscr:		(RO) Cached result of the dump of PM GSCR
				register. Valid registers are:

pio_mode

				0:	SATA_PMP_GSCR_PROD_ID,
				1:	SATA_PMP_GSCR_REV,
				2:	SATA_PMP_GSCR_PORT_INFO,
				32:	SATA_PMP_GSCR_ERROR,
				33:	SATA_PMP_GSCR_ERROR_EN,
				64:	SATA_PMP_GSCR_FEAT,
				96:	SATA_PMP_GSCR_FEAT_EN,
				130:	SATA_PMP_GSCR_SII_GPIO

	Transfer modes supported by the device when in PIO mode.
	Mostly used by PATA device.

				Only valid if the device is a PM.

xfer_mode

		ering:		(RO) Formatted output of the error ring of the
				device.

	Current transfer mode.

		id:		(RO) Cached result of IDENTIFY command, as
				described in ATA8 7.16 and 7.17. Only valid if
				the device is not a PM.

id

		pio_mode:	(RO) Transfer modes supported by the device when
				in PIO mode. Mostly used by PATA device.

	Cached result of IDENTIFY command, as described in ATA8 7.16 and 7.17.
	Only valid if the device is not a PM.

		xfer_mode:	(RO) Current transfer mode

gscr

		dma_mode:	(RO) Transfer modes supported by the device when
				in DMA mode. Mostly used by PATA device.

	Cached result of the dump of PM GSCR register.
	Valid registers are:
	0:	SATA_PMP_GSCR_PROD_ID,
	1:	SATA_PMP_GSCR_REV,
	2:	SATA_PMP_GSCR_PORT_INFO,
	32:	SATA_PMP_GSCR_ERROR,
	33:	SATA_PMP_GSCR_ERROR_EN,
	64:	SATA_PMP_GSCR_FEAT,
	96:	SATA_PMP_GSCR_FEAT_EN,
	130:	SATA_PMP_GSCR_SII_GPIO
	Only valid if the device is a PM.

		class:		(RO) Device class. Can be "ata" for disk,
				"atapi" for packet device, "pmp" for PM, or
				"none" if no device was found behind the link.

trim

	Shows the DSM TRIM mode currently used by the device. Valid
	values are:
	unsupported:		Drive does not support DSM TRIM
	unqueued:		Drive supports unqueued DSM TRIM only
	queued:			Drive supports queued DSM TRIM
	forced_unqueued:	Drive's queued DSM support is known to be
				buggy and only unqueued TRIM commands
				are sent

What:		/sys/class/ata_device/devX[.Y].Z/trim
Date:		May, 2015
KernelVersion:	v4.10
Contact:	Gwendal Grignou <gwendal@chromium.org>
Description:
		(RO) Shows the DSM TRIM mode currently used by the device. Valid
		values are:

spdn_cnt

		unsupported:		Drive does not support DSM TRIM

	Number of time libata decided to lower the speed of link due to errors.

		unqueued:		Drive supports unqueued DSM TRIM only

ering

		queued:			Drive supports queued DSM TRIM

	Formatted output of the error ring of the device.

		forced_unqueued:	Drive's queued DSM support is known to
					be buggy and only unqueued TRIM commands
					are sent
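The attributes above are plain text files, so user space only has to open and read them. Below is a minimal C sketch (not part of this commit) printing the DSM TRIM mode; the device directory "dev1.0" is an assumption about the local topology.

	/* Hypothetical example: read the trim attribute of device dev1.0. */
	#include <stdio.h>

	int main(void)
	{
		char mode[64];
		FILE *f = fopen("/sys/class/ata_device/dev1.0/trim", "r");

		if (!f) {
			perror("trim");	/* attribute exists since v4.10 */
			return 1;
		}
		if (fgets(mode, sizeof(mode), f))
			printf("DSM TRIM mode: %s", mode);
		fclose(f);
		return 0;
	}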
@@ -0,0 +1,58 @@
What:		/sys/block/*/device/sw_activity
Date:		Jun, 2008
KernelVersion:	v2.6.27
Contact:	linux-ide@vger.kernel.org
Description:
		(RW) Used by drivers which support software controlled activity
		LEDs.

		It has the following valid values:

		0	OFF - the LED is not activated on activity
		1	BLINK_ON - the LED blinks on every 10ms when activity is
			detected.
		2	BLINK_OFF - the LED is on when idle, and blinks off
			every 10ms when activity is detected.

		Note that the user must turn sw_activity OFF if they wish to
		control the activity LED via the em_message file.


What:		/sys/block/*/device/unload_heads
Date:		Sep, 2008
KernelVersion:	v2.6.28
Contact:	linux-ide@vger.kernel.org
Description:
		(RW) Hard disk shock protection

		Writing an integer value to this file will take the heads of the
		respective drive off the platter and block all I/O operations
		for the specified number of milliseconds.

		- If the device does not support the unload heads feature,
		  access is denied with -EOPNOTSUPP.
		- The maximal value accepted for a timeout is 30000
		  milliseconds.
		- A previously set timeout can be cancelled and disk can resume
		  normal operation immediately by specifying a timeout of 0.
		- Some hard drives only comply with an earlier version of the
		  ATA standard, but support the unload feature nonetheless.
		  There is no safe way Linux can detect these devices, so this
		  is not enabled by default. If it is known that your device
		  does support the unload feature, then you can tell the kernel
		  to enable it by writing -1. It can be disabled again by
		  writing -2.
		- Values below -2 are rejected with -EINVAL

		For more information, see
		Documentation/laptops/disk-shock-protection.txt


What:		/sys/block/*/device/ncq_prio_enable
Date:		Oct, 2016
KernelVersion:	v4.10
Contact:	linux-ide@vger.kernel.org
Description:
		(RW) Write to the file to turn on or off the SATA NCQ (native
		command queueing) support. By default this feature is turned
		off.
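As a usage illustration for unload_heads (a sketch, not from this commit): writing a millisecond count parks the heads. The device name "sda" below is an assumption and must be replaced with the real disk.

	/*
	 * Hypothetical example: park the heads of /dev/sda for two seconds.
	 * The write fails with EOPNOTSUPP if the drive cannot unload heads.
	 */
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		const char *path = "/sys/block/sda/device/unload_heads";
		const char *timeout_ms = "2000";	/* maximum accepted: 30000 */
		int fd = open(path, O_WRONLY);

		if (fd < 0) {
			perror(path);
			return 1;
		}
		if (write(fd, timeout_ms, strlen(timeout_ms)) < 0)
			perror("write");
		close(fd);
		return 0;
	}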
@@ -27,3 +27,92 @@ Description: This file contains the current status of the "SSD Smart Path"
		the direct i/o path to physical devices. This setting is
		controller wide, affecting all configured logical drives on the
		controller. This file is readable and writable.

What:		/sys/class/scsi_host/hostX/link_power_management_policy
Date:		Oct, 2007
KernelVersion:	v2.6.24
Contact:	linux-ide@vger.kernel.org
Description:
		(RW) This parameter allows the user to read and set the link
		(interface) power management.

		There are four possible options:

		min_power: Tell the controller to try to make the link use the
		least possible power when possible. This may sacrifice some
		performance due to increased latency when coming out of lower
		power states.

		max_performance: Generally, this means no power management.
		Tell the controller to have performance be a priority over power
		management.

		medium_power: Tell the controller to enter a lower power state
		when possible, but do not enter the lowest power state, thus
		improving latency over min_power setting.

		med_power_with_dipm: Identical to the existing medium_power
		setting except that it enables dipm (device initiated power
		management) on top, which makes it match the Windows IRST (Intel
		Rapid Storage Technology) driver settings. This setting is also
		close to min_power, except that:
		a) It does not use host-initiated slumber mode, but it does
		   allow device-initiated slumber
		b) It does not enable low power device sleep mode (DevSlp).

What:		/sys/class/scsi_host/hostX/em_message
What:		/sys/class/scsi_host/hostX/em_message_type
Date:		Jun, 2008
KernelVersion:	v2.6.27
Contact:	linux-ide@vger.kernel.org
Description:
		em_message: (RW) Enclosure management support. For the LED
		protocol, writes and reads correspond to the LED message format
		as defined in the AHCI spec.

		The user must turn sw_activity (under /sys/block/*/device/) OFF
		if they wish to control the activity LED via the em_message
		file.

		em_message_type: (RO) Displays the current enclosure management
		protocol that is being used by the driver (e.g. LED, SAF-TE,
		SES-2, SGPIO etc).

What:		/sys/class/scsi_host/hostX/ahci_port_cmd
What:		/sys/class/scsi_host/hostX/ahci_host_caps
What:		/sys/class/scsi_host/hostX/ahci_host_cap2
Date:		Mar, 2010
KernelVersion:	v2.6.35
Contact:	linux-ide@vger.kernel.org
Description:
		[to be documented]

What:		/sys/class/scsi_host/hostX/ahci_host_version
Date:		Mar, 2010
KernelVersion:	v2.6.35
Contact:	linux-ide@vger.kernel.org
Description:
		(RO) Display the version of the AHCI spec implemented by the
		host.

What:		/sys/class/scsi_host/hostX/em_buffer
Date:		Apr, 2010
KernelVersion:	v2.6.35
Contact:	linux-ide@vger.kernel.org
Description:
		(RW) Allows access to AHCI EM (enclosure management) buffer
		directly if the host supports EM.

		For example, the AHCI driver supports SGPIO EM messages but the
		SATA/AHCI specs do not define the SGPIO message format of the EM
		buffer. Different hardware (HW) vendors may have different
		definitions. With the em_buffer attribute, this issue can be
		solved by allowing HW vendors to provide userland drivers and
		tools for their SGPIO initiators.

What:		/sys/class/scsi_host/hostX/em_message_supported
Date:		Oct, 2009
KernelVersion:	v2.6.39
Contact:	linux-ide@vger.kernel.org
Description:
		(RO) Displays supported enclosure management message types.
@@ -16,6 +16,7 @@ Required properties:
- ddc: phandle to the hdmi ddc node
- phy: phandle to the hdmi phy node
- samsung,syscon-phandle: phandle for system controller node for PMU.
- #sound-dai-cells: should be 0.

Required properties for Exynos 4210, 4212, 5420 and 5433:
- clocks: list of clock IDs from SoC clock driver.
@@ -3,11 +3,11 @@ Device-Tree bindings for sigma delta modulator
Required properties:
- compatible: should be "ads1201", "sd-modulator". "sd-modulator" can be used
  as a generic SD modulator if the modulator is not specified in the compatible list.
- #io-channel-cells = <1>: See the IIO bindings section "IIO consumers".
- #io-channel-cells = <0>: See the IIO bindings section "IIO consumers".

Example node:

	ads1202: adc@0 {
		compatible = "sd-modulator";
		#io-channel-cells = <1>;
		#io-channel-cells = <0>;
	};
@@ -50,14 +50,15 @@ Example:
			compatible = "marvell,mv88e6085";
			reg = <0>;
			reset-gpios = <&gpio5 1 GPIO_ACTIVE_LOW>;
		};
		mdio {
			#address-cells = <1>;
			#size-cells = <0>;
			switch1phy0: switch1phy0@0 {
				reg = <0>;
				interrupt-parent = <&switch0>;
				interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;

		mdio {
			#address-cells = <1>;
			#size-cells = <0>;
			switch1phy0: switch1phy0@0 {
				reg = <0>;
				interrupt-parent = <&switch0>;
				interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
			};
		};
	};
};

@@ -74,23 +75,24 @@ Example:
			compatible = "marvell,mv88e6390";
			reg = <0>;
			reset-gpios = <&gpio5 1 GPIO_ACTIVE_LOW>;
		};
		mdio {
			#address-cells = <1>;
			#size-cells = <0>;
			switch1phy0: switch1phy0@0 {
				reg = <0>;
				interrupt-parent = <&switch0>;
				interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
			};
		};

		mdio1 {
			compatible = "marvell,mv88e6xxx-mdio-external";
			#address-cells = <1>;
			#size-cells = <0>;
			switch1phy9: switch1phy0@9 {
				reg = <9>;

		mdio {
			#address-cells = <1>;
			#size-cells = <0>;
			switch1phy0: switch1phy0@0 {
				reg = <0>;
				interrupt-parent = <&switch0>;
				interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
			};
		};

		mdio1 {
			compatible = "marvell,mv88e6xxx-mdio-external";
			#address-cells = <1>;
			#size-cells = <0>;
			switch1phy9: switch1phy0@9 {
				reg = <9>;
			};
		};
	};
};
@@ -27,7 +27,11 @@ Required properties:
	SoC-specific version corresponding to the platform first followed by
	the generic version.

- reg: offset and length of (1) the register block and (2) the stream buffer.
- reg: Offset and length of (1) the register block and (2) the stream buffer.
  The region for the register block is mandatory.
  The region for the stream buffer is optional, as it is only present on
  R-Car Gen2 and RZ/G1 SoCs, and on R-Car H3 (R8A7795), M3-W (R8A7796),
  and M3-N (R8A77965).
- interrupts: A list of interrupt-specifiers, one for each entry in
  interrupt-names.
  If interrupt-names is not present, an interrupt specifier
@@ -20,8 +20,8 @@ TCP Segmentation Offload

TCP segmentation allows a device to segment a single frame into multiple
frames with a data payload size specified in skb_shinfo()->gso_size.
When TCP segmentation requested the bit for either SKB_GSO_TCP or
SKB_GSO_TCP6 should be set in skb_shinfo()->gso_type and
When TCP segmentation is requested, the bit for either SKB_GSO_TCPV4 or
SKB_GSO_TCPV6 should be set in skb_shinfo()->gso_type and
skb_shinfo()->gso_size should be set to a non-zero value.

TCP segmentation is dependent on support for the use of partial checksum
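A kernel-style sketch of the rule above (illustrative only, not part of this diff; the helper name is made up):

	#include <linux/skbuff.h>

	/* Mark a fully built TCP/IPv4 skb for segmentation offload. */
	static void mark_skb_for_tso(struct sk_buff *skb, unsigned int mss)
	{
		/* Use SKB_GSO_TCPV6 instead for an IPv6 flow. */
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
		/* A non-zero per-segment payload size requests segmentation. */
		skb_shinfo(skb)->gso_size = mss;
	}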
@@ -153,8 +153,18 @@ To signal this, gso_size is set to the special value GSO_BY_FRAGS.

Therefore, any code in the core networking stack must be aware of the
possibility that gso_size will be GSO_BY_FRAGS and handle that case
appropriately. (For size checks, the skb_gso_validate_*_len family of
helpers do this automatically.)
appropriately.

There are some helpers to make this easier:

 - skb_is_gso(skb) && skb_is_gso_sctp(skb) is the best way to see if
   an skb is an SCTP GSO skb.

 - For size checks, the skb_gso_validate_*_len family of helpers correctly
   considers GSO_BY_FRAGS.

 - For manipulating packets, skb_increase_gso_size and skb_decrease_gso_size
   will check for GSO_BY_FRAGS and WARN if asked to manipulate these skbs.

This also affects drivers with the NETIF_F_FRAGLIST & NETIF_F_GSO_SCTP bits
set. Note also that NETIF_F_GSO_SCTP is included in NETIF_F_GSO_SOFTWARE.
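A sketch of how these helpers combine in a size check (illustrative, not from this diff; the function name is made up):

	#include <linux/skbuff.h>

	/* Would this skb, segmented or not, fit the given MTU? */
	static bool fits_mtu(const struct sk_buff *skb, unsigned int mtu)
	{
		/*
		 * SCTP GSO skbs carry GSO_BY_FRAGS in gso_size, so size-check
		 * via the GSO_BY_FRAGS-aware helper instead of reading gso_size.
		 */
		if (skb_is_gso(skb) && skb_is_gso_sctp(skb))
			return skb_gso_validate_network_len(skb, mtu);

		return skb->len <= mtu;
	}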
@@ -10334,7 +10334,7 @@ F:	drivers/oprofile/
F:	include/linux/oprofile.h

ORACLE CLUSTER FILESYSTEM 2 (OCFS2)
M:	Mark Fasheh <mfasheh@versity.com>
M:	Mark Fasheh <mark@fasheh.com>
M:	Joel Becker <jlbec@evilplan.org>
L:	ocfs2-devel@oss.oracle.com (moderated for non-subscribers)
W:	http://ocfs2.wiki.kernel.org

@@ -10844,6 +10844,7 @@ F:	drivers/platform/x86/peaq-wmi.c
PER-CPU MEMORY ALLOCATOR
M:	Tejun Heo <tj@kernel.org>
M:	Christoph Lameter <cl@linux.com>
M:	Dennis Zhou <dennisszhou@gmail.com>
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu.git
S:	Maintained
F:	include/linux/percpu*.h
Makefile

@@ -2,7 +2,7 @@
VERSION = 4
PATCHLEVEL = 16
SUBLEVEL = 0
EXTRAVERSION = -rc6
EXTRAVERSION = -rc7
NAME = Fearless Coyote

# *DOCUMENTATION*

@@ -826,6 +826,15 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign)
# disable invalid "can't wrap" optimizations for signed / pointers
KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow)

# clang sets -fmerge-all-constants by default as optimization, but this
# is non-conforming behavior for C and in fact breaks the kernel, so we
# need to disable it here generally.
KBUILD_CFLAGS += $(call cc-option,-fno-merge-all-constants)

# for gcc -fno-merge-all-constants disables everything, but it is fine
# to have actual conforming behavior enabled.
KBUILD_CFLAGS += $(call cc-option,-fmerge-constants)

# Make sure -fstack-check isn't enabled (like gentoo apparently did)
KBUILD_CFLAGS += $(call cc-option,-fno-stack-check,)
@@ -972,3 +972,13 @@ int pmd_clear_huge(pmd_t *pmdp)
	pmd_clear(pmdp);
	return 1;
}

int pud_free_pmd_page(pud_t *pud)
{
	return pud_none(*pud);
}

int pmd_free_pte_page(pmd_t *pmd)
{
	return pmd_none(*pmd);
}
@@ -2,7 +2,6 @@
#ifndef __H8300_BYTEORDER_H__
#define __H8300_BYTEORDER_H__

#define __BIG_ENDIAN __ORDER_BIG_ENDIAN__
#include <linux/byteorder/big_endian.h>

#endif
@@ -13,6 +13,8 @@ choice
config SOC_AMAZON_SE
	bool "Amazon SE"
	select SOC_TYPE_XWAY
	select MFD_SYSCON
	select MFD_CORE

config SOC_XWAY
	bool "XWAY"
@@ -549,9 +549,9 @@ void __init ltq_soc_init(void)
		clkdev_add_static(ltq_ar9_cpu_hz(), ltq_ar9_fpi_hz(),
				ltq_ar9_fpi_hz(), CLOCK_250M);
		clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 0, PMU_USB0_P);
		clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0);
		clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0 | PMU_AHBM);
		clkdev_add_pmu("1f203034.usb2-phy", "phy", 1, 0, PMU_USB1_P);
		clkdev_add_pmu("1e106000.usb", "otg", 1, 0, PMU_USB1);
		clkdev_add_pmu("1e106000.usb", "otg", 1, 0, PMU_USB1 | PMU_AHBM);
		clkdev_add_pmu("1e180000.etop", "switch", 1, 0, PMU_SWITCH);
		clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO);
		clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);

@@ -560,7 +560,7 @@ void __init ltq_soc_init(void)
	} else {
		clkdev_add_static(ltq_danube_cpu_hz(), ltq_danube_fpi_hz(),
				ltq_danube_fpi_hz(), ltq_danube_pp32_hz());
		clkdev_add_pmu("1f203018.usb2-phy", "ctrl", 1, 0, PMU_USB0);
		clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0 | PMU_AHBM);
		clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 0, PMU_USB0_P);
		clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO);
		clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
@@ -170,6 +170,28 @@ void prom_soc_init(struct ralink_soc_info *soc_info)
	u32 n1;
	u32 rev;

	/* Early detection of CMP support */
	mips_cm_probe();
	mips_cpc_probe();

	if (mips_cps_numiocu(0)) {
		/*
		 * mips_cm_probe() wipes out bootloader
		 * config for CM regions and we have to configure them
		 * again. This SoC cannot talk to palmbus devices
		 * without proper iocu region set up.
		 *
		 * FIXME: it would be better to do this with values
		 * from DT, but we need this very early because
		 * without this we cannot talk to pretty much anything
		 * including serial.
		 */
		write_gcr_reg0_base(MT7621_PALMBUS_BASE);
		write_gcr_reg0_mask(~MT7621_PALMBUS_SIZE |
				    CM_GCR_REGn_MASK_CMTGT_IOCU0);
		__sync();
	}

	n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0);
	n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1);

@@ -194,26 +216,6 @@ void prom_soc_init(struct ralink_soc_info *soc_info)

	rt2880_pinmux_data = mt7621_pinmux_data;

	/* Early detection of CMP support */
	mips_cm_probe();
	mips_cpc_probe();

	if (mips_cps_numiocu(0)) {
		/*
		 * mips_cm_probe() wipes out bootloader
		 * config for CM regions and we have to configure them
		 * again. This SoC cannot talk to palmbus devices
		 * without proper iocu region set up.
		 *
		 * FIXME: it would be better to do this with values
		 * from DT, but we need this very early because
		 * without this we cannot talk to pretty much anything
		 * including serial.
		 */
		write_gcr_reg0_base(MT7621_PALMBUS_BASE);
		write_gcr_reg0_mask(~MT7621_PALMBUS_SIZE |
				    CM_GCR_REGn_MASK_CMTGT_IOCU0);
	}

	if (!register_cps_smp_ops())
		return;
@@ -96,16 +96,9 @@ static void ralink_restart(char *command)
	unreachable();
}

static void ralink_halt(void)
{
	local_irq_disable();
	unreachable();
}

static int __init mips_reboot_setup(void)
{
	_machine_restart = ralink_restart;
	_machine_halt = ralink_halt;

	return 0;
}
@@ -315,19 +315,6 @@ config X86_L1_CACHE_SHIFT
	default "4" if MELAN || M486 || MGEODEGX1
	default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX

config X86_PPRO_FENCE
	bool "PentiumPro memory ordering errata workaround"
	depends on M686 || M586MMX || M586TSC || M586 || M486 || MGEODEGX1
	---help---
	  Old PentiumPro multiprocessor systems had errata that could cause
	  memory operations to violate the x86 ordering standard in rare cases.
	  Enabling this option will attempt to work around some (but not all)
	  occurrences of this problem, at the cost of much heavier spinlock and
	  memory barrier operations.

	  If unsure, say n here. Even distro kernels should think twice before
	  enabling this: there are few systems, and an unlikely bug.

config X86_F00F_BUG
	def_bool y
	depends on M586MMX || M586TSC || M586 || M486
@@ -223,6 +223,15 @@ KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr)

LDFLAGS := -m elf_$(UTS_MACHINE)

#
# The 64-bit kernel must be aligned to 2MB. Pass -z max-page-size=0x200000 to
# the linker to force 2MB page size regardless of the default page size used
# by the linker.
#
ifdef CONFIG_X86_64
LDFLAGS += $(call ld-option, -z max-page-size=0x200000)
endif

# Speed up the build
KBUILD_CFLAGS += -pipe
# Workaround for a gcc prerelease that unfortunately was shipped in a suse release
@@ -309,6 +309,10 @@ static void parse_elf(void *output)

	switch (phdr->p_type) {
	case PT_LOAD:
#ifdef CONFIG_X86_64
		if ((phdr->p_align % 0x200000) != 0)
			error("Alignment of LOAD segment isn't multiple of 2MB");
#endif
#ifdef CONFIG_RELOCATABLE
		dest = output;
		dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
@@ -1138,7 +1138,7 @@ apicinterrupt3 HYPERV_REENLIGHTENMENT_VECTOR \
#endif /* CONFIG_HYPERV */

idtentry debug		do_debug		has_error_code=0	paranoid=1 shift_ist=DEBUG_STACK
idtentry int3		do_int3			has_error_code=0	paranoid=1 shift_ist=DEBUG_STACK
idtentry int3		do_int3			has_error_code=0
idtentry stack_segment	do_stack_segment	has_error_code=1

#ifdef CONFIG_XEN
@@ -5,8 +5,6 @@
#undef CONFIG_OPTIMIZE_INLINING
#endif

#undef CONFIG_X86_PPRO_FENCE

#ifdef CONFIG_X86_64

/*
@@ -347,7 +347,7 @@ void __init set_vsyscall_pgtable_user_bits(pgd_t *root)
	set_pgd(pgd, __pgd(pgd_val(*pgd) | _PAGE_USER));
	p4d = p4d_offset(pgd, VSYSCALL_ADDR);
#if CONFIG_PGTABLE_LEVELS >= 5
	p4d->p4d |= _PAGE_USER;
	set_p4d(p4d, __p4d(p4d_val(*p4d) | _PAGE_USER));
#endif
	pud = pud_offset(p4d, VSYSCALL_ADDR);
	set_pud(pud, __pud(pud_val(*pud) | _PAGE_USER));
@@ -1153,6 +1153,7 @@ static void setup_pebs_sample_data(struct perf_event *event,
	if (pebs == NULL)
		return;

	regs->flags &= ~PERF_EFLAGS_EXACT;
	sample_type = event->attr.sample_type;
	dsrc = sample_type & PERF_SAMPLE_DATA_SRC;

@@ -1197,7 +1198,6 @@ static void setup_pebs_sample_data(struct perf_event *event,
	 */
	*regs = *iregs;
	regs->flags = pebs->flags;
	set_linear_ip(regs, pebs->ip);

	if (sample_type & PERF_SAMPLE_REGS_INTR) {
		regs->ax = pebs->ax;

@@ -1233,13 +1233,22 @@ static void setup_pebs_sample_data(struct perf_event *event,
#endif
	}

	if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format >= 2) {
		regs->ip = pebs->real_ip;
		regs->flags |= PERF_EFLAGS_EXACT;
	} else if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(regs))
		regs->flags |= PERF_EFLAGS_EXACT;
	else
		regs->flags &= ~PERF_EFLAGS_EXACT;
	if (event->attr.precise_ip > 1) {
		/* Haswell and later have the eventing IP, so use it: */
		if (x86_pmu.intel_cap.pebs_format >= 2) {
			set_linear_ip(regs, pebs->real_ip);
			regs->flags |= PERF_EFLAGS_EXACT;
		} else {
			/* Otherwise use PEBS off-by-1 IP: */
			set_linear_ip(regs, pebs->ip);

			/* ... and try to fix it up using the LBR entries: */
			if (intel_pmu_pebs_fixup_ip(regs))
				regs->flags |= PERF_EFLAGS_EXACT;
		}
	} else
		set_linear_ip(regs, pebs->ip);


	if ((sample_type & (PERF_SAMPLE_ADDR | PERF_SAMPLE_PHYS_ADDR)) &&
	    x86_pmu.intel_cap.pebs_format >= 1)
@@ -52,11 +52,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
#define barrier_nospec() alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC, \
					   "lfence", X86_FEATURE_LFENCE_RDTSC)

#ifdef CONFIG_X86_PPRO_FENCE
#define dma_rmb()	rmb()
#else
#define dma_rmb()	barrier()
#endif
#define dma_wmb()	barrier()

#ifdef CONFIG_X86_32

@@ -68,30 +64,6 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
#define __smp_wmb()	barrier()
#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)

#if defined(CONFIG_X86_PPRO_FENCE)

/*
 * For this option x86 doesn't have a strong TSO memory
 * model and we should fall back to full barriers.
 */

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	___p1;								\
})

#else /* regular x86 TSO memory ordering */

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\

@@ -107,8 +79,6 @@ do {									\
	___p1;								\
})

#endif

/* Atomic operations are already serializing on x86 */
#define __smp_mb__before_atomic()	barrier()
#define __smp_mb__after_atomic()	barrier()
@@ -232,21 +232,6 @@ extern void set_iounmap_nonlazy(void);
 */
#define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))

/*
 * Cache management
 *
 * This needed for two cases
 * 1. Out of order aware processors
 * 2. Accidentally out of order processors (PPro errata #51)
 */

static inline void flush_write_buffers(void)
{
#if defined(CONFIG_X86_PPRO_FENCE)
	asm volatile("lock; addl $0,0(%%esp)": : :"memory");
#endif
}

#endif /* __KERNEL__ */

extern void native_io_delay(void);
@@ -352,6 +352,7 @@ enum vmcs_field {
#define INTR_TYPE_NMI_INTR		(2 << 8) /* NMI */
#define INTR_TYPE_HARD_EXCEPTION	(3 << 8) /* processor exception */
#define INTR_TYPE_SOFT_INTR		(4 << 8) /* software interrupt */
#define INTR_TYPE_PRIV_SW_EXCEPTION	(5 << 8) /* ICE breakpoint - undocumented */
#define INTR_TYPE_SOFT_EXCEPTION	(6 << 8) /* software exception */

/* GUEST_INTERRUPTIBILITY_INFO flags. */
@@ -160,7 +160,6 @@ static const __initconst struct idt_data early_pf_idts[] = {
 */
static const __initconst struct idt_data dbg_idts[] = {
	INTG(X86_TRAP_DB,	debug),
	INTG(X86_TRAP_BP,	int3),
};
#endif

@@ -183,7 +182,6 @@ gate_desc debug_idt_table[IDT_ENTRIES] __page_aligned_bss;
static const __initconst struct idt_data ist_idts[] = {
	ISTG(X86_TRAP_DB,	debug,		DEBUG_STACK),
	ISTG(X86_TRAP_NMI,	nmi,		NMI_STACK),
	SISTG(X86_TRAP_BP,	int3,		DEBUG_STACK),
	ISTG(X86_TRAP_DF,	double_fault,	DOUBLEFAULT_STACK),
#ifdef CONFIG_X86_MCE
	ISTG(X86_TRAP_MC,	&machine_check,	MCE_STACK),
@@ -37,7 +37,6 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
	WARN_ON(size == 0);
	if (!check_addr("map_single", dev, bus, size))
		return NOMMU_MAPPING_ERROR;
	flush_write_buffers();
	return bus;
}

@@ -72,25 +71,9 @@ static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
			return 0;
		s->dma_length = s->length;
	}
	flush_write_buffers();
	return nents;
}

static void nommu_sync_single_for_device(struct device *dev,
			dma_addr_t addr, size_t size,
			enum dma_data_direction dir)
{
	flush_write_buffers();
}


static void nommu_sync_sg_for_device(struct device *dev,
			struct scatterlist *sg, int nelems,
			enum dma_data_direction dir)
{
	flush_write_buffers();
}

static int nommu_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == NOMMU_MAPPING_ERROR;

@@ -101,8 +84,6 @@ const struct dma_map_ops nommu_dma_ops = {
	.free			= dma_generic_free_coherent,
	.map_sg			= nommu_map_sg,
	.map_page		= nommu_map_page,
	.sync_single_for_device = nommu_sync_single_for_device,
	.sync_sg_for_device	= nommu_sync_sg_for_device,
	.is_phys		= 1,
	.mapping_error		= nommu_mapping_error,
	.dma_supported		= x86_dma_supported,
@@ -577,7 +577,6 @@ do_general_protection(struct pt_regs *regs, long error_code)
}
NOKPROBE_SYMBOL(do_general_protection);

/* May run on IST stack. */
dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_DYNAMIC_FTRACE

@@ -592,6 +591,13 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
	if (poke_int3_handler(regs))
		return;

	/*
	 * Use ist_enter despite the fact that we don't use an IST stack.
	 * We can be called from a kprobe in non-CONTEXT_KERNEL kernel
	 * mode or even during context tracking state changes.
	 *
	 * This means that we can't schedule. That's okay.
	 */
	ist_enter(regs);
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP

@@ -609,15 +615,10 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
			SIGTRAP) == NOTIFY_STOP)
		goto exit;

	/*
	 * Let others (NMI) know that the debug stack is in use
	 * as we may switch to the interrupt stack.
	 */
	debug_stack_usage_inc();
	cond_local_irq_enable(regs);
	do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
	cond_local_irq_disable(regs);
	debug_stack_usage_dec();

exit:
	ist_exit(regs);
}
@@ -1045,6 +1045,13 @@ static inline bool is_machine_check(u32 intr_info)
		(INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
}

/* Undocumented: icebp/int1 */
static inline bool is_icebp(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
		== (INTR_TYPE_PRIV_SW_EXCEPTION | INTR_INFO_VALID_MASK);
}

static inline bool cpu_has_vmx_msr_bitmap(void)
{
	return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;

@@ -6179,7 +6186,7 @@ static int handle_exception(struct kvm_vcpu *vcpu)
		    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
			vcpu->arch.dr6 &= ~15;
			vcpu->arch.dr6 |= dr6 | DR6_RTM;
			if (!(dr6 & ~DR6_RESERVED)) /* icebp */
			if (is_icebp(intr_info))
				skip_emulated_instruction(vcpu);

			kvm_queue_exception(vcpu, DB_VECTOR);
@@ -800,17 +800,11 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,

#define PAGE_INUSE 0xFD

static void __meminit free_pagetable(struct page *page, int order,
		struct vmem_altmap *altmap)
static void __meminit free_pagetable(struct page *page, int order)
{
	unsigned long magic;
	unsigned int nr_pages = 1 << order;

	if (altmap) {
		vmem_altmap_free(altmap, nr_pages);
		return;
	}

	/* bootmem page has reserved flag */
	if (PageReserved(page)) {
		__ClearPageReserved(page);

@@ -826,8 +820,16 @@ static void __meminit free_pagetable(struct page *page, int order,
		free_pages((unsigned long)page_address(page), order);
}

static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd,
static void __meminit free_hugepage_table(struct page *page,
		struct vmem_altmap *altmap)
{
	if (altmap)
		vmem_altmap_free(altmap, PMD_SIZE / PAGE_SIZE);
	else
		free_pagetable(page, get_order(PMD_SIZE));
}

static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

@@ -839,14 +841,13 @@ static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd,
	}

	/* free a pte table */
	free_pagetable(pmd_page(*pmd), 0, altmap);
	free_pagetable(pmd_page(*pmd), 0);
	spin_lock(&init_mm.page_table_lock);
	pmd_clear(pmd);
	spin_unlock(&init_mm.page_table_lock);
}

static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud,
		struct vmem_altmap *altmap)
static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

@@ -858,14 +859,13 @@ static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud,
	}

	/* free a pmd table */
	free_pagetable(pud_page(*pud), 0, altmap);
	free_pagetable(pud_page(*pud), 0);
	spin_lock(&init_mm.page_table_lock);
	pud_clear(pud);
	spin_unlock(&init_mm.page_table_lock);
}

static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d,
		struct vmem_altmap *altmap)
static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d)
{
	pud_t *pud;
	int i;

@@ -877,7 +877,7 @@ static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d,
	}

	/* free a pud table */
	free_pagetable(p4d_page(*p4d), 0, altmap);
	free_pagetable(p4d_page(*p4d), 0);
	spin_lock(&init_mm.page_table_lock);
	p4d_clear(p4d);
	spin_unlock(&init_mm.page_table_lock);

@@ -885,7 +885,7 @@ static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d,

static void __meminit
remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
		 struct vmem_altmap *altmap, bool direct)
		 bool direct)
{
	unsigned long next, pages = 0;
	pte_t *pte;

@@ -916,7 +916,7 @@ remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
			 * freed when offlining, or simply not in use.
			 */
			if (!direct)
				free_pagetable(pte_page(*pte), 0, altmap);
				free_pagetable(pte_page(*pte), 0);

			spin_lock(&init_mm.page_table_lock);
			pte_clear(&init_mm, addr, pte);

@@ -939,7 +939,7 @@ remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,

			page_addr = page_address(pte_page(*pte));
			if (!memchr_inv(page_addr, PAGE_INUSE, PAGE_SIZE)) {
				free_pagetable(pte_page(*pte), 0, altmap);
				free_pagetable(pte_page(*pte), 0);

				spin_lock(&init_mm.page_table_lock);
				pte_clear(&init_mm, addr, pte);

@@ -974,9 +974,8 @@ remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE)) {
				if (!direct)
					free_pagetable(pmd_page(*pmd),
						       get_order(PMD_SIZE),
						       altmap);
					free_hugepage_table(pmd_page(*pmd),
							    altmap);

				spin_lock(&init_mm.page_table_lock);
				pmd_clear(pmd);

@@ -989,9 +988,8 @@ remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
				page_addr = page_address(pmd_page(*pmd));
				if (!memchr_inv(page_addr, PAGE_INUSE,
						PMD_SIZE)) {
					free_pagetable(pmd_page(*pmd),
						       get_order(PMD_SIZE),
						       altmap);
					free_hugepage_table(pmd_page(*pmd),
							    altmap);

					spin_lock(&init_mm.page_table_lock);
					pmd_clear(pmd);

@@ -1003,8 +1001,8 @@ remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
		}

		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
		remove_pte_table(pte_base, addr, next, altmap, direct);
		free_pte_table(pte_base, pmd, altmap);
		remove_pte_table(pte_base, addr, next, direct);
		free_pte_table(pte_base, pmd);
	}

	/* Call free_pmd_table() in remove_pud_table(). */

@@ -1033,8 +1031,7 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
			    IS_ALIGNED(next, PUD_SIZE)) {
				if (!direct)
					free_pagetable(pud_page(*pud),
						       get_order(PUD_SIZE),
						       altmap);
						       get_order(PUD_SIZE));

				spin_lock(&init_mm.page_table_lock);
				pud_clear(pud);

@@ -1048,8 +1045,7 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
				if (!memchr_inv(page_addr, PAGE_INUSE,
						PUD_SIZE)) {
					free_pagetable(pud_page(*pud),
						       get_order(PUD_SIZE),
						       altmap);
						       get_order(PUD_SIZE));

					spin_lock(&init_mm.page_table_lock);
					pud_clear(pud);

@@ -1062,7 +1058,7 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,

		pmd_base = pmd_offset(pud, 0);
		remove_pmd_table(pmd_base, addr, next, direct, altmap);
		free_pmd_table(pmd_base, pud, altmap);
		free_pmd_table(pmd_base, pud);
	}

	if (direct)

@@ -1094,7 +1090,7 @@ remove_p4d_table(p4d_t *p4d_start, unsigned long addr, unsigned long end,
		 * to adapt for boot-time switching between 4 and 5 level page tables.
		 */
		if (CONFIG_PGTABLE_LEVELS == 5)
			free_pud_table(pud_base, p4d, altmap);
			free_pud_table(pud_base, p4d);
	}

	if (direct)
@@ -702,4 +702,52 @@ int pmd_clear_huge(pmd_t *pmd)

	return 0;
}

/**
 * pud_free_pmd_page - Clear pud entry and free pmd page.
 * @pud: Pointer to a PUD.
 *
 * Context: The pud range has been unmapped and TLB purged.
 * Return: 1 if clearing the entry succeeded. 0 otherwise.
 */
int pud_free_pmd_page(pud_t *pud)
{
	pmd_t *pmd;
	int i;

	if (pud_none(*pud))
		return 1;

	pmd = (pmd_t *)pud_page_vaddr(*pud);

	for (i = 0; i < PTRS_PER_PMD; i++)
		if (!pmd_free_pte_page(&pmd[i]))
			return 0;

	pud_clear(pud);
	free_page((unsigned long)pmd);

	return 1;
}

/**
 * pmd_free_pte_page - Clear pmd entry and free pte page.
 * @pmd: Pointer to a PMD.
 *
 * Context: The pmd range has been unmapped and TLB purged.
 * Return: 1 if clearing the entry succeeded. 0 otherwise.
 */
int pmd_free_pte_page(pmd_t *pmd)
{
	pte_t *pte;

	if (pmd_none(*pmd))
		return 1;

	pte = (pte_t *)pmd_page_vaddr(*pmd);
	pmd_clear(pmd);
	free_page((unsigned long)pte);

	return 1;
}
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
@@ -1188,7 +1188,7 @@ skip_init_addrs:
	 * may converge on the last pass. In such case do one more
	 * pass to emit the final image
	 */
	for (pass = 0; pass < 10 || image; pass++) {
	for (pass = 0; pass < 20 || image; pass++) {
		proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
		if (proglen <= 0) {
			image = NULL;

@@ -1215,6 +1215,7 @@ skip_init_addrs:
			}
		}
		oldproglen = proglen;
		cond_resched();
	}

	if (bpf_jit_enable > 1)
@@ -227,7 +227,7 @@ int __init efi_alloc_page_tables(void)
	if (!pud) {
		if (CONFIG_PGTABLE_LEVELS > 4)
			free_page((unsigned long) pgd_page_vaddr(*pgd));
		free_page((unsigned long)efi_pgd);
		free_pages((unsigned long)efi_pgd, PGD_ALLOCATION_ORDER);
		return -ENOMEM;
	}
@@ -30,11 +30,7 @@

#endif /* CONFIG_X86_32 */

#ifdef CONFIG_X86_PPRO_FENCE
#define dma_rmb()	rmb()
#else /* CONFIG_X86_PPRO_FENCE */
#define dma_rmb()	barrier()
#endif /* CONFIG_X86_PPRO_FENCE */
#define dma_wmb()	barrier()

#include <asm-generic/barrier.h>
@@ -74,10 +74,10 @@ void __init acpi_watchdog_init(void)
		res.start = gas->address;
		if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
			res.flags = IORESOURCE_MEM;
			res.end = res.start + ALIGN(gas->access_width, 4);
			res.end = res.start + ALIGN(gas->access_width, 4) - 1;
		} else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
			res.flags = IORESOURCE_IO;
			res.end = res.start + gas->access_width;
			res.end = res.start + gas->access_width - 1;
		} else {
			pr_warn("Unsupported address space: %u\n",
				gas->space_id);
@@ -70,7 +70,6 @@ static async_cookie_t async_cookie;
static bool battery_driver_registered;
static int battery_bix_broken_package;
static int battery_notification_delay_ms;
static int battery_full_discharging;
static unsigned int cache_time = 1000;
module_param(cache_time, uint, 0644);
MODULE_PARM_DESC(cache_time, "cache time in milliseconds");

@@ -215,12 +214,9 @@ static int acpi_battery_get_property(struct power_supply *psy,
		return -ENODEV;
	switch (psp) {
	case POWER_SUPPLY_PROP_STATUS:
		if (battery->state & ACPI_BATTERY_STATE_DISCHARGING) {
			if (battery_full_discharging && battery->rate_now == 0)
				val->intval = POWER_SUPPLY_STATUS_FULL;
			else
				val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
		} else if (battery->state & ACPI_BATTERY_STATE_CHARGING)
		if (battery->state & ACPI_BATTERY_STATE_DISCHARGING)
			val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
		else if (battery->state & ACPI_BATTERY_STATE_CHARGING)
			val->intval = POWER_SUPPLY_STATUS_CHARGING;
		else if (acpi_battery_is_charged(battery))
			val->intval = POWER_SUPPLY_STATUS_FULL;

@@ -1170,12 +1166,6 @@ battery_notification_delay_quirk(const struct dmi_system_id *d)
	return 0;
}

static int __init battery_full_discharging_quirk(const struct dmi_system_id *d)
{
	battery_full_discharging = 1;
	return 0;
}

static const struct dmi_system_id bat_dmi_table[] __initconst = {
	{
		.callback = battery_bix_broken_package_quirk,

@@ -1193,38 +1183,6 @@ static const struct dmi_system_id bat_dmi_table[] __initconst = {
			DMI_MATCH(DMI_PRODUCT_NAME, "Aspire V5-573G"),
		},
	},
	{
		.callback = battery_full_discharging_quirk,
		.ident = "ASUS GL502VSK",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_PRODUCT_NAME, "GL502VSK"),
		},
	},
	{
		.callback = battery_full_discharging_quirk,
		.ident = "ASUS UX305LA",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_PRODUCT_NAME, "UX305LA"),
		},
	},
	{
		.callback = battery_full_discharging_quirk,
		.ident = "ASUS UX360UA",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_PRODUCT_NAME, "UX360UA"),
		},
	},
	{
		.callback = battery_full_discharging_quirk,
		.ident = "ASUS UX410UAK",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_PRODUCT_NAME, "UX410UAK"),
		},
	},
	{},
};
@@ -2675,10 +2675,14 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
	else
		ndr_desc->numa_node = NUMA_NO_NODE;

	if(acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_CACHE_FLUSH)
	/*
	 * Persistence domain bits are hierarchical, if
	 * ACPI_NFIT_CAPABILITY_CACHE_FLUSH is set then
	 * ACPI_NFIT_CAPABILITY_MEM_FLUSH is implied.
	 */
	if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_CACHE_FLUSH)
		set_bit(ND_REGION_PERSIST_CACHE, &ndr_desc->flags);

	if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_MEM_FLUSH)
	else if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_MEM_FLUSH)
		set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc->flags);

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
@@ -103,25 +103,27 @@ int acpi_map_pxm_to_node(int pxm)
 */
int acpi_map_pxm_to_online_node(int pxm)
{
	int node, n, dist, min_dist;
	int node, min_node;

	node = acpi_map_pxm_to_node(pxm);

	if (node == NUMA_NO_NODE)
		node = 0;

	min_node = node;
	if (!node_online(node)) {
		min_dist = INT_MAX;
		int min_dist = INT_MAX, dist, n;

		for_each_online_node(n) {
			dist = node_distance(node, n);
			if (dist < min_dist) {
				min_dist = dist;
				node = n;
				min_node = n;
			}
		}
	}

	return node;
	return min_node;
}
EXPORT_SYMBOL(acpi_map_pxm_to_online_node);
@@ -550,7 +550,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
	  .driver_data = board_ahci_yes_fbs },
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230),
	  .driver_data = board_ahci_yes_fbs },
	{ PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642),
	{ PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642), /* highpoint rocketraid 642L */
	  .driver_data = board_ahci_yes_fbs },
	{ PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0645), /* highpoint rocketraid 644L */
	  .driver_data = board_ahci_yes_fbs },

	/* Promise */
@@ -665,6 +665,16 @@ int ahci_stop_engine(struct ata_port *ap)
	if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
		return 0;

	/*
	 * Don't try to issue commands but return with ENODEV if the
	 * AHCI controller is not available anymore (e.g. due to PCIe hot
	 * unplugging). Otherwise a 500ms delay for each port is added.
	 */
	if (tmp == 0xffffffff) {
		dev_err(ap->host->dev, "AHCI controller unavailable!\n");
		return -ENODEV;
	}

	/* setting HBA to idle */
	tmp &= ~PORT_CMD_START;
	writel(tmp, port_mmio + PORT_CMD);
@@ -340,7 +340,7 @@ static int ahci_platform_get_regulator(struct ahci_host_priv *hpriv, u32 port,
 * 2) regulator for controlling the targets power (optional)
 * 3) 0 - AHCI_MAX_CLKS clocks, as specified in the devs devicetree node,
 *    or for non devicetree enabled platforms a single clock
 *	4) phys (optional)
 * 4) phys (optional)
 *
 * RETURNS:
 * The allocated ahci_host_priv on success, otherwise an ERR_PTR value
@@ -4530,6 +4530,25 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
	{ "PIONEER DVD-RW  DVR-212D",	NULL,	ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW  DVR-216D",	NULL,	ATA_HORKAGE_NOSETXFER },

	/* Crucial BX100 SSD 500GB has broken LPM support */
	{ "CT500BX100SSD1",		NULL,	ATA_HORKAGE_NOLPM },

	/* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */
	{ "Crucial_CT512MX100*",	"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM |
						ATA_HORKAGE_NOLPM, },
	/* 512GB MX100 with newer firmware has only LPM issues */
	{ "Crucial_CT512MX100*",	NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM |
						ATA_HORKAGE_NOLPM, },

	/* 480GB+ M500 SSDs have both queued TRIM and LPM issues */
	{ "Crucial_CT480M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM |
						ATA_HORKAGE_NOLPM, },
	{ "Crucial_CT960M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM |
						ATA_HORKAGE_NOLPM, },

	/* devices that don't properly handle queued TRIM commands */
	{ "Micron_M500_*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },

@@ -4541,7 +4560,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
						ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "Crucial_CT*MX100*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "Samsung SSD 8*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
	{ "Samsung SSD 840*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "Samsung SSD 850*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "FCCT*M500*",			NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },

@@ -5401,8 +5422,7 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
	 * We guarantee to LLDs that they will have at least one
	 * non-zero sg if the command is a data command.
	 */
	if (WARN_ON_ONCE(ata_is_data(prot) &&
			 (!qc->sg || !qc->n_elem || !qc->nbytes)))
	if (ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes))
		goto sys_err;

	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
@@ -815,7 +815,8 @@ void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)

	if (ap->pflags & ATA_PFLAG_LOADING)
		ap->pflags &= ~ATA_PFLAG_LOADING;
	else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
	else if ((ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) &&
		 !(ap->flags & ATA_FLAG_SAS_HOST))
		schedule_delayed_work(&ap->hotplug_task, 0);

	if (ap->pflags & ATA_PFLAG_RECOVERED)
@@ -3316,6 +3316,12 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
		goto invalid_fld;
	}

	/* We may not issue NCQ commands to devices not supporting NCQ */
	if (ata_is_ncq(tf->protocol) && !ata_ncq_enabled(dev)) {
		fp = 1;
		goto invalid_fld;
	}

	/* sanity check for pio multi commands */
	if ((cdb[1] & 0xe0) && !is_multi_taskfile(tf)) {
		fp = 1;

@@ -4282,7 +4288,7 @@ static inline void ata_scsi_dump_cdb(struct ata_port *ap,
#ifdef ATA_DEBUG
	struct scsi_device *scsidev = cmd->device;

	DPRINTK("CDB (%u:%d,%d,%d) %9ph\n",
	DPRINTK("CDB (%u:%d,%d,%lld) %9ph\n",
		ap->print_id,
		scsidev->channel, scsidev->id, scsidev->lun,
		cmd->cmnd);

@@ -4309,7 +4315,9 @@ static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd,
	if (likely((scsi_op != ATA_16) || !atapi_passthru16)) {
		/* relay SCSI command to ATAPI device */
		int len = COMMAND_SIZE(scsi_op);
		if (unlikely(len > scmd->cmd_len || len > dev->cdb_len))
		if (unlikely(len > scmd->cmd_len ||
			     len > dev->cdb_len ||
			     scmd->cmd_len > ATAPI_CDB_LEN))
			goto bad_cdb_len;

		xlat_func = atapi_xlat;
@@ -146,6 +146,7 @@
 enum sata_rcar_type {
     RCAR_GEN1_SATA,
     RCAR_GEN2_SATA,
+    RCAR_GEN3_SATA,
     RCAR_R8A7790_ES1_SATA,
 };
 
@@ -784,26 +785,11 @@ static void sata_rcar_setup_port(struct ata_host *host)
     ioaddr->command_addr = ioaddr->cmd_addr + (ATA_REG_CMD << 2);
 }
 
-static void sata_rcar_init_controller(struct ata_host *host)
+static void sata_rcar_init_module(struct sata_rcar_priv *priv)
 {
-    struct sata_rcar_priv *priv = host->private_data;
     void __iomem *base = priv->base;
     u32 val;
 
-    /* reset and setup phy */
-    switch (priv->type) {
-    case RCAR_GEN1_SATA:
-        sata_rcar_gen1_phy_init(priv);
-        break;
-    case RCAR_GEN2_SATA:
-    case RCAR_R8A7790_ES1_SATA:
-        sata_rcar_gen2_phy_init(priv);
-        break;
-    default:
-        dev_warn(host->dev, "SATA phy is not initialized\n");
-        break;
-    }
-
     /* SATA-IP reset state */
     val = ioread32(base + ATAPI_CONTROL1_REG);
     val |= ATAPI_CONTROL1_RESET;

@@ -824,10 +810,33 @@ static void sata_rcar_init_controller(struct ata_host *host)
     /* ack and mask */
     iowrite32(0, base + SATAINTSTAT_REG);
     iowrite32(0x7ff, base + SATAINTMASK_REG);
 
     /* enable interrupts */
     iowrite32(ATAPI_INT_ENABLE_SATAINT, base + ATAPI_INT_ENABLE_REG);
 }
 
+static void sata_rcar_init_controller(struct ata_host *host)
+{
+    struct sata_rcar_priv *priv = host->private_data;
+
+    /* reset and setup phy */
+    switch (priv->type) {
+    case RCAR_GEN1_SATA:
+        sata_rcar_gen1_phy_init(priv);
+        break;
+    case RCAR_GEN2_SATA:
+    case RCAR_GEN3_SATA:
+    case RCAR_R8A7790_ES1_SATA:
+        sata_rcar_gen2_phy_init(priv);
+        break;
+    default:
+        dev_warn(host->dev, "SATA phy is not initialized\n");
+        break;
+    }
+
+    sata_rcar_init_module(priv);
+}
+
 static const struct of_device_id sata_rcar_match[] = {
     {
         /* Deprecated by "renesas,sata-r8a7779" */

@@ -856,7 +865,7 @@ static const struct of_device_id sata_rcar_match[] = {
     },
     {
         .compatible = "renesas,sata-r8a7795",
-        .data = (void *)RCAR_GEN2_SATA
+        .data = (void *)RCAR_GEN3_SATA
     },
     {
         .compatible = "renesas,rcar-gen2-sata",

@@ -864,7 +873,7 @@ static const struct of_device_id sata_rcar_match[] = {
     },
     {
         .compatible = "renesas,rcar-gen3-sata",
-        .data = (void *)RCAR_GEN2_SATA
+        .data = (void *)RCAR_GEN3_SATA
     },
     { },
 };

@@ -982,11 +991,18 @@ static int sata_rcar_resume(struct device *dev)
     if (ret)
         return ret;
 
-    /* ack and mask */
-    iowrite32(0, base + SATAINTSTAT_REG);
-    iowrite32(0x7ff, base + SATAINTMASK_REG);
-    /* enable interrupts */
-    iowrite32(ATAPI_INT_ENABLE_SATAINT, base + ATAPI_INT_ENABLE_REG);
+    if (priv->type == RCAR_GEN3_SATA) {
+        sata_rcar_gen2_phy_init(priv);
+        sata_rcar_init_module(priv);
+    } else {
+        /* ack and mask */
+        iowrite32(0, base + SATAINTSTAT_REG);
+        iowrite32(0x7ff, base + SATAINTMASK_REG);
+
+        /* enable interrupts */
+        iowrite32(ATAPI_INT_ENABLE_SATAINT,
+                  base + ATAPI_INT_ENABLE_REG);
+    }
 
     ata_host_resume(host);

@@ -231,7 +231,6 @@ static const struct usb_device_id blacklist_table[] = {
     { USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
     { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
     { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
-    { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
     { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
     { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
     { USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 },

@@ -264,6 +263,7 @@ static const struct usb_device_id blacklist_table[] = {
     { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
 
     /* QCA ROME chipset */
+    { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_QCA_ROME },
     { USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME },
     { USB_DEVICE(0x0cf3, 0xe009), .driver_info = BTUSB_QCA_ROME },
     { USB_DEVICE(0x0cf3, 0xe010), .driver_info = BTUSB_QCA_ROME },

@@ -386,10 +386,10 @@ static const struct usb_device_id blacklist_table[] = {
  */
 static const struct dmi_system_id btusb_needs_reset_resume_table[] = {
     {
-        /* Lenovo Yoga 920 (QCA Rome device 0cf3:e300) */
+        /* Dell OptiPlex 3060 (QCA ROME device 0cf3:e007) */
         .matches = {
-            DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-            DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 920"),
+            DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+            DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 3060"),
         },
     },
     {}

@@ -244,7 +244,9 @@ static irqreturn_t bcm_host_wake(int irq, void *data)
 
     bt_dev_dbg(bdev, "Host wake IRQ");
 
-    pm_request_resume(bdev->dev);
+    pm_runtime_get(bdev->dev);
+    pm_runtime_mark_last_busy(bdev->dev);
+    pm_runtime_put_autosuspend(bdev->dev);
 
     return IRQ_HANDLED;
 }

@@ -301,7 +303,7 @@ static const struct bcm_set_sleep_mode default_sleep_params = {
     .usb_auto_sleep = 0,
     .usb_resume_timeout = 0,
     .break_to_host = 0,
-    .pulsed_host_wake = 0,
+    .pulsed_host_wake = 1,
 };
 
 static int bcm_setup_sleep(struct hci_uart *hu)

@@ -586,8 +588,11 @@ static int bcm_recv(struct hci_uart *hu, const void *data, int count)
     } else if (!bcm->rx_skb) {
         /* Delay auto-suspend when receiving completed packet */
         mutex_lock(&bcm_device_lock);
-        if (bcm->dev && bcm_device_exists(bcm->dev))
-            pm_request_resume(bcm->dev->dev);
+        if (bcm->dev && bcm_device_exists(bcm->dev)) {
+            pm_runtime_get(bcm->dev->dev);
+            pm_runtime_mark_last_busy(bcm->dev->dev);
+            pm_runtime_put_autosuspend(bcm->dev->dev);
+        }
         mutex_unlock(&bcm_device_lock);
     }

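Note on the hci_bcm hunks above: replacing pm_request_resume() with a get / mark-last-busy / put-autosuspend sequence both resumes the device and restarts its autosuspend timer. A minimal sketch of that runtime-PM idiom, detached from the driver (the helper name and calling context are illustrative, not from the patch):

#include <linux/pm_runtime.h>

/* Bump a device's runtime-PM activity: resume it if suspended and push
 * back the autosuspend deadline, without holding a reference past this
 * call. */
static void bump_autosuspend(struct device *dev)
{
    pm_runtime_get(dev);             /* async resume, usage count +1 */
    pm_runtime_mark_last_busy(dev);  /* restart the idle timer */
    pm_runtime_put_autosuspend(dev); /* count -1, suspend after delay */
}
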
@@ -449,17 +449,17 @@ struct bcm2835_pll_ana_bits {
 static const struct bcm2835_pll_ana_bits bcm2835_ana_default = {
     .mask0 = 0,
     .set0 = 0,
-    .mask1 = (u32)~(A2W_PLL_KI_MASK | A2W_PLL_KP_MASK),
+    .mask1 = A2W_PLL_KI_MASK | A2W_PLL_KP_MASK,
     .set1 = (2 << A2W_PLL_KI_SHIFT) | (8 << A2W_PLL_KP_SHIFT),
-    .mask3 = (u32)~A2W_PLL_KA_MASK,
+    .mask3 = A2W_PLL_KA_MASK,
     .set3 = (2 << A2W_PLL_KA_SHIFT),
     .fb_prediv_mask = BIT(14),
 };
 
 static const struct bcm2835_pll_ana_bits bcm2835_ana_pllh = {
-    .mask0 = (u32)~(A2W_PLLH_KA_MASK | A2W_PLLH_KI_LOW_MASK),
+    .mask0 = A2W_PLLH_KA_MASK | A2W_PLLH_KI_LOW_MASK,
     .set0 = (2 << A2W_PLLH_KA_SHIFT) | (2 << A2W_PLLH_KI_LOW_SHIFT),
-    .mask1 = (u32)~(A2W_PLLH_KI_HIGH_MASK | A2W_PLLH_KP_MASK),
+    .mask1 = A2W_PLLH_KI_HIGH_MASK | A2W_PLLH_KP_MASK,
     .set1 = (6 << A2W_PLLH_KP_SHIFT),
     .mask3 = 0,
     .set3 = 0,

@@ -623,8 +623,10 @@ static int bcm2835_pll_on(struct clk_hw *hw)
                  ~A2W_PLL_CTRL_PWRDN);
 
     /* Take the PLL out of reset. */
+    spin_lock(&cprman->regs_lock);
     cprman_write(cprman, data->cm_ctrl_reg,
                  cprman_read(cprman, data->cm_ctrl_reg) & ~CM_PLL_ANARST);
+    spin_unlock(&cprman->regs_lock);
 
     /* Wait for the PLL to lock. */
     timeout = ktime_add_ns(ktime_get(), LOCK_TIMEOUT_NS);

@@ -701,9 +703,11 @@ static int bcm2835_pll_set_rate(struct clk_hw *hw,
     }
 
     /* Unmask the reference clock from the oscillator. */
+    spin_lock(&cprman->regs_lock);
     cprman_write(cprman, A2W_XOSC_CTRL,
                  cprman_read(cprman, A2W_XOSC_CTRL) |
                  data->reference_enable_mask);
+    spin_unlock(&cprman->regs_lock);
 
     if (do_ana_setup_first)
         bcm2835_pll_write_ana(cprman, data->ana_reg_base, ana);

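The first bcm2835 hunk stores the PLL ana masks un-inverted; that is only correct if the helper applying them treats the mask as the set of bits to replace. A generic read-modify-write of that shape (a sketch, not the driver's actual bcm2835_pll_write_ana()):

/* Replace the bits selected by `mask` in `old` with `set`. Storing
 * positive masks, as the hunk above does, relies on the ~mask here. */
static inline unsigned int field_update(unsigned int old, unsigned int mask,
                                        unsigned int set)
{
    return (old & ~mask) | (set & mask);
}
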
@@ -205,6 +205,18 @@ static const struct aspeed_clk_soc_data ast2400_data = {
     .calc_pll = aspeed_ast2400_calc_pll,
 };
 
+static int aspeed_clk_is_enabled(struct clk_hw *hw)
+{
+    struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw);
+    u32 clk = BIT(gate->clock_idx);
+    u32 enval = (gate->flags & CLK_GATE_SET_TO_DISABLE) ? 0 : clk;
+    u32 reg;
+
+    regmap_read(gate->map, ASPEED_CLK_STOP_CTRL, &reg);
+
+    return ((reg & clk) == enval) ? 1 : 0;
+}
+
 static int aspeed_clk_enable(struct clk_hw *hw)
 {
     struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw);

@@ -215,6 +227,11 @@ static int aspeed_clk_enable(struct clk_hw *hw)
 
     spin_lock_irqsave(gate->lock, flags);
 
+    if (aspeed_clk_is_enabled(hw)) {
+        spin_unlock_irqrestore(gate->lock, flags);
+        return 0;
+    }
+
     if (gate->reset_idx >= 0) {
         /* Put IP in reset */
         regmap_update_bits(gate->map, ASPEED_RESET_CTRL, rst, rst);

@@ -255,17 +272,6 @@ static void aspeed_clk_disable(struct clk_hw *hw)
     spin_unlock_irqrestore(gate->lock, flags);
 }
 
-static int aspeed_clk_is_enabled(struct clk_hw *hw)
-{
-    struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw);
-    u32 clk = BIT(gate->clock_idx);
-    u32 reg;
-
-    regmap_read(gate->map, ASPEED_CLK_STOP_CTRL, &reg);
-
-    return (reg & clk) ? 0 : 1;
-}
-
 static const struct clk_ops aspeed_clk_gate_ops = {
     .enable = aspeed_clk_enable,
     .disable = aspeed_clk_disable,

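The clk-aspeed hunks hoist aspeed_clk_is_enabled() above the enable path so enabling can return early instead of re-running the reset/ungate sequence on a clock that is already live. The pattern in isolation, with placeholder types and hardware helpers (hw_is_on()/hw_switch_on() are hypothetical):

#include <linux/spinlock.h>

struct gate {
    spinlock_t lock;
    int on;
};

static int hw_is_on(struct gate *g)      { return g->on; } /* placeholder */
static void hw_switch_on(struct gate *g) { g->on = 1; }    /* placeholder */

/* Idempotent enable: the state check happens under the same lock that
 * serializes the hardware sequence, so a concurrent caller can never
 * re-reset a gate that is already running. */
static int gate_enable(struct gate *g)
{
    unsigned long flags;

    spin_lock_irqsave(&g->lock, flags);
    if (hw_is_on(g)) {
        spin_unlock_irqrestore(&g->lock, flags);
        return 0;
    }
    hw_switch_on(g);
    spin_unlock_irqrestore(&g->lock, flags);
    return 0;
}
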
@@ -1125,8 +1125,10 @@ static int clk_core_round_rate_nolock(struct clk_core *core,
 {
     lockdep_assert_held(&prepare_lock);
 
-    if (!core)
+    if (!core) {
+        req->rate = 0;
         return 0;
+    }
 
     clk_core_init_rate_req(core, req);

@@ -2309,8 +2311,11 @@ static int clk_core_set_phase_nolock(struct clk_core *core, int degrees)
 
     trace_clk_set_phase(core, degrees);
 
-    if (core->ops->set_phase)
+    if (core->ops->set_phase) {
         ret = core->ops->set_phase(core->hw, degrees);
+        if (!ret)
+            core->phase = degrees;
+    }
 
     trace_clk_set_phase_complete(core, degrees);

@@ -2967,23 +2972,38 @@ static int __clk_core_init(struct clk_core *core)
         rate = 0;
     core->rate = core->req_rate = rate;
 
+    /*
+     * Enable CLK_IS_CRITICAL clocks so newly added critical clocks
+     * don't get accidentally disabled when walking the orphan tree and
+     * reparenting clocks
+     */
+    if (core->flags & CLK_IS_CRITICAL) {
+        unsigned long flags;
+
+        clk_core_prepare(core);
+
+        flags = clk_enable_lock();
+        clk_core_enable(core);
+        clk_enable_unlock(flags);
+    }
+
     /*
      * walk the list of orphan clocks and reparent any that newly finds a
      * parent.
      */
     hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
         struct clk_core *parent = __clk_init_parent(orphan);
-        unsigned long flags;
 
         /*
-         * we could call __clk_set_parent, but that would result in a
-         * redundant call to the .set_rate op, if it exists
+         * We need to use __clk_set_parent_before() and _after() to
+         * to properly migrate any prepare/enable count of the orphan
+         * clock. This is important for CLK_IS_CRITICAL clocks, which
+         * are enabled during init but might not have a parent yet.
          */
         if (parent) {
             /* update the clk tree topology */
-            flags = clk_enable_lock();
-            clk_reparent(orphan, parent);
-            clk_enable_unlock(flags);
+            __clk_set_parent_before(orphan, parent);
+            __clk_set_parent_after(orphan, parent, NULL);
             __clk_recalc_accuracies(orphan);
             __clk_recalc_rates(orphan, 0);
         }

@@ -3000,16 +3020,6 @@ static int __clk_core_init(struct clk_core *core)
     if (core->ops->init)
         core->ops->init(core->hw);
 
-    if (core->flags & CLK_IS_CRITICAL) {
-        unsigned long flags;
-
-        clk_core_prepare(core);
-
-        flags = clk_enable_lock();
-        clk_core_enable(core);
-        clk_enable_unlock(flags);
-    }
-
     kref_init(&core->ref);
 out:
     clk_pm_runtime_put(core);

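The two __clk_core_init() hunks are about reference-count migration: critical clocks are now enabled before the orphan walk, and orphans are reparented via __clk_set_parent_before()/_after() so any prepare/enable count they already hold is propagated to the new parent. A schematic of why bare reparenting is not enough (field names are illustrative, not the clk core's):

struct clk_node {
    struct clk_node *parent;
    int prepare_count;
    int enable_count;
};

/* When an already-enabled orphan gains a parent, the parent must
 * inherit a prepare+enable count, otherwise a later disable of the
 * child underflows the parent's counts. */
static void reparent_migrate(struct clk_node *child, struct clk_node *parent)
{
    if (child->prepare_count) {
        parent->prepare_count++;
        if (child->enable_count)
            parent->enable_count++;
    }
    child->parent = parent; /* topology update comes last */
}
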
@@ -149,6 +149,8 @@ static int hi3660_stub_clk_probe(struct platform_device *pdev)
         return PTR_ERR(stub_clk_chan.mbox);
 
     res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+    if (!res)
+        return -EINVAL;
     freq_reg = devm_ioremap(dev, res->start, resource_size(res));
     if (!freq_reg)
         return -ENOMEM;

@@ -131,7 +131,17 @@ static const char *ieee1588_sels[] = { "pll3_sw", "pll4_sw", "dummy" /* usbphy2_
 static struct clk *clk[IMX5_CLK_END];
 static struct clk_onecell_data clk_data;
 
-static struct clk ** const uart_clks[] __initconst = {
+static struct clk ** const uart_clks_mx51[] __initconst = {
+    &clk[IMX5_CLK_UART1_IPG_GATE],
+    &clk[IMX5_CLK_UART1_PER_GATE],
+    &clk[IMX5_CLK_UART2_IPG_GATE],
+    &clk[IMX5_CLK_UART2_PER_GATE],
+    &clk[IMX5_CLK_UART3_IPG_GATE],
+    &clk[IMX5_CLK_UART3_PER_GATE],
+    NULL
+};
+
+static struct clk ** const uart_clks_mx50_mx53[] __initconst = {
     &clk[IMX5_CLK_UART1_IPG_GATE],
     &clk[IMX5_CLK_UART1_PER_GATE],
     &clk[IMX5_CLK_UART2_IPG_GATE],

@@ -321,8 +331,6 @@ static void __init mx5_clocks_common_init(void __iomem *ccm_base)
     clk_prepare_enable(clk[IMX5_CLK_TMAX1]);
     clk_prepare_enable(clk[IMX5_CLK_TMAX2]); /* esdhc2, fec */
     clk_prepare_enable(clk[IMX5_CLK_TMAX3]); /* esdhc1, esdhc4 */
-
-    imx_register_uart_clocks(uart_clks);
 }
 
 static void __init mx50_clocks_init(struct device_node *np)

@@ -388,6 +396,8 @@ static void __init mx50_clocks_init(struct device_node *np)
 
     r = clk_round_rate(clk[IMX5_CLK_USBOH3_PER_GATE], 54000000);
     clk_set_rate(clk[IMX5_CLK_USBOH3_PER_GATE], r);
+
+    imx_register_uart_clocks(uart_clks_mx50_mx53);
 }
 CLK_OF_DECLARE(imx50_ccm, "fsl,imx50-ccm", mx50_clocks_init);

@@ -477,6 +487,8 @@ static void __init mx51_clocks_init(struct device_node *np)
     val = readl(MXC_CCM_CLPCR);
     val |= 1 << 23;
     writel(val, MXC_CCM_CLPCR);
+
+    imx_register_uart_clocks(uart_clks_mx51);
 }
 CLK_OF_DECLARE(imx51_ccm, "fsl,imx51-ccm", mx51_clocks_init);

@@ -606,5 +618,7 @@ static void __init mx53_clocks_init(struct device_node *np)
 
     r = clk_round_rate(clk[IMX5_CLK_USBOH3_PER_GATE], 54000000);
     clk_set_rate(clk[IMX5_CLK_USBOH3_PER_GATE], r);
+
+    imx_register_uart_clocks(uart_clks_mx50_mx53);
 }
 CLK_OF_DECLARE(imx53_ccm, "fsl,imx53-ccm", mx53_clocks_init);

@@ -49,11 +49,10 @@ static int qcom_apcs_msm8916_clk_probe(struct platform_device *pdev)
     struct clk_regmap_mux_div *a53cc;
     struct regmap *regmap;
     struct clk_init_data init = { };
-    int ret;
+    int ret = -ENODEV;
 
     regmap = dev_get_regmap(parent, NULL);
-    if (IS_ERR(regmap)) {
-        ret = PTR_ERR(regmap);
+    if (!regmap) {
         dev_err(dev, "failed to get regmap: %d\n", ret);
         return ret;
     }

@@ -762,7 +762,7 @@ static struct ccu_mp out_a_clk = {
         .features   = CCU_FEATURE_FIXED_PREDIV,
         .hw.init    = CLK_HW_INIT_PARENTS("out-a",
                                           clk_out_parents,
-                                          &ccu_div_ops,
+                                          &ccu_mp_ops,
                                           0),
     },
 };

@@ -783,7 +783,7 @@ static struct ccu_mp out_b_clk = {
         .features   = CCU_FEATURE_FIXED_PREDIV,
         .hw.init    = CLK_HW_INIT_PARENTS("out-b",
                                           clk_out_parents,
-                                          &ccu_div_ops,
+                                          &ccu_mp_ops,
                                           0),
     },
 };

@@ -804,7 +804,7 @@ static struct ccu_mp out_c_clk = {
         .features   = CCU_FEATURE_FIXED_PREDIV,
         .hw.init    = CLK_HW_INIT_PARENTS("out-c",
                                           clk_out_parents,
-                                          &ccu_div_ops,
+                                          &ccu_mp_ops,
                                           0),
     },
 };

@@ -45,7 +45,7 @@ static const struct omap_clkctrl_bit_data am3_gpio4_bit_data[] __initconst = {
 
 static const struct omap_clkctrl_reg_data am3_l4_per_clkctrl_regs[] __initconst = {
     { AM3_CPGMAC0_CLKCTRL, NULL, CLKF_SW_SUP, "cpsw_125mhz_gclk", "cpsw_125mhz_clkdm" },
-    { AM3_LCDC_CLKCTRL, NULL, CLKF_SW_SUP, "lcd_gclk", "lcdc_clkdm" },
+    { AM3_LCDC_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_SET_RATE_PARENT, "lcd_gclk", "lcdc_clkdm" },
     { AM3_USB_OTG_HS_CLKCTRL, NULL, CLKF_SW_SUP, "usbotg_fck", "l3s_clkdm" },
     { AM3_TPTC0_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
     { AM3_EMIF_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_ddr_m2_div2_ck", "l3_clkdm" },

@@ -187,7 +187,7 @@ static const struct omap_clkctrl_reg_data am4_l4_per_clkctrl_regs[] __initconst
     { AM4_OCP2SCP0_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
     { AM4_OCP2SCP1_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
     { AM4_EMIF_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_ddr_m2_ck", "emif_clkdm" },
-    { AM4_DSS_CORE_CLKCTRL, NULL, CLKF_SW_SUP, "disp_clk", "dss_clkdm" },
+    { AM4_DSS_CORE_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_SET_RATE_PARENT, "disp_clk", "dss_clkdm" },
     { AM4_CPGMAC0_CLKCTRL, NULL, CLKF_SW_SUP, "cpsw_125mhz_gclk", "cpsw_125mhz_clkdm" },
     { 0 },
 };

@@ -537,6 +537,8 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
     init.parent_names = &reg_data->parent;
     init.num_parents = 1;
     init.flags = 0;
+    if (reg_data->flags & CLKF_SET_RATE_PARENT)
+        init.flags |= CLK_SET_RATE_PARENT;
     init.name = kasprintf(GFP_KERNEL, "%s:%s:%04x:%d",
                           node->parent->name, node->name,
                           reg_data->offset, 0);

@@ -118,14 +118,15 @@ static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
     spin_lock_irqsave(&dmamux->lock, flags);
     mux->chan_id = find_first_zero_bit(dmamux->dma_inuse,
                                        dmamux->dma_requests);
-    set_bit(mux->chan_id, dmamux->dma_inuse);
-    spin_unlock_irqrestore(&dmamux->lock, flags);
 
     if (mux->chan_id == dmamux->dma_requests) {
+        spin_unlock_irqrestore(&dmamux->lock, flags);
         dev_err(&pdev->dev, "Run out of free DMA requests\n");
         ret = -ENOMEM;
-        goto error;
+        goto error_chan_id;
     }
+    set_bit(mux->chan_id, dmamux->dma_inuse);
+    spin_unlock_irqrestore(&dmamux->lock, flags);
 
     /* Look for DMA Master */
     for (i = 1, min = 0, max = dmamux->dma_reqs[i];

@@ -173,6 +174,8 @@ static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
 
 error:
     clear_bit(mux->chan_id, dmamux->dma_inuse);
+
+error_chan_id:
     kfree(mux);
     return ERR_PTR(ret);
 }

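The dmamux fix makes find-and-claim atomic: the free bit is reserved while the spinlock is still held, and the out-of-channels path unlocks before reporting failure. The corrected shape, reduced to its essentials:

#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/spinlock.h>

/* Atomically find and claim a free slot; returns the slot id or
 * -ENOMEM if all are taken. */
static int claim_slot(unsigned long *inuse, unsigned int nslots,
                      spinlock_t *lock)
{
    unsigned long flags;
    unsigned int id;

    spin_lock_irqsave(lock, flags);
    id = find_first_zero_bit(inuse, nslots);
    if (id == nslots) {
        spin_unlock_irqrestore(lock, flags);
        return -ENOMEM;
    }
    set_bit(id, inuse); /* claim before dropping the lock */
    spin_unlock_irqrestore(lock, flags);

    return id;
}
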
@@ -2063,9 +2063,12 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 
     DRM_INFO("amdgpu: finishing device.\n");
     adev->shutdown = true;
-    if (adev->mode_info.mode_config_initialized)
-        drm_crtc_force_disable_all(adev->ddev);
-
+    if (adev->mode_info.mode_config_initialized){
+        if (!amdgpu_device_has_dc_support(adev))
+            drm_crtc_force_disable_all(adev->ddev);
+        else
+            drm_atomic_helper_shutdown(adev->ddev);
+    }
     amdgpu_ib_pool_fini(adev);
     amdgpu_fence_driver_fini(adev);
     amdgpu_fbdev_fini(adev);

@@ -3134,8 +3134,6 @@ static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
 
     switch (aplane->base.type) {
     case DRM_PLANE_TYPE_PRIMARY:
-        aplane->base.format_default = true;
-
         res = drm_universal_plane_init(
                 dm->adev->ddev,
                 &aplane->base,

@@ -4794,6 +4792,9 @@ static int dm_atomic_check_plane_state_fb(struct drm_atomic_state *state,
         return -EDEADLK;
 
     crtc_state = drm_atomic_get_crtc_state(plane_state->state, crtc);
+    if (IS_ERR(crtc_state))
+        return PTR_ERR(crtc_state);
+
     if (crtc->primary == plane && crtc_state->active) {
         if (!plane_state->fb)
             return -EINVAL;

@@ -109,7 +109,7 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
         struct cea_sad *sad = &sads[i];
 
         edid_caps->audio_modes[i].format_code = sad->format;
-        edid_caps->audio_modes[i].channel_count = sad->channels;
+        edid_caps->audio_modes[i].channel_count = sad->channels + 1;
         edid_caps->audio_modes[i].sample_rate = sad->freq;
         edid_caps->audio_modes[i].sample_size = sad->byte2;
     }

@@ -496,6 +496,9 @@ struct dce_hwseq_registers {
     HWS_SF(, DOMAIN7_PG_STATUS, DOMAIN7_PGFSM_PWR_STATUS, mask_sh), \
     HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
     HWS_SF(, D1VGA_CONTROL, D1VGA_MODE_ENABLE, mask_sh),\
+    HWS_SF(, D2VGA_CONTROL, D2VGA_MODE_ENABLE, mask_sh),\
+    HWS_SF(, D3VGA_CONTROL, D3VGA_MODE_ENABLE, mask_sh),\
+    HWS_SF(, D4VGA_CONTROL, D4VGA_MODE_ENABLE, mask_sh),\
     HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_ENABLE, mask_sh),\
     HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_RENDER_START, mask_sh),\
     HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \

@@ -591,7 +594,10 @@ struct dce_hwseq_registers {
     type DENTIST_DISPCLK_WDIVIDER; \
     type VGA_TEST_ENABLE; \
     type VGA_TEST_RENDER_START; \
-    type D1VGA_MODE_ENABLE;
+    type D1VGA_MODE_ENABLE; \
+    type D2VGA_MODE_ENABLE; \
+    type D3VGA_MODE_ENABLE; \
+    type D4VGA_MODE_ENABLE;
 
 struct dce_hwseq_shift {
     HWSEQ_REG_FIELD_LIST(uint8_t)

@@ -128,23 +128,22 @@ static void set_truncation(
         return;
     }
     /* on other format-to do */
-    if (params->flags.TRUNCATE_ENABLED == 0 ||
-        params->flags.TRUNCATE_DEPTH == 2)
+    if (params->flags.TRUNCATE_ENABLED == 0)
         return;
     /*Set truncation depth and Enable truncation*/
     REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL,
             FMT_TRUNCATE_EN, 1,
             FMT_TRUNCATE_DEPTH,
-            params->flags.TRUNCATE_MODE,
+            params->flags.TRUNCATE_DEPTH,
             FMT_TRUNCATE_MODE,
-            params->flags.TRUNCATE_DEPTH);
+            params->flags.TRUNCATE_MODE);
 }
 
 
 /**
  *    set_spatial_dither
  *    1) set spatial dithering mode: pattern of seed
- *    2) set spatical dithering depth: 0 for 18bpp or 1 for 24bpp
+ *    2) set spatial dithering depth: 0 for 18bpp or 1 for 24bpp
  *    3) set random seed
  *    4) set random mode
  *        lfsr is reset every frame or not reset

@@ -238,14 +238,24 @@ static void enable_power_gating_plane(
 static void disable_vga(
     struct dce_hwseq *hws)
 {
-    unsigned int in_vga_mode = 0;
+    unsigned int in_vga1_mode = 0;
+    unsigned int in_vga2_mode = 0;
+    unsigned int in_vga3_mode = 0;
+    unsigned int in_vga4_mode = 0;
 
-    REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga_mode);
+    REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
+    REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
+    REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
+    REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);
 
-    if (in_vga_mode == 0)
+    if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
+            in_vga3_mode == 0 && in_vga4_mode == 0)
         return;
 
     REG_WRITE(D1VGA_CONTROL, 0);
+    REG_WRITE(D2VGA_CONTROL, 0);
+    REG_WRITE(D3VGA_CONTROL, 0);
+    REG_WRITE(D4VGA_CONTROL, 0);
 
     /* HW Engineer's Notes:
      * During switch from vga->extended, if we set the VGA_TEST_ENABLE and

@@ -97,7 +97,7 @@ static const struct ast_vbios_dclk_info dclk_table[] = {
     {0x67, 0x22, 0x00},  /* 0E: VCLK157_5 */
     {0x6A, 0x22, 0x00},  /* 0F: VCLK162   */
     {0x4d, 0x4c, 0x80},  /* 10: VCLK154   */
-    {0xa7, 0x78, 0x80},  /* 11: VCLK83.5  */
+    {0x68, 0x6f, 0x80},  /* 11: VCLK83.5  */
     {0x28, 0x49, 0x80},  /* 12: VCLK106.5 */
     {0x37, 0x49, 0x80},  /* 13: VCLK146.25 */
     {0x1f, 0x45, 0x80},  /* 14: VCLK148.5 */

@@ -127,7 +127,7 @@ static const struct ast_vbios_dclk_info dclk_table_ast2500[] = {
     {0x67, 0x22, 0x00},  /* 0E: VCLK157_5 */
     {0x6A, 0x22, 0x00},  /* 0F: VCLK162   */
     {0x4d, 0x4c, 0x80},  /* 10: VCLK154   */
-    {0xa7, 0x78, 0x80},  /* 11: VCLK83.5  */
+    {0x68, 0x6f, 0x80},  /* 11: VCLK83.5  */
     {0x28, 0x49, 0x80},  /* 12: VCLK106.5 */
     {0x37, 0x49, 0x80},  /* 13: VCLK146.25 */
     {0x1f, 0x45, 0x80},  /* 14: VCLK148.5 */

@@ -461,6 +461,12 @@ int drm_mode_getfb(struct drm_device *dev,
     if (!fb)
         return -ENOENT;
 
+    /* Multi-planar framebuffers need getfb2. */
+    if (fb->format->num_planes > 1) {
+        ret = -EINVAL;
+        goto out;
+    }
+
     r->height = fb->height;
     r->width = fb->width;
     r->depth = fb->format->depth;

@@ -484,6 +490,7 @@ int drm_mode_getfb(struct drm_device *dev,
         ret = -ENODEV;
     }
 
+out:
     drm_framebuffer_put(fb);
 
     return ret;

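The drm_mode_getfb() hunks pair a new early check with a single exit path: multi-planar framebuffers are rejected (the legacy reply cannot describe them), and every branch now funnels through one out: label so the framebuffer reference is dropped exactly once. In miniature, with an illustrative type rather than the DRM structs:

#include <errno.h>

struct fb_ref { int num_planes; int refcount; };

static int getfb_like(struct fb_ref *fb, int *reply)
{
    int ret = 0;

    /* the single-plane ABI cannot express this object */
    if (fb->num_planes > 1) {
        ret = -EINVAL;
        goto out;
    }

    *reply = 0; /* fill in the reply here */
out:
    fb->refcount--; /* exactly one unref on every path */
    return ret;
}
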
@@ -2175,8 +2175,7 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
     intel_prepare_dp_ddi_buffers(encoder, crtc_state);
 
     intel_ddi_init_dp_buf_reg(encoder);
-    if (!is_mst)
-        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+    intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
     intel_dp_start_link_train(intel_dp);
     if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
         intel_dp_stop_link_train(intel_dp);

@@ -2274,14 +2273,12 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
     struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
     struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
     struct intel_dp *intel_dp = &dig_port->dp;
-    bool is_mst = intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST);
 
     /*
      * Power down sink before disabling the port, otherwise we end
      * up getting interrupts from the sink on detecting link loss.
      */
-    if (!is_mst)
-        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
+    intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
 
     intel_disable_ddi_buf(encoder);

@@ -246,7 +246,7 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)
      */
     tmp = I915_READ_CTL(engine);
     if (tmp & RING_WAIT) {
-        i915_handle_error(dev_priv, 0,
+        i915_handle_error(dev_priv, BIT(engine->id),
                           "Kicking stuck wait on %s",
                           engine->name);
         I915_WRITE_CTL(engine, tmp);

@@ -258,7 +258,7 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)
     default:
         return ENGINE_DEAD;
     case 1:
-        i915_handle_error(dev_priv, 0,
+        i915_handle_error(dev_priv, ALL_ENGINES,
                           "Kicking stuck semaphore on %s",
                           engine->name);
         I915_WRITE_CTL(engine, tmp);

@@ -225,7 +225,11 @@ static void ipu_crtc_atomic_begin(struct drm_crtc *crtc,
                                   struct drm_crtc_state *old_crtc_state)
 {
     drm_crtc_vblank_on(crtc);
+}
 
+static void ipu_crtc_atomic_flush(struct drm_crtc *crtc,
+                                  struct drm_crtc_state *old_crtc_state)
+{
     spin_lock_irq(&crtc->dev->event_lock);
     if (crtc->state->event) {
         WARN_ON(drm_crtc_vblank_get(crtc));

@@ -293,6 +297,7 @@ static const struct drm_crtc_helper_funcs ipu_helper_funcs = {
     .mode_set_nofb = ipu_crtc_mode_set_nofb,
     .atomic_check = ipu_crtc_atomic_check,
     .atomic_begin = ipu_crtc_atomic_begin,
+    .atomic_flush = ipu_crtc_atomic_flush,
     .atomic_disable = ipu_crtc_atomic_disable,
     .atomic_enable = ipu_crtc_atomic_enable,
 };

@@ -22,6 +22,7 @@
 #include <drm/drm_plane_helper.h>
 
 #include "video/imx-ipu-v3.h"
+#include "imx-drm.h"
 #include "ipuv3-plane.h"
 
 struct ipu_plane_state {

@@ -272,7 +273,7 @@ static void ipu_plane_destroy(struct drm_plane *plane)
     kfree(ipu_plane);
 }
 
-void ipu_plane_state_reset(struct drm_plane *plane)
+static void ipu_plane_state_reset(struct drm_plane *plane)
 {
     struct ipu_plane_state *ipu_state;
 
@@ -292,7 +293,8 @@ static void ipu_plane_state_reset(struct drm_plane *plane)
     plane->state = &ipu_state->base;
 }
 
-struct drm_plane_state *ipu_plane_duplicate_state(struct drm_plane *plane)
+static struct drm_plane_state *
+ipu_plane_duplicate_state(struct drm_plane *plane)
 {
     struct ipu_plane_state *state;
 
@@ -306,8 +308,8 @@ static struct drm_plane_state *ipu_plane_duplicate_state(struct drm_plane *plane)
     return &state->base;
 }
 
-void ipu_plane_destroy_state(struct drm_plane *plane,
-                             struct drm_plane_state *state)
+static void ipu_plane_destroy_state(struct drm_plane *plane,
+                                    struct drm_plane_state *state)
 {
     struct ipu_plane_state *ipu_state = to_ipu_plane_state(state);
 
@@ -90,25 +90,18 @@ void radeon_connector_hotplug(struct drm_connector *connector)
         /* don't do anything if sink is not display port, i.e.,
          * passive dp->(dvi|hdmi) adaptor
          */
-        if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
-            int saved_dpms = connector->dpms;
-            /* Only turn off the display if it's physically disconnected */
-            if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
-                drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
-            } else if (radeon_dp_needs_link_train(radeon_connector)) {
-                /* Don't try to start link training before we
-                 * have the dpcd */
-                if (!radeon_dp_getdpcd(radeon_connector))
-                    return;
+        if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT &&
+            radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) &&
+            radeon_dp_needs_link_train(radeon_connector)) {
+            /* Don't start link training before we have the DPCD */
+            if (!radeon_dp_getdpcd(radeon_connector))
+                return;
 
-                /* set it to OFF so that drm_helper_connector_dpms()
-                 * won't return immediately since the current state
-                 * is ON at this point.
-                 */
-                connector->dpms = DRM_MODE_DPMS_OFF;
-                drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
-            }
-            connector->dpms = saved_dpms;
+            /* Turn the connector off and back on immediately, which
+             * will trigger link training
+             */
+            drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+            drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
         }
     }
 }

@@ -111,7 +111,7 @@ static int sun4i_drv_bind(struct device *dev)
     /* drm_vblank_init calls kcalloc, which can fail */
     ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
     if (ret)
-        goto free_mem_region;
+        goto cleanup_mode_config;
 
     drm->irq_enabled = true;
 
@@ -139,7 +139,6 @@ finish_poll:
     sun4i_framebuffer_free(drm);
 cleanup_mode_config:
     drm_mode_config_cleanup(drm);
-free_mem_region:
     of_reserved_mem_device_release(dev);
 free_drm:
     drm_dev_unref(drm);

@@ -538,7 +538,8 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
                            &sun4i_hdmi_regmap_config);
     if (IS_ERR(hdmi->regmap)) {
         dev_err(dev, "Couldn't create HDMI encoder regmap\n");
-        return PTR_ERR(hdmi->regmap);
+        ret = PTR_ERR(hdmi->regmap);
+        goto err_disable_mod_clk;
     }
 
     ret = sun4i_tmds_create(hdmi);

@@ -551,7 +552,8 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
         hdmi->ddc_parent_clk = devm_clk_get(dev, "ddc");
         if (IS_ERR(hdmi->ddc_parent_clk)) {
             dev_err(dev, "Couldn't get the HDMI DDC clock\n");
-            return PTR_ERR(hdmi->ddc_parent_clk);
+            ret = PTR_ERR(hdmi->ddc_parent_clk);
+            goto err_disable_mod_clk;
         }
     } else {
         hdmi->ddc_parent_clk = hdmi->tmds_clk;

@@ -103,6 +103,7 @@ static void sun4i_tcon_channel_set_status(struct sun4i_tcon *tcon, int channel,
 
     if (enabled) {
         clk_prepare_enable(clk);
+        clk_rate_exclusive_get(clk);
     } else {
         clk_rate_exclusive_put(clk);
         clk_disable_unprepare(clk);

@@ -262,7 +263,7 @@ static void sun4i_tcon0_mode_set_common(struct sun4i_tcon *tcon,
                                         const struct drm_display_mode *mode)
 {
     /* Configure the dot clock */
-    clk_set_rate_exclusive(tcon->dclk, mode->crtc_clock * 1000);
+    clk_set_rate(tcon->dclk, mode->crtc_clock * 1000);
 
     /* Set the resolution */
     regmap_write(tcon->regs, SUN4I_TCON0_BASIC0_REG,

@@ -423,7 +424,7 @@ static void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon,
     WARN_ON(!tcon->quirks->has_channel_1);
 
     /* Configure the dot clock */
-    clk_set_rate_exclusive(tcon->sclk1, mode->crtc_clock * 1000);
+    clk_set_rate(tcon->sclk1, mode->crtc_clock * 1000);
 
     /* Adjust clock delay */
     clk_delay = sun4i_tcon_get_clk_delay(mode, 1);

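The sun4i TCON hunks move rate exclusivity out of the mode-set path: the dot clocks are set with plain clk_set_rate(), and clk_rate_exclusive_get()/clk_rate_exclusive_put() now bracket channel enable/disable, so the rate is locked exactly while the channel runs. The pairing, roughly (the helper names are illustrative):

#include <linux/clk.h>

static void channel_enable(struct clk *clk, unsigned long rate)
{
    clk_set_rate(clk, rate);      /* plain rate request */
    clk_prepare_enable(clk);
    clk_rate_exclusive_get(clk);  /* freeze the rate while running */
}

static void channel_disable(struct clk *clk)
{
    clk_rate_exclusive_put(clk);  /* release before stopping */
    clk_disable_unprepare(clk);
}
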
@@ -1903,8 +1903,12 @@ cleanup:
     if (!IS_ERR(primary))
         drm_plane_cleanup(primary);
 
-    if (group && tegra->domain) {
-        iommu_detach_group(tegra->domain, group);
+    if (group && dc->domain) {
+        if (group == tegra->group) {
+            iommu_detach_group(dc->domain, group);
+            tegra->group = NULL;
+        }
+
+        dc->domain = NULL;
     }
 
@@ -1913,8 +1917,10 @@ cleanup:
 
 static int tegra_dc_exit(struct host1x_client *client)
 {
+    struct drm_device *drm = dev_get_drvdata(client->parent);
     struct iommu_group *group = iommu_group_get(client->dev);
     struct tegra_dc *dc = host1x_client_to_dc(client);
+    struct tegra_drm *tegra = drm->dev_private;
     int err;
 
     devm_free_irq(dc->dev, dc->irq, dc);

@@ -1926,7 +1932,11 @@ static int tegra_dc_exit(struct host1x_client *client)
     }
 
     if (group && dc->domain) {
-        iommu_detach_group(dc->domain, group);
+        if (group == tegra->group) {
+            iommu_detach_group(dc->domain, group);
+            tegra->group = NULL;
+        }
+
         dc->domain = NULL;
     }
 
@@ -250,6 +250,7 @@ static void tegra_drm_unload(struct drm_device *drm)
 
     drm_kms_helper_poll_fini(drm);
     tegra_drm_fb_exit(drm);
+    drm_atomic_helper_shutdown(drm);
     drm_mode_config_cleanup(drm);
 
     err = host1x_device_exit(device);

@@ -1072,7 +1072,6 @@ static int tegra_dsi_exit(struct host1x_client *client)
     struct tegra_dsi *dsi = host1x_client_to_dsi(client);
 
     tegra_output_exit(&dsi->output);
-    regulator_disable(dsi->vdd);
 
     return 0;
 }

@@ -297,6 +297,10 @@ int tegra_plane_format_get_alpha(unsigned int opaque, unsigned int *alpha)
     case WIN_COLOR_DEPTH_B8G8R8X8:
         *alpha = WIN_COLOR_DEPTH_B8G8R8A8;
         return 0;
+
+    case WIN_COLOR_DEPTH_B5G6R5:
+        *alpha = opaque;
+        return 0;
     }
 
     return -EINVAL;

@@ -330,9 +334,6 @@ void tegra_plane_check_dependent(struct tegra_plane *tegra,
     unsigned int zpos[2];
     unsigned int i;
 
-    for (i = 0; i < 3; i++)
-        state->dependent[i] = false;
-
     for (i = 0; i < 2; i++)
         zpos[i] = 0;
 
@@ -346,6 +347,8 @@ void tegra_plane_check_dependent(struct tegra_plane *tegra,
 
         index = tegra_plane_get_overlap_index(tegra, p);
 
+        state->dependent[index] = false;
+
         /*
          * If any of the other planes is on top of this plane and uses
          * a format with an alpha component, mark this plane as being

@@ -159,10 +159,15 @@ static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
 {
     unsigned long start = vma->vm_start;
     unsigned long size = vma->vm_end - vma->vm_start;
-    unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+    unsigned long offset;
     unsigned long page, pos;
 
-    if (offset + size > info->fix.smem_len)
+    if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
+        return -EINVAL;
+
+    offset = vma->vm_pgoff << PAGE_SHIFT;
+
+    if (offset > info->fix.smem_len || size > info->fix.smem_len - offset)
         return -EINVAL;
 
     pos = (unsigned long)info->fix.smem_start + offset;

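The udl_fb_mmap() hunk is the canonical overflow-safe bounds check: first reject a page offset whose byte value cannot be represented, then compare against the buffer length by subtraction so no addition can wrap. Extracted as a standalone helper (smem_len stands in for info->fix.smem_len):

#include <linux/errno.h>
#include <linux/mm.h>

static int check_mmap_bounds(const struct vm_area_struct *vma,
                             unsigned long smem_len)
{
    unsigned long size = vma->vm_end - vma->vm_start;
    unsigned long offset;

    if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) /* shift would overflow */
        return -EINVAL;

    offset = vma->vm_pgoff << PAGE_SHIFT;

    /* subtract instead of computing offset + size, which could wrap */
    if (offset > smem_len || size > smem_len - offset)
        return -EINVAL;

    return 0;
}
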
@@ -1337,6 +1337,19 @@ static void __vmw_svga_disable(struct vmw_private *dev_priv)
  */
 void vmw_svga_disable(struct vmw_private *dev_priv)
 {
+    /*
+     * Disabling SVGA will turn off device modesetting capabilities, so
+     * notify KMS about that so that it doesn't cache atomic state that
+     * isn't valid anymore, for example crtcs turned on.
+     * Strictly we'd want to do this under the SVGA lock (or an SVGA mutex),
+     * but vmw_kms_lost_device() takes the reservation sem and thus we'll
+     * end up with lock order reversal. Thus, a master may actually perform
+     * a new modeset just after we call vmw_kms_lost_device() and race with
+     * vmw_svga_disable(), but that should at worst cause atomic KMS state
+     * to be inconsistent with the device, causing modesetting problems.
+     *
+     */
+    vmw_kms_lost_device(dev_priv->dev);
     ttm_write_lock(&dev_priv->reservation_sem, false);
     spin_lock(&dev_priv->svga_lock);
     if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {

@@ -938,6 +938,7 @@ int vmw_kms_present(struct vmw_private *dev_priv,
 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv);
 void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv);
+void vmw_kms_lost_device(struct drm_device *dev);
 
 int vmw_dumb_create(struct drm_file *file_priv,
                     struct drm_device *dev,

@@ -31,7 +31,6 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_rect.h>
 
-
 /* Might need a hrtimer here? */
 #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
 
@@ -2517,9 +2516,12 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
  * Helper to be used if an error forces the caller to undo the actions of
  * vmw_kms_helper_resource_prepare.
  */
-void vmw_kms_helper_resource_revert(struct vmw_resource *res)
+void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx)
 {
-    vmw_kms_helper_buffer_revert(res->backup);
+    struct vmw_resource *res = ctx->res;
+
+    vmw_kms_helper_buffer_revert(ctx->buf);
+    vmw_dmabuf_unreference(&ctx->buf);
     vmw_resource_unreserve(res, false, NULL, 0);
     mutex_unlock(&res->dev_priv->cmdbuf_mutex);
 }

@@ -2536,10 +2538,14 @@ void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx)
  * interrupted by a signal.
  */
 int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
-                                    bool interruptible)
+                                    bool interruptible,
+                                    struct vmw_validation_ctx *ctx)
 {
     int ret = 0;
 
+    ctx->buf = NULL;
+    ctx->res = res;
+
     if (interruptible)
         ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex);
     else

@@ -2558,6 +2564,8 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
                                             res->dev_priv->has_mob);
         if (ret)
             goto out_unreserve;
+
+        ctx->buf = vmw_dmabuf_reference(res->backup);
     }
     ret = vmw_resource_validate(res);
     if (ret)

@@ -2565,7 +2573,7 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
     return 0;
 
 out_revert:
-    vmw_kms_helper_buffer_revert(res->backup);
+    vmw_kms_helper_buffer_revert(ctx->buf);
 out_unreserve:
     vmw_resource_unreserve(res, false, NULL, 0);
 out_unlock:

@@ -2581,11 +2589,13 @@ out_unlock:
  * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
  * ref-counted fence pointer is returned here.
  */
-void vmw_kms_helper_resource_finish(struct vmw_resource *res,
-                                    struct vmw_fence_obj **out_fence)
+void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
+                                    struct vmw_fence_obj **out_fence)
 {
-    if (res->backup || out_fence)
-        vmw_kms_helper_buffer_finish(res->dev_priv, NULL, res->backup,
+    struct vmw_resource *res = ctx->res;
+
+    if (ctx->buf || out_fence)
+        vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf,
                                      out_fence, NULL);
 
     vmw_resource_unreserve(res, false, NULL, 0);

@@ -2851,3 +2861,14 @@ int vmw_kms_set_config(struct drm_mode_set *set,
 
     return drm_atomic_helper_set_config(set, ctx);
 }
+
+
+/**
+ * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
+ *
+ * @dev: Pointer to the drm device
+ */
+void vmw_kms_lost_device(struct drm_device *dev)
+{
+    drm_atomic_helper_shutdown(dev);
+}

@@ -240,6 +240,11 @@ struct vmw_display_unit {
     int set_gui_y;
 };
 
+struct vmw_validation_ctx {
+    struct vmw_resource *res;
+    struct vmw_dma_buffer *buf;
+};
+
 #define vmw_crtc_to_du(x) \
     container_of(x, struct vmw_display_unit, crtc)
 #define vmw_connector_to_du(x) \

@@ -296,9 +301,10 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
                                   struct drm_vmw_fence_rep __user *
                                   user_fence_rep);
 int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
-                                    bool interruptible);
-void vmw_kms_helper_resource_revert(struct vmw_resource *res);
-void vmw_kms_helper_resource_finish(struct vmw_resource *res,
+                                    bool interruptible,
+                                    struct vmw_validation_ctx *ctx);
+void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx);
+void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
                                     struct vmw_fence_obj **out_fence);
 int vmw_kms_readback(struct vmw_private *dev_priv,
                      struct drm_file *file_priv,

@@ -439,5 +445,4 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
 
 int vmw_kms_set_config(struct drm_mode_set *set,
                        struct drm_modeset_acquire_ctx *ctx);
-
 #endif

@@ -909,12 +909,13 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
     struct vmw_framebuffer_surface *vfbs =
         container_of(framebuffer, typeof(*vfbs), base);
     struct vmw_kms_sou_surface_dirty sdirty;
+    struct vmw_validation_ctx ctx;
     int ret;
 
     if (!srf)
         srf = &vfbs->surface->res;
 
-    ret = vmw_kms_helper_resource_prepare(srf, true);
+    ret = vmw_kms_helper_resource_prepare(srf, true, &ctx);
     if (ret)
         return ret;
 
@@ -933,7 +934,7 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
     ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
                                dest_x, dest_y, num_clips, inc,
                                &sdirty.base);
-    vmw_kms_helper_resource_finish(srf, out_fence);
+    vmw_kms_helper_resource_finish(&ctx, out_fence);
 
     return ret;
 }

@@ -980,12 +980,13 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
     struct vmw_framebuffer_surface *vfbs =
         container_of(framebuffer, typeof(*vfbs), base);
     struct vmw_stdu_dirty sdirty;
+    struct vmw_validation_ctx ctx;
     int ret;
 
     if (!srf)
         srf = &vfbs->surface->res;
 
-    ret = vmw_kms_helper_resource_prepare(srf, true);
+    ret = vmw_kms_helper_resource_prepare(srf, true, &ctx);
     if (ret)
         return ret;
 
@@ -1008,7 +1009,7 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
                                dest_x, dest_y, num_clips, inc,
                                &sdirty.base);
 out_finish:
-    vmw_kms_helper_resource_finish(srf, out_fence);
+    vmw_kms_helper_resource_finish(&ctx, out_fence);
 
     return ret;
 }

@@ -250,10 +250,14 @@ void ipu_prg_channel_disable(struct ipuv3_channel *ipu_chan)
 {
     int prg_chan = ipu_prg_ipu_to_prg_chan(ipu_chan->num);
     struct ipu_prg *prg = ipu_chan->ipu->prg_priv;
-    struct ipu_prg_channel *chan = &prg->chan[prg_chan];
+    struct ipu_prg_channel *chan;
     u32 val;
 
-    if (!chan->enabled || prg_chan < 0)
+    if (prg_chan < 0)
+        return;
+
+    chan = &prg->chan[prg_chan];
+    if (!chan->enabled)
         return;
 
     pm_runtime_get_sync(prg->dev);

@@ -280,13 +284,15 @@ int ipu_prg_channel_configure(struct ipuv3_channel *ipu_chan,
 {
     int prg_chan = ipu_prg_ipu_to_prg_chan(ipu_chan->num);
     struct ipu_prg *prg = ipu_chan->ipu->prg_priv;
-    struct ipu_prg_channel *chan = &prg->chan[prg_chan];
+    struct ipu_prg_channel *chan;
     u32 val;
     int ret;
 
     if (prg_chan < 0)
         return prg_chan;
 
+    chan = &prg->chan[prg_chan];
+
     if (chan->enabled) {
         ipu_pre_update(prg->pres[chan->used_pre], *eba);
         return 0;

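Both ipu-prg hunks apply the same rule: validate the index before it is used to form a pointer, since even computing &array[negative] without dereferencing it is undefined behaviour. Reduced to a sketch with an illustrative channel type:

struct prg_chan_s { int enabled; };

static struct prg_chan_s *lookup_chan(struct prg_chan_s *chans, int idx)
{
    if (idx < 0)        /* check first ... */
        return NULL;

    return &chans[idx]; /* ... derive the pointer second */
}
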
@@ -417,13 +417,24 @@ __hv_pkt_iter_next(struct vmbus_channel *channel,
 }
 EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);
 
+/* How many bytes were read in this iterator cycle */
+static u32 hv_pkt_iter_bytes_read(const struct hv_ring_buffer_info *rbi,
+                                  u32 start_read_index)
+{
+    if (rbi->priv_read_index >= start_read_index)
+        return rbi->priv_read_index - start_read_index;
+    else
+        return rbi->ring_datasize - start_read_index +
+            rbi->priv_read_index;
+}
+
 /*
  * Update host ring buffer after iterating over packets.
  */
 void hv_pkt_iter_close(struct vmbus_channel *channel)
 {
     struct hv_ring_buffer_info *rbi = &channel->inbound;
-    u32 orig_write_sz = hv_get_bytes_to_write(rbi);
+    u32 curr_write_sz, pending_sz, bytes_read, start_read_index;
 
     /*
      * Make sure all reads are done before we update the read index since

@@ -431,8 +442,12 @@ void hv_pkt_iter_close(struct vmbus_channel *channel)
      * is updated.
      */
     virt_rmb();
+    start_read_index = rbi->ring_buffer->read_index;
     rbi->ring_buffer->read_index = rbi->priv_read_index;
 
+    if (!rbi->ring_buffer->feature_bits.feat_pending_send_sz)
+        return;
+
     /*
      * Issue a full memory barrier before making the signaling decision.
      * Here is the reason for having this barrier:

@@ -446,26 +461,29 @@ void hv_pkt_iter_close(struct vmbus_channel *channel)
      */
     virt_mb();
 
-    /* If host has disabled notifications then skip */
-    if (rbi->ring_buffer->interrupt_mask)
+    pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
+    if (!pending_sz)
         return;
 
-    if (rbi->ring_buffer->feature_bits.feat_pending_send_sz) {
-        u32 pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
+    /*
+     * Ensure the read of write_index in hv_get_bytes_to_write()
+     * happens after the read of pending_send_sz.
+     */
+    virt_rmb();
+    curr_write_sz = hv_get_bytes_to_write(rbi);
+    bytes_read = hv_pkt_iter_bytes_read(rbi, start_read_index);
 
-        /*
-         * If there was space before we began iteration,
-         * then host was not blocked. Also handles case where
-         * pending_sz is zero then host has nothing pending
-         * and does not need to be signaled.
-         */
-        if (orig_write_sz > pending_sz)
-            return;
+    /*
+     * If there was space before we began iteration,
+     * then host was not blocked.
+     */
+    if (curr_write_sz - bytes_read > pending_sz)
+        return;
 
-        /* If pending write will not fit, don't give false hope. */
-        if (hv_get_bytes_to_write(rbi) < pending_sz)
-            return;
-    }
+    /* If pending write will not fit, don't give false hope. */
+    if (curr_write_sz <= pending_sz)
+        return;
 
     vmbus_setevent(channel);
 }

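The new hv_pkt_iter_bytes_read() is plain modular distance on the ring: how far the private read index advanced from where this iteration started, accounting for wrap-around. The same arithmetic, stated generically:

/* Bytes from start_idx to end_idx on a ring of `size` bytes. */
static unsigned int ring_distance(unsigned int start_idx,
                                  unsigned int end_idx,
                                  unsigned int size)
{
    if (end_idx >= start_idx)
        return end_idx - start_idx;

    return size - start_idx + end_idx; /* read pointer wrapped */
}
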
@@ -920,6 +920,8 @@ static const struct iio_trigger_ops st_accel_trigger_ops = {
 int st_accel_common_probe(struct iio_dev *indio_dev)
 {
     struct st_sensor_data *adata = iio_priv(indio_dev);
+    struct st_sensors_platform_data *pdata =
+        (struct st_sensors_platform_data *)adata->dev->platform_data;
     int irq = adata->get_irq_data_ready(indio_dev);
     int err;
 
@@ -946,7 +948,10 @@ int st_accel_common_probe(struct iio_dev *indio_dev)
             &adata->sensor_settings->fs.fs_avl[0];
     adata->odr = adata->sensor_settings->odr.odr_avl[0].hz;
 
-    err = st_sensors_init_sensor(indio_dev, adata->dev->platform_data);
+    if (!pdata)
+        pdata = (struct st_sensors_platform_data *)&default_accel_pdata;
+
+    err = st_sensors_init_sensor(indio_dev, pdata);
     if (err < 0)
         goto st_accel_power_off;
 
@@ -462,8 +462,10 @@ static int meson_sar_adc_lock(struct iio_dev *indio_dev)
             regmap_read(priv->regmap, MESON_SAR_ADC_DELAY, &val);
         } while (val & MESON_SAR_ADC_DELAY_BL30_BUSY && timeout--);
 
-        if (timeout < 0)
+        if (timeout < 0) {
+            mutex_unlock(&indio_dev->mlock);
             return -ETIMEDOUT;
+        }
     }
 
     return 0;

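The meson-saradc hunk fixes a leaked lock: the timeout path returned while indio_dev->mlock was still held. The general shape of the fix is to mirror every early return after a successful lock with an unlock (poll() here is a stand-in for the BL30-busy test):

#include <linux/errno.h>
#include <linux/mutex.h>

static int locked_wait(struct mutex *lock, int (*poll)(void))
{
    int timeout = 100;

    mutex_lock(lock);
    while (!poll() && timeout--)
        ;

    if (timeout < 0) {
        mutex_unlock(lock); /* don't leak the lock on timeout */
        return -ETIMEDOUT;
    }

    /* ... work while still holding the lock ... */
    mutex_unlock(lock);
    return 0;
}
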
@ -54,7 +54,6 @@ struct stm32_dfsdm_adc {
|
|||
struct stm32_dfsdm *dfsdm;
|
||||
const struct stm32_dfsdm_dev_data *dev_data;
|
||||
unsigned int fl_id;
|
||||
unsigned int ch_id;
|
||||
|
||||
/* ADC specific */
|
||||
unsigned int oversamp;
|
||||
|
@ -384,7 +383,7 @@ static ssize_t dfsdm_adc_audio_set_spiclk(struct iio_dev *indio_dev,
|
|||
{
|
||||
struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
|
||||
struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id];
|
||||
struct stm32_dfsdm_channel *ch = &adc->dfsdm->ch_list[adc->ch_id];
|
||||
struct stm32_dfsdm_channel *ch = &adc->dfsdm->ch_list[chan->channel];
|
||||
unsigned int sample_freq = adc->sample_freq;
|
||||
unsigned int spi_freq;
|
||||
int ret;
|
||||
|
@ -419,18 +418,20 @@ static ssize_t dfsdm_adc_audio_set_spiclk(struct iio_dev *indio_dev,
|
|||
return len;
|
||||
}
|
||||
|
||||
static int stm32_dfsdm_start_conv(struct stm32_dfsdm_adc *adc, bool dma)
|
||||
static int stm32_dfsdm_start_conv(struct stm32_dfsdm_adc *adc,
|
||||
const struct iio_chan_spec *chan,
|
||||
bool dma)
|
||||
{
|
||||
struct regmap *regmap = adc->dfsdm->regmap;
|
||||
int ret;
|
||||
unsigned int dma_en = 0, cont_en = 0;
|
||||
|
||||
ret = stm32_dfsdm_start_channel(adc->dfsdm, adc->ch_id);
|
||||
ret = stm32_dfsdm_start_channel(adc->dfsdm, chan->channel);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ret = stm32_dfsdm_filter_configure(adc->dfsdm, adc->fl_id,
|
||||
adc->ch_id);
|
||||
chan->channel);
|
||||
if (ret < 0)
|
||||
goto stop_channels;
|
||||
|
||||
|
@ -464,12 +465,13 @@ stop_channels:
|
|||
|
||||
regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
|
||||
DFSDM_CR1_RCONT_MASK, 0);
|
||||
stm32_dfsdm_stop_channel(adc->dfsdm, adc->fl_id);
|
||||
stm32_dfsdm_stop_channel(adc->dfsdm, chan->channel);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void stm32_dfsdm_stop_conv(struct stm32_dfsdm_adc *adc)
|
||||
static void stm32_dfsdm_stop_conv(struct stm32_dfsdm_adc *adc,
|
||||
const struct iio_chan_spec *chan)
|
||||
{
|
||||
struct regmap *regmap = adc->dfsdm->regmap;
|
||||
|
||||
|
@ -482,7 +484,7 @@ static void stm32_dfsdm_stop_conv(struct stm32_dfsdm_adc *adc)
|
|||
regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
|
||||
DFSDM_CR1_RCONT_MASK, 0);
|
||||
|
||||
stm32_dfsdm_stop_channel(adc->dfsdm, adc->ch_id);
|
||||
stm32_dfsdm_stop_channel(adc->dfsdm, chan->channel);
|
||||
}
|
||||
|
||||
static int stm32_dfsdm_set_watermark(struct iio_dev *indio_dev,
|
||||
|
@ -609,6 +611,7 @@ static int stm32_dfsdm_adc_dma_start(struct iio_dev *indio_dev)
|
|||
static int stm32_dfsdm_postenable(struct iio_dev *indio_dev)
|
 {
     struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+    const struct iio_chan_spec *chan = &indio_dev->channels[0];
     int ret;
 
     /* Reset adc buffer index */
@@ -618,7 +621,7 @@ static int stm32_dfsdm_postenable(struct iio_dev *indio_dev)
     if (ret < 0)
         return ret;
 
-    ret = stm32_dfsdm_start_conv(adc, true);
+    ret = stm32_dfsdm_start_conv(adc, chan, true);
     if (ret) {
         dev_err(&indio_dev->dev, "Can't start conversion\n");
         goto stop_dfsdm;
@@ -635,7 +638,7 @@ static int stm32_dfsdm_postenable(struct iio_dev *indio_dev)
     return 0;
 
 err_stop_conv:
-    stm32_dfsdm_stop_conv(adc);
+    stm32_dfsdm_stop_conv(adc, chan);
 stop_dfsdm:
     stm32_dfsdm_stop_dfsdm(adc->dfsdm);
 
@@ -645,11 +648,12 @@ stop_dfsdm:
 static int stm32_dfsdm_predisable(struct iio_dev *indio_dev)
 {
     struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+    const struct iio_chan_spec *chan = &indio_dev->channels[0];
 
     if (adc->dma_chan)
         dmaengine_terminate_all(adc->dma_chan);
 
-    stm32_dfsdm_stop_conv(adc);
+    stm32_dfsdm_stop_conv(adc, chan);
 
     stm32_dfsdm_stop_dfsdm(adc->dfsdm);
 
@@ -730,7 +734,7 @@ static int stm32_dfsdm_single_conv(struct iio_dev *indio_dev,
     if (ret < 0)
         goto stop_dfsdm;
 
-    ret = stm32_dfsdm_start_conv(adc, false);
+    ret = stm32_dfsdm_start_conv(adc, chan, false);
     if (ret < 0) {
         regmap_update_bits(adc->dfsdm->regmap, DFSDM_CR2(adc->fl_id),
                            DFSDM_CR2_REOCIE_MASK, DFSDM_CR2_REOCIE(0));
@@ -751,7 +755,7 @@ static int stm32_dfsdm_single_conv(struct iio_dev *indio_dev,
     else
         ret = IIO_VAL_INT;
 
-    stm32_dfsdm_stop_conv(adc);
+    stm32_dfsdm_stop_conv(adc, chan);
 
 stop_dfsdm:
     stm32_dfsdm_stop_dfsdm(adc->dfsdm);
@@ -765,7 +769,7 @@ static int stm32_dfsdm_write_raw(struct iio_dev *indio_dev,
 {
     struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
     struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id];
-    struct stm32_dfsdm_channel *ch = &adc->dfsdm->ch_list[adc->ch_id];
+    struct stm32_dfsdm_channel *ch = &adc->dfsdm->ch_list[chan->channel];
     unsigned int spi_freq = adc->spi_freq;
     int ret = -EINVAL;
 
@@ -972,7 +976,6 @@ static int stm32_dfsdm_adc_chan_init_one(struct iio_dev *indio_dev,
     }
     ch->scan_type.realbits = 24;
     ch->scan_type.storagebits = 32;
-    adc->ch_id = ch->channel;
 
     return stm32_dfsdm_chan_configure(adc->dfsdm,
                                       &adc->dfsdm->ch_list[ch->channel]);
@@ -1001,7 +1004,7 @@ static int stm32_dfsdm_audio_init(struct iio_dev *indio_dev)
     }
     ch->info_mask_separate = BIT(IIO_CHAN_INFO_SAMP_FREQ);
 
-    d_ch = &adc->dfsdm->ch_list[adc->ch_id];
+    d_ch = &adc->dfsdm->ch_list[ch->channel];
     if (d_ch->src != DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL)
         adc->spi_freq = adc->dfsdm->spi_master_freq;
 
@@ -1042,8 +1045,8 @@ static int stm32_dfsdm_adc_init(struct iio_dev *indio_dev)
         return -ENOMEM;
 
     for (chan_idx = 0; chan_idx < num_ch; chan_idx++) {
-        ch->scan_index = chan_idx;
-        ret = stm32_dfsdm_adc_chan_init_one(indio_dev, ch);
+        ch[chan_idx].scan_index = chan_idx;
+        ret = stm32_dfsdm_adc_chan_init_one(indio_dev, &ch[chan_idx]);
         if (ret < 0) {
             dev_err(&indio_dev->dev, "Channels init failed\n");
             return ret;
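These stm32-dfsdm-adc hunks drop the cached adc->ch_id and instead hand the active iio_chan_spec down into stm32_dfsdm_start_conv()/stm32_dfsdm_stop_conv(), which is what lets stm32_dfsdm_adc_init() set up each ch[chan_idx] independently. A minimal, compilable model of the pattern; the struct layouts and names below are simplified stand-ins, not the real driver types:

    #include <stdio.h>

    struct chan_spec { int channel; };            /* stand-in for iio_chan_spec */
    struct adc_state { unsigned int spi_freq; };  /* stand-in for stm32_dfsdm_adc */

    /* The channel to act on is now an explicit argument instead of a
     * single id cached in the device state. */
    static int start_conv(struct adc_state *adc, const struct chan_spec *chan,
                          int dma)
    {
        printf("start channel %d (spi_freq=%u, dma=%d)\n",
               chan->channel, adc->spi_freq, dma);
        return 0;
    }

    int main(void)
    {
        struct adc_state adc = { .spi_freq = 2048000 };
        struct chan_spec ch[2] = { { .channel = 0 }, { .channel = 3 } };

        /* mirrors the per-element loop in stm32_dfsdm_adc_init() */
        for (int i = 0; i < 2; i++)
            start_conv(&adc, &ch[i], 0);
        return 0;
    }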
@@ -83,7 +83,7 @@ int stm32_dfsdm_start_dfsdm(struct stm32_dfsdm *dfsdm)
 {
     struct dfsdm_priv *priv = container_of(dfsdm, struct dfsdm_priv, dfsdm);
     struct device *dev = &priv->pdev->dev;
-    unsigned int clk_div = priv->spi_clk_out_div;
+    unsigned int clk_div = priv->spi_clk_out_div, clk_src;
     int ret;
 
     if (atomic_inc_return(&priv->n_active_ch) == 1) {
@@ -100,6 +100,14 @@ int stm32_dfsdm_start_dfsdm(struct stm32_dfsdm *dfsdm)
             }
         }
 
+        /* select clock source, e.g. 0 for "dfsdm" or 1 for "audio" */
+        clk_src = priv->aclk ? 1 : 0;
+        ret = regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(0),
+                                 DFSDM_CHCFGR1_CKOUTSRC_MASK,
+                                 DFSDM_CHCFGR1_CKOUTSRC(clk_src));
+        if (ret < 0)
+            goto disable_aclk;
+
         /* Output the SPI CLKOUT (if clk_div == 0 clock if OFF) */
         ret = regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(0),
                                  DFSDM_CHCFGR1_CKOUTDIV_MASK,
@@ -274,7 +282,7 @@ static int stm32_dfsdm_probe(struct platform_device *pdev)
 
     dfsdm->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "dfsdm",
                                               dfsdm->base,
-                                              &stm32h7_dfsdm_regmap_cfg);
+                                              dev_data->regmap_cfg);
     if (IS_ERR(dfsdm->regmap)) {
         ret = PTR_ERR(dfsdm->regmap);
         dev_err(&pdev->dev, "%s: Failed to allocate regmap: %d\n",
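The new CKOUTSRC block picks the serial clock source (0 for the "dfsdm" clock, 1 for the "audio" clock, per the added comment) before the divider is programmed. The sketch below models the masked read-modify-write that regmap_update_bits() performs on such a field; the bit position is an assumed placeholder, not the real DFSDM register layout:

    #include <stdint.h>
    #include <stdio.h>

    #define CKOUTSRC_MASK  (1u << 30)              /* assumed field position */
    #define CKOUTSRC(v)    ((uint32_t)(v) << 30)

    /* what regmap_update_bits() boils down to: touch only the masked field */
    static void update_bits(uint32_t *reg, uint32_t mask, uint32_t val)
    {
        *reg = (*reg & ~mask) | (val & mask);
    }

    int main(void)
    {
        uint32_t chcfgr1 = 0x000000ff;             /* pretend divider bits set */
        int have_audio_clk = 1;                    /* i.e. priv->aclk != NULL */

        update_bits(&chcfgr1, CKOUTSRC_MASK, CKOUTSRC(have_audio_clk ? 1 : 0));
        printf("CHCFGR1 = 0x%08x\n", chcfgr1);     /* divider bits untouched */
        return 0;
    }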
@@ -133,6 +133,9 @@ static int ccs811_start_sensor_application(struct i2c_client *client)
     if (ret < 0)
         return ret;
 
+    if ((ret & CCS811_STATUS_FW_MODE_APPLICATION))
+        return 0;
+
     if ((ret & CCS811_STATUS_APP_VALID_MASK) !=
         CCS811_STATUS_APP_VALID_LOADED)
         return -EIO;
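The added test makes ccs811_start_sensor_application() return early when the status register already reports application mode, so the bootloader hand-off only runs when needed. A compilable model of the gating; the bit positions are assumptions, and the APP_VALID mask comparison is collapsed to a single bit for brevity:

    #include <stdio.h>

    #define STATUS_FW_MODE_APPLICATION (1 << 7)   /* assumed bit */
    #define STATUS_APP_VALID           (1 << 4)   /* assumed bit */

    static int start_sensor_application(int status)
    {
        if (status & STATUS_FW_MODE_APPLICATION)
            return 0;                  /* already running the application */
        if (!(status & STATUS_APP_VALID))
            return -5;                 /* no valid firmware: -EIO */
        /* ...the APP_START command would be issued here... */
        return 0;
    }

    int main(void)
    {
        printf("%d\n", start_sensor_application(STATUS_FW_MODE_APPLICATION));
        printf("%d\n", start_sensor_application(0));
        return 0;
    }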
@@ -640,7 +640,7 @@ int st_press_common_probe(struct iio_dev *indio_dev)
             press_data->sensor_settings->drdy_irq.int2.addr))
         pdata = (struct st_sensors_platform_data *)&default_press_pdata;
 
-    err = st_sensors_init_sensor(indio_dev, press_data->dev->platform_data);
+    err = st_sensors_init_sensor(indio_dev, pdata);
     if (err < 0)
         goto st_press_power_off;
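The st_pressure fix passes the resolved pdata local, which falls back to default_press_pdata when the board supplied nothing, instead of re-reading the possibly-NULL platform_data pointer. The same fallback pattern in isolation, with placeholder struct contents:

    #include <stdio.h>

    struct platform_data { int drdy_pin; };

    static const struct platform_data default_pdata = { .drdy_pin = 1 };

    static int init_sensor(const struct platform_data *pdata)
    {
        return pdata->drdy_pin;    /* would oops on NULL before the fix */
    }

    int main(void)
    {
        const struct platform_data *pdata = NULL;  /* no board data given */

        if (!pdata)                /* resolve once, then use only 'pdata' */
            pdata = &default_pdata;
        printf("drdy_pin = %d\n", init_sensor(pdata));
        return 0;
    }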
@@ -3069,7 +3069,8 @@ static int cma_port_is_unique(struct rdma_bind_list *bind_list,
             continue;
 
         /* different dest port -> unique */
-        if (!cma_any_port(cur_daddr) &&
+        if (!cma_any_port(daddr) &&
+            !cma_any_port(cur_daddr) &&
             (dport != cur_dport))
             continue;
@@ -3080,7 +3081,8 @@ static int cma_port_is_unique(struct rdma_bind_list *bind_list,
             continue;
 
         /* different dst address -> unique */
-        if (!cma_any_addr(cur_daddr) &&
+        if (!cma_any_addr(daddr) &&
+            !cma_any_addr(cur_daddr) &&
             cma_addr_cmp(daddr, cur_daddr))
             continue;
@@ -3378,13 +3380,13 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
         }
 #endif
     }
-    daddr = cma_dst_addr(id_priv);
-    daddr->sa_family = addr->sa_family;
-
     ret = cma_get_port(id_priv);
     if (ret)
         goto err2;
 
+    daddr = cma_dst_addr(id_priv);
+    daddr->sa_family = addr->sa_family;
+
     return 0;
 err2:
     if (id_priv->cma_dev)
@@ -4173,6 +4175,9 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
     struct cma_multicast *mc;
     int ret;
 
+    if (!id->device)
+        return -EINVAL;
+
     id_priv = container_of(id, struct rdma_id_private, id);
     if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) &&
         !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
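Both cma_port_is_unique() hunks add the same guard: the candidate destination itself must be non-wildcard before a mismatch against an existing binding may count as unique; rdma_bind_addr() additionally defers the daddr family assignment until cma_get_port() has succeeded, and rdma_join_multicast() now refuses an id with no device attached. A reduced model of the corrected uniqueness predicate, with the wildcard encoded as port 0 purely for illustration:

    #include <stdbool.h>
    #include <stdio.h>

    static bool any_port(int port) { return port == 0; }  /* 0 = wildcard */

    /* Before the fix only cur_dport was tested, so a wildcard candidate
     * could be declared unique against a specific bound port; now both
     * sides must be specific before the mismatch counts. */
    static bool unique_by_port(int dport, int cur_dport)
    {
        return !any_port(dport) && !any_port(cur_dport) && dport != cur_dport;
    }

    int main(void)
    {
        printf("%d\n", unique_by_port(0, 4150));    /* 0: wildcard, not unique */
        printf("%d\n", unique_by_port(4100, 4150)); /* 1: both sides specific */
        return 0;
    }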
@@ -132,7 +132,7 @@ static inline struct ucma_context *_ucma_find_context(int id,
     ctx = idr_find(&ctx_idr, id);
     if (!ctx)
         ctx = ERR_PTR(-ENOENT);
-    else if (ctx->file != file)
+    else if (ctx->file != file || !ctx->cm_id)
         ctx = ERR_PTR(-EINVAL);
     return ctx;
 }
@@ -456,6 +456,7 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
     struct rdma_ucm_create_id cmd;
     struct rdma_ucm_create_id_resp resp;
     struct ucma_context *ctx;
+    struct rdma_cm_id *cm_id;
     enum ib_qp_type qp_type;
     int ret;
 
@@ -476,10 +477,10 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
         return -ENOMEM;
 
     ctx->uid = cmd.uid;
-    ctx->cm_id = rdma_create_id(current->nsproxy->net_ns,
-                                ucma_event_handler, ctx, cmd.ps, qp_type);
-    if (IS_ERR(ctx->cm_id)) {
-        ret = PTR_ERR(ctx->cm_id);
+    cm_id = rdma_create_id(current->nsproxy->net_ns,
+                           ucma_event_handler, ctx, cmd.ps, qp_type);
+    if (IS_ERR(cm_id)) {
+        ret = PTR_ERR(cm_id);
         goto err1;
     }
 
@@ -489,14 +490,19 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
         ret = -EFAULT;
         goto err2;
     }
 
+    ctx->cm_id = cm_id;
     return 0;
 
 err2:
-    rdma_destroy_id(ctx->cm_id);
+    rdma_destroy_id(cm_id);
 err1:
     mutex_lock(&mut);
     idr_remove(&ctx_idr, ctx->id);
     mutex_unlock(&mut);
+    mutex_lock(&file->mut);
+    list_del(&ctx->list);
+    mutex_unlock(&file->mut);
     kfree(ctx);
     return ret;
 }
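The ucma_create_id() rework builds the rdma_cm_id in a local and stores it in ctx->cm_id only once every fallible step has passed, while _ucma_find_context() now also rejects contexts whose cm_id is still NULL; together they keep a concurrent lookup from ever seeing a half-initialized context. The publication pattern in miniature, with the idr reduced to a one-slot table:

    #include <stdio.h>

    struct context { void *cm_id; };

    static struct context table[1];            /* stands in for the idr */

    static struct context *find_context(int id)
    {
        struct context *ctx = &table[id];
        return ctx->cm_id ? ctx : NULL;        /* reject unpublished entries */
    }

    int main(void)
    {
        int dummy;
        void *cm_id = &dummy;                  /* stands in for rdma_create_id() */

        printf("before publish: %p\n", (void *)find_context(0));
        table[0].cm_id = cm_id;                /* publish only when fully built */
        printf("after publish:  %p\n", (void *)find_context(0));
        return 0;
    }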
@@ -664,19 +670,23 @@ static ssize_t ucma_resolve_ip(struct ucma_file *file,
                                int in_len, int out_len)
 {
     struct rdma_ucm_resolve_ip cmd;
+    struct sockaddr *src, *dst;
     struct ucma_context *ctx;
     int ret;
 
     if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
         return -EFAULT;
 
+    src = (struct sockaddr *) &cmd.src_addr;
+    dst = (struct sockaddr *) &cmd.dst_addr;
+    if (!rdma_addr_size(src) || !rdma_addr_size(dst))
+        return -EINVAL;
+
     ctx = ucma_get_ctx(file, cmd.id);
     if (IS_ERR(ctx))
         return PTR_ERR(ctx);
 
-    ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
-                            (struct sockaddr *) &cmd.dst_addr,
-                            cmd.timeout_ms);
+    ret = rdma_resolve_addr(ctx->cm_id, src, dst, cmd.timeout_ms);
     ucma_put_ctx(ctx);
     return ret;
 }
@@ -1349,7 +1359,7 @@ static ssize_t ucma_process_join(struct ucma_file *file,
         return -ENOSPC;
 
     addr = (struct sockaddr *) &cmd->addr;
-    if (!cmd->addr_size || (cmd->addr_size != rdma_addr_size(addr)))
+    if (cmd->addr_size != rdma_addr_size(addr))
         return -EINVAL;
 
     if (cmd->join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER)
@@ -1417,6 +1427,9 @@ static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
     join_cmd.uid = cmd.uid;
     join_cmd.id = cmd.id;
     join_cmd.addr_size = rdma_addr_size((struct sockaddr *) &cmd.addr);
+    if (!join_cmd.addr_size)
+        return -EINVAL;
+
     join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER;
     memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size);
 
@@ -1432,6 +1445,9 @@ static ssize_t ucma_join_multicast(struct ucma_file *file,
     if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
         return -EFAULT;
 
+    if (!rdma_addr_size((struct sockaddr *)&cmd.addr))
+        return -EINVAL;
+
     return ucma_process_join(file, &cmd, out_len);
 }
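All four ucma hunks above apply the same rule: a user-supplied sockaddr must yield a non-zero rdma_addr_size() (a known address family) that matches the claimed size before it is used as a copy length or handed downstream. A small model of that check; the family-to-size mapping here is illustrative, not the kernel's table:

    #include <stdio.h>

    static unsigned int addr_size(int family)
    {
        switch (family) {
        case 2:  return 16;    /* e.g. AF_INET  -> sizeof(sockaddr_in)  */
        case 10: return 28;    /* e.g. AF_INET6 -> sizeof(sockaddr_in6) */
        default: return 0;     /* unknown family: reject */
        }
    }

    static int process_join(int family, unsigned int claimed_size)
    {
        if (!addr_size(family) || claimed_size != addr_size(family))
            return -22;        /* -EINVAL */
        return 0;              /* safe to memcpy claimed_size bytes */
    }

    int main(void)
    {
        printf("%d\n", process_join(2, 16));   /* ok */
        printf("%d\n", process_join(99, 16));  /* rejected: unknown family */
        return 0;
    }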
@@ -57,8 +57,8 @@
 #define BNXT_RE_PAGE_SIZE_8M        BIT(BNXT_RE_PAGE_SHIFT_8M)
 #define BNXT_RE_PAGE_SIZE_1G        BIT(BNXT_RE_PAGE_SHIFT_1G)
 
-#define BNXT_RE_MAX_MR_SIZE_LOW     BIT(BNXT_RE_PAGE_SHIFT_1G)
-#define BNXT_RE_MAX_MR_SIZE_HIGH    BIT(39)
+#define BNXT_RE_MAX_MR_SIZE_LOW     BIT_ULL(BNXT_RE_PAGE_SHIFT_1G)
+#define BNXT_RE_MAX_MR_SIZE_HIGH    BIT_ULL(39)
 #define BNXT_RE_MAX_MR_SIZE         BNXT_RE_MAX_MR_SIZE_HIGH
 
 #define BNXT_RE_MAX_QPC_COUNT       (64 * 1024)
@@ -3598,7 +3598,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
     int umem_pgs, page_shift, rc;
 
     if (length > BNXT_RE_MAX_MR_SIZE) {
-        dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%ld\n",
+        dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%lld\n",
             length, BNXT_RE_MAX_MR_SIZE);
         return ERR_PTR(-ENOMEM);
     }
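BIT(n) expands to 1UL << n, whose width follows unsigned long, so BIT(39) is undefined on 32-bit builds; BIT_ULL(39) is a 64-bit constant everywhere, which is also why the dev_err() format gains %lld. A standalone demonstration with local macro definitions mirroring the kernel's:

    #include <stdio.h>

    #define BIT(n)     (1UL << (n))     /* width follows unsigned long */
    #define BIT_ULL(n) (1ULL << (n))    /* always 64-bit */

    int main(void)
    {
        /* On a 32-bit target, BIT(39) would shift past the type width
         * (undefined behaviour); BIT_ULL(39) is well-defined. */
        unsigned long long max_mr_size = BIT_ULL(39);

        printf("max MR size = %llu bytes\n", max_mr_size);
        return 0;
    }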
@@ -243,7 +243,7 @@ static void bnxt_qplib_service_nq(unsigned long data)
     u32 sw_cons, raw_cons;
     u16 type;
     int budget = nq->budget;
-    u64 q_handle;
+    uintptr_t q_handle;
 
     /* Service the NQ until empty */
     raw_cons = hwq->cons;
@@ -526,7 +526,7 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
 
     /* Configure the request */
     req.dpi = cpu_to_le32(srq->dpi->dpi);
-    req.srq_handle = cpu_to_le64(srq);
+    req.srq_handle = cpu_to_le64((uintptr_t)srq);
 
     req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
     pbl = &srq->hwq.pbl[PBL_LVL_0];
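The qplib hunks route pointer/handle conversions through uintptr_t so a kernel pointer can cross the 64-bit srq_handle hardware field cleanly on both 32- and 64-bit targets. The round-trip as a standalone program:

    #include <stdint.h>
    #include <stdio.h>

    struct srq { int id; };

    static uint64_t to_handle(struct srq *s)
    {
        return (uint64_t)(uintptr_t)s;      /* pointer -> wide integer */
    }

    static struct srq *from_handle(uint64_t h)
    {
        return (struct srq *)(uintptr_t)h;  /* wide integer -> pointer */
    }

    int main(void)
    {
        struct srq s = { .id = 7 };
        uint64_t handle = to_handle(&s);    /* what cpu_to_le64() would carry */

        printf("round-trip id = %d\n", from_handle(handle)->id);
        return 0;
    }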
@@ -4860,21 +4860,21 @@ static int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
     return ib_register_device(&dev->ib_dev, NULL);
 }
 
+static void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
+{
+    destroy_umrc_res(dev);
+}
+
 static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
 {
     ib_unregister_device(&dev->ib_dev);
 }
 
-static int mlx5_ib_stage_umr_res_init(struct mlx5_ib_dev *dev)
+static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
 {
     return create_umr_res(dev);
 }
 
-static void mlx5_ib_stage_umr_res_cleanup(struct mlx5_ib_dev *dev)
-{
-    destroy_umrc_res(dev);
-}
-
 static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev)
 {
     init_delay_drop(dev);
@@ -4982,12 +4982,15 @@ static const struct mlx5_ib_profile pf_profile = {
     STAGE_CREATE(MLX5_IB_STAGE_BFREG,
                  mlx5_ib_stage_bfrag_init,
                  mlx5_ib_stage_bfrag_cleanup),
+    STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
+                 NULL,
+                 mlx5_ib_stage_pre_ib_reg_umr_cleanup),
     STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
                  mlx5_ib_stage_ib_reg_init,
                  mlx5_ib_stage_ib_reg_cleanup),
-    STAGE_CREATE(MLX5_IB_STAGE_UMR_RESOURCES,
-                 mlx5_ib_stage_umr_res_init,
-                 mlx5_ib_stage_umr_res_cleanup),
+    STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
+                 mlx5_ib_stage_post_ib_reg_umr_init,
+                 NULL),
     STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
                  mlx5_ib_stage_delay_drop_init,
                  mlx5_ib_stage_delay_drop_cleanup),
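The mlx5 change splits the old UMR_RESOURCES stage in two: an init-only POST_IB_REG_UMR stage after IB registration creates the resources, and a cleanup-only PRE_IB_REG_UMR stage placed before IB_REG destroys them. Since teardown walks the stage table in reverse, the UMR resources now come up after the device is registered and go away only after it is unregistered. A miniature of the staged-profile idea, with print statements in place of the real work:

    #include <stdio.h>

    struct stage {
        const char *name;
        int  (*init)(void);
        void (*cleanup)(void);
    };

    static int  reg_init(void)    { printf("register device\n");      return 0; }
    static void reg_cleanup(void) { printf("unregister device\n");              }
    static int  umr_init(void)    { printf("create UMR resources\n"); return 0; }
    static void umr_cleanup(void) { printf("destroy UMR resources\n");          }

    static const struct stage profile[] = {
        { "pre_ib_reg_umr",  NULL,     umr_cleanup },  /* cleanup-only stage */
        { "ib_reg",          reg_init, reg_cleanup },
        { "post_ib_reg_umr", umr_init, NULL        },  /* init-only stage */
    };

    int main(void)
    {
        int n = sizeof(profile) / sizeof(profile[0]);

        for (int i = 0; i < n; i++)       /* bring-up in table order */
            if (profile[i].init && profile[i].init())
                return 1;
        for (int i = n - 1; i >= 0; i--)  /* teardown in reverse order */
            if (profile[i].cleanup)
                profile[i].cleanup();
        return 0;
    }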