commit 21634f0f30
Merge branch 'topic/hda-pci-ids' into for-next

Pull cleanup of HD-audio PCI IDs.

Signed-off-by: Takashi Iwai <tiwai@suse.de>
 .mailmap | 1
--- a/.mailmap
+++ b/.mailmap
@@ -246,6 +246,7 @@ John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
 John Stultz <johnstul@us.ibm.com>
 <jon.toppins+linux@gmail.com> <jtoppins@cumulusnetworks.com>
 <jon.toppins+linux@gmail.com> <jtoppins@redhat.com>
+Jonas Gorski <jonas.gorski@gmail.com> <jogo@openwrt.org>
 Jordan Crouse <jordan@cosmicpenguin.net> <jcrouse@codeaurora.org>
 <josh@joshtriplett.org> <josh@freedesktop.org>
 <josh@joshtriplett.org> <josh@kernel.org>
@@ -994,7 +994,7 @@ Description: This file shows the amount of physical memory needed
-What:		/sys/bus/platform/drivers/ufshcd/*/rpm_lvl
+What:		/sys/bus/platform/devices/*.ufs/rpm_lvl
 Date:		September 2014
-Contact:	Subhash Jadavani <subhashj@codeaurora.org>
+Contact:	Can Guo <quic_cang@quicinc.com>
 Description:	This entry could be used to set or show the UFS device
 		runtime power management level. The current driver
 		implementation supports 7 levels with next target states:

@@ -1021,7 +1021,7 @@ Description: This entry could be used to set or show the UFS device
-What:		/sys/bus/platform/drivers/ufshcd/*/rpm_target_dev_state
+What:		/sys/bus/platform/devices/*.ufs/rpm_target_dev_state
 Date:		February 2018
-Contact:	Subhash Jadavani <subhashj@codeaurora.org>
+Contact:	Can Guo <quic_cang@quicinc.com>
 Description:	This entry shows the target power mode of an UFS device
 		for the chosen runtime power management level.

@@ -1030,7 +1030,7 @@ Description: This entry shows the target power mode of an UFS device
-What:		/sys/bus/platform/drivers/ufshcd/*/rpm_target_link_state
+What:		/sys/bus/platform/devices/*.ufs/rpm_target_link_state
 Date:		February 2018
-Contact:	Subhash Jadavani <subhashj@codeaurora.org>
+Contact:	Can Guo <quic_cang@quicinc.com>
 Description:	This entry shows the target state of an UFS UIC link
 		for the chosen runtime power management level.

@@ -1039,7 +1039,7 @@ Description: This entry shows the target state of an UFS UIC link
-What:		/sys/bus/platform/drivers/ufshcd/*/spm_lvl
+What:		/sys/bus/platform/devices/*.ufs/spm_lvl
 Date:		September 2014
-Contact:	Subhash Jadavani <subhashj@codeaurora.org>
+Contact:	Can Guo <quic_cang@quicinc.com>
 Description:	This entry could be used to set or show the UFS device
 		system power management level. The current driver
 		implementation supports 7 levels with next target states:

@@ -1066,7 +1066,7 @@ Description: This entry could be used to set or show the UFS device
-What:		/sys/bus/platform/drivers/ufshcd/*/spm_target_dev_state
+What:		/sys/bus/platform/devices/*.ufs/spm_target_dev_state
 Date:		February 2018
-Contact:	Subhash Jadavani <subhashj@codeaurora.org>
+Contact:	Can Guo <quic_cang@quicinc.com>
 Description:	This entry shows the target power mode of an UFS device
 		for the chosen system power management level.

@@ -1075,7 +1075,7 @@ Description: This entry shows the target power mode of an UFS device
-What:		/sys/bus/platform/drivers/ufshcd/*/spm_target_link_state
+What:		/sys/bus/platform/devices/*.ufs/spm_target_link_state
 Date:		February 2018
-Contact:	Subhash Jadavani <subhashj@codeaurora.org>
+Contact:	Can Guo <quic_cang@quicinc.com>
 Description:	This entry shows the target state of an UFS UIC link
 		for the chosen system power management level.

@@ -1084,7 +1084,7 @@ Description: This entry shows the target state of an UFS UIC link
-What:		/sys/bus/platform/drivers/ufshcd/*/monitor/monitor_enable
+What:		/sys/bus/platform/devices/*.ufs/monitor/monitor_enable
 Date:		January 2021
-Contact:	Can Guo <cang@codeaurora.org>
+Contact:	Can Guo <quic_cang@quicinc.com>
 Description:	This file shows the status of performance monitor enablement
 		and it can be used to start/stop the monitor. When the monitor
 		is stopped, the performance data collected is also cleared.

@@ -1092,7 +1092,7 @@ Description: This file shows the status of performance monitor enablement
-What:		/sys/bus/platform/drivers/ufshcd/*/monitor/monitor_chunk_size
+What:		/sys/bus/platform/devices/*.ufs/monitor/monitor_chunk_size
 Date:		January 2021
-Contact:	Can Guo <cang@codeaurora.org>
+Contact:	Can Guo <quic_cang@quicinc.com>
 Description:	This file tells the monitor to focus on requests transferring
 		data of specific chunk size (in Bytes). 0 means any chunk size.
 		It can only be changed when monitor is disabled.

@@ -1100,7 +1100,7 @@ Description: This file tells the monitor to focus on requests transferring
-What:		/sys/bus/platform/drivers/ufshcd/*/monitor/read_total_sectors
+What:		/sys/bus/platform/devices/*.ufs/monitor/read_total_sectors
 Date:		January 2021
-Contact:	Can Guo <cang@codeaurora.org>
+Contact:	Can Guo <quic_cang@quicinc.com>
 Description:	This file shows how many sectors (in 512 Bytes) have been
 		sent from device to host after monitor gets started.

@@ -1109,7 +1109,7 @@ Description: This file shows how many sectors (in 512 Bytes) have been
-What:		/sys/bus/platform/drivers/ufshcd/*/monitor/read_total_busy
+What:		/sys/bus/platform/devices/*.ufs/monitor/read_total_busy
 Date:		January 2021
-Contact:	Can Guo <cang@codeaurora.org>
+Contact:	Can Guo <quic_cang@quicinc.com>
 Description:	This file shows how long (in micro seconds) has been spent
 		sending data from device to host after monitor gets started.

@@ -1118,7 +1118,7 @@ Description: This file shows how long (in micro seconds) has been spent
-What:		/sys/bus/platform/drivers/ufshcd/*/monitor/read_nr_requests
+What:		/sys/bus/platform/devices/*.ufs/monitor/read_nr_requests
 Date:		January 2021
-Contact:	Can Guo <cang@codeaurora.org>
+Contact:	Can Guo <quic_cang@quicinc.com>
 Description:	This file shows how many read requests have been sent after
 		monitor gets started.

@@ -1127,7 +1127,7 @@ Description: This file shows how many read requests have been sent after
-What:		/sys/bus/platform/drivers/ufshcd/*/monitor/read_req_latency_max
+What:		/sys/bus/platform/devices/*.ufs/monitor/read_req_latency_max
 Date:		January 2021
-Contact:	Can Guo <cang@codeaurora.org>
+Contact:	Can Guo <quic_cang@quicinc.com>
 Description:	This file shows the maximum latency (in micro seconds) of
 		read requests after monitor gets started.

@@ -1136,7 +1136,7 @@ Description: This file shows the maximum latency (in micro seconds) of
-What:		/sys/bus/platform/drivers/ufshcd/*/monitor/read_req_latency_min
+What:		/sys/bus/platform/devices/*.ufs/monitor/read_req_latency_min
 Date:		January 2021
-Contact:	Can Guo <cang@codeaurora.org>
+Contact:	Can Guo <quic_cang@quicinc.com>
 Description:	This file shows the minimum latency (in micro seconds) of
 		read requests after monitor gets started.

@@ -1145,7 +1145,7 @@ Description: This file shows the minimum latency (in micro seconds) of
-What:		/sys/bus/platform/drivers/ufshcd/*/monitor/read_req_latency_avg
+What:		/sys/bus/platform/devices/*.ufs/monitor/read_req_latency_avg
 Date:		January 2021
-Contact:	Can Guo <cang@codeaurora.org>
+Contact:	Can Guo <quic_cang@quicinc.com>
 Description:	This file shows the average latency (in micro seconds) of
 		read requests after monitor gets started.

@@ -1154,7 +1154,7 @@ Description: This file shows the average latency (in micro seconds) of
-What:		/sys/bus/platform/drivers/ufshcd/*/monitor/read_req_latency_sum
+What:		/sys/bus/platform/devices/*.ufs/monitor/read_req_latency_sum
 Date:		January 2021
-Contact:	Can Guo <cang@codeaurora.org>
+Contact:	Can Guo <quic_cang@quicinc.com>
 Description:	This file shows the total latency (in micro seconds) of
 		read requests sent after monitor gets started.

@@ -1163,7 +1163,7 @@ Description: This file shows the total latency (in micro seconds) of
-What:		/sys/bus/platform/drivers/ufshcd/*/monitor/write_total_sectors
+What:		/sys/bus/platform/devices/*.ufs/monitor/write_total_sectors
 Date:		January 2021
-Contact:	Can Guo <cang@codeaurora.org>
+Contact:	Can Guo <quic_cang@quicinc.com>
 Description:	This file shows how many sectors (in 512 Bytes) have been sent
 		from host to device after monitor gets started.

@@ -1172,7 +1172,7 @@ Description: This file shows how many sectors (in 512 Bytes) have been sent
-What:		/sys/bus/platform/drivers/ufshcd/*/monitor/write_total_busy
+What:		/sys/bus/platform/devices/*.ufs/monitor/write_total_busy
 Date:		January 2021
-Contact:	Can Guo <cang@codeaurora.org>
+Contact:	Can Guo <quic_cang@quicinc.com>
 Description:	This file shows how long (in micro seconds) has been spent
 		sending data from host to device after monitor gets started.

@@ -1181,7 +1181,7 @@ Description: This file shows how long (in micro seconds) has been spent
-What:		/sys/bus/platform/drivers/ufshcd/*/monitor/write_nr_requests
+What:		/sys/bus/platform/devices/*.ufs/monitor/write_nr_requests
 Date:		January 2021
-Contact:	Can Guo <cang@codeaurora.org>
+Contact:	Can Guo <quic_cang@quicinc.com>
 Description:	This file shows how many write requests have been sent after
 		monitor gets started.

@@ -1190,7 +1190,7 @@ Description: This file shows how many write requests have been sent after
-What:		/sys/bus/platform/drivers/ufshcd/*/monitor/write_req_latency_max
+What:		/sys/bus/platform/devices/*.ufs/monitor/write_req_latency_max
 Date:		January 2021
-Contact:	Can Guo <cang@codeaurora.org>
+Contact:	Can Guo <quic_cang@quicinc.com>
 Description:	This file shows the maximum latency (in micro seconds) of write
 		requests after monitor gets started.

@@ -1199,7 +1199,7 @@ Description: This file shows the maximum latency (in micro seconds) of write
-What:		/sys/bus/platform/drivers/ufshcd/*/monitor/write_req_latency_min
+What:		/sys/bus/platform/devices/*.ufs/monitor/write_req_latency_min
 Date:		January 2021
-Contact:	Can Guo <cang@codeaurora.org>
+Contact:	Can Guo <quic_cang@quicinc.com>
 Description:	This file shows the minimum latency (in micro seconds) of write
 		requests after monitor gets started.

@@ -1208,7 +1208,7 @@ Description: This file shows the minimum latency (in micro seconds) of write
-What:		/sys/bus/platform/drivers/ufshcd/*/monitor/write_req_latency_avg
+What:		/sys/bus/platform/devices/*.ufs/monitor/write_req_latency_avg
 Date:		January 2021
-Contact:	Can Guo <cang@codeaurora.org>
+Contact:	Can Guo <quic_cang@quicinc.com>
 Description:	This file shows the average latency (in micro seconds) of write
 		requests after monitor gets started.

@@ -1217,7 +1217,7 @@ Description: This file shows the average latency (in micro seconds) of write
-What:		/sys/bus/platform/drivers/ufshcd/*/monitor/write_req_latency_sum
+What:		/sys/bus/platform/devices/*.ufs/monitor/write_req_latency_sum
 Date:		January 2021
-Contact:	Can Guo <cang@codeaurora.org>
+Contact:	Can Guo <quic_cang@quicinc.com>
 Description:	This file shows the total latency (in micro seconds) of write
 		requests after monitor gets started.

@@ -1226,7 +1226,7 @@ Description: This file shows the total latency (in micro seconds) of write
-What:		/sys/bus/platform/drivers/ufshcd/*/device_descriptor/wb_presv_us_en
+What:		/sys/bus/platform/devices/*.ufs/device_descriptor/wb_presv_us_en
 Date:		June 2020
-Contact:	Asutosh Das <asutoshd@codeaurora.org>
+Contact:	Asutosh Das <quic_asutoshd@quicinc.com>
 Description:	This entry shows if preserve user-space was configured
 
 		The file is read only.

@@ -1234,7 +1234,7 @@ Description: This entry shows if preserve user-space was configured
-What:		/sys/bus/platform/drivers/ufshcd/*/device_descriptor/wb_shared_alloc_units
+What:		/sys/bus/platform/devices/*.ufs/device_descriptor/wb_shared_alloc_units
 Date:		June 2020
-Contact:	Asutosh Das <asutoshd@codeaurora.org>
+Contact:	Asutosh Das <quic_asutoshd@quicinc.com>
 Description:	This entry shows the shared allocated units of WB buffer
 
 		The file is read only.

@@ -1242,7 +1242,7 @@ Description: This entry shows the shared allocated units of WB buffer
-What:		/sys/bus/platform/drivers/ufshcd/*/device_descriptor/wb_type
+What:		/sys/bus/platform/devices/*.ufs/device_descriptor/wb_type
 Date:		June 2020
-Contact:	Asutosh Das <asutoshd@codeaurora.org>
+Contact:	Asutosh Das <quic_asutoshd@quicinc.com>
 Description:	This entry shows the configured WB type.
 		0x1 for shared buffer mode. 0x0 for dedicated buffer mode.

@@ -1251,7 +1251,7 @@ Description: This entry shows the configured WB type.
-What:		/sys/bus/platform/drivers/ufshcd/*/geometry_descriptor/wb_buff_cap_adj
+What:		/sys/bus/platform/devices/*.ufs/geometry_descriptor/wb_buff_cap_adj
 Date:		June 2020
-Contact:	Asutosh Das <asutoshd@codeaurora.org>
+Contact:	Asutosh Das <quic_asutoshd@quicinc.com>
 Description:	This entry shows the total user-space decrease in shared
 		buffer mode.
 		The value of this parameter is 3 for TLC NAND when SLC mode

@@ -1262,7 +1262,7 @@ Description: This entry shows the total user-space decrease in shared
-What:		/sys/bus/platform/drivers/ufshcd/*/geometry_descriptor/wb_max_alloc_units
+What:		/sys/bus/platform/devices/*.ufs/geometry_descriptor/wb_max_alloc_units
 Date:		June 2020
-Contact:	Asutosh Das <asutoshd@codeaurora.org>
+Contact:	Asutosh Das <quic_asutoshd@quicinc.com>
 Description:	This entry shows the Maximum total WriteBooster Buffer size
 		which is supported by the entire device.

@@ -1271,7 +1271,7 @@ Description: This entry shows the Maximum total WriteBooster Buffer size
-What:		/sys/bus/platform/drivers/ufshcd/*/geometry_descriptor/wb_max_wb_luns
+What:		/sys/bus/platform/devices/*.ufs/geometry_descriptor/wb_max_wb_luns
 Date:		June 2020
-Contact:	Asutosh Das <asutoshd@codeaurora.org>
+Contact:	Asutosh Das <quic_asutoshd@quicinc.com>
 Description:	This entry shows the maximum number of luns that can support
 		WriteBooster.

@@ -1280,7 +1280,7 @@ Description: This entry shows the maximum number of luns that can support
-What:		/sys/bus/platform/drivers/ufshcd/*/geometry_descriptor/wb_sup_red_type
+What:		/sys/bus/platform/devices/*.ufs/geometry_descriptor/wb_sup_red_type
 Date:		June 2020
-Contact:	Asutosh Das <asutoshd@codeaurora.org>
+Contact:	Asutosh Das <quic_asutoshd@quicinc.com>
 Description:	The supportability of user space reduction mode
 		and preserve user space mode.
 		00h: WriteBooster Buffer can be configured only in

@@ -1295,7 +1295,7 @@ Description: The supportability of user space reduction mode
-What:		/sys/bus/platform/drivers/ufshcd/*/geometry_descriptor/wb_sup_wb_type
+What:		/sys/bus/platform/devices/*.ufs/geometry_descriptor/wb_sup_wb_type
 Date:		June 2020
-Contact:	Asutosh Das <asutoshd@codeaurora.org>
+Contact:	Asutosh Das <quic_asutoshd@quicinc.com>
 Description:	The supportability of WriteBooster Buffer type.
 
 		=== ==========================================================

@@ -1310,7 +1310,7 @@ Description: The supportability of WriteBooster Buffer type.
-What:		/sys/bus/platform/drivers/ufshcd/*/flags/wb_enable
+What:		/sys/bus/platform/devices/*.ufs/flags/wb_enable
 Date:		June 2020
-Contact:	Asutosh Das <asutoshd@codeaurora.org>
+Contact:	Asutosh Das <quic_asutoshd@quicinc.com>
 Description:	This entry shows the status of WriteBooster.
 
 		== ============================

@@ -1323,7 +1323,7 @@ Description: This entry shows the status of WriteBooster.
-What:		/sys/bus/platform/drivers/ufshcd/*/flags/wb_flush_en
+What:		/sys/bus/platform/devices/*.ufs/flags/wb_flush_en
 Date:		June 2020
-Contact:	Asutosh Das <asutoshd@codeaurora.org>
+Contact:	Asutosh Das <quic_asutoshd@quicinc.com>
 Description:	This entry shows if flush is enabled.
 
 		== =================================

@@ -1336,7 +1336,7 @@ Description: This entry shows if flush is enabled.
-What:		/sys/bus/platform/drivers/ufshcd/*/flags/wb_flush_during_h8
+What:		/sys/bus/platform/devices/*.ufs/flags/wb_flush_during_h8
 Date:		June 2020
-Contact:	Asutosh Das <asutoshd@codeaurora.org>
+Contact:	Asutosh Das <quic_asutoshd@quicinc.com>
 Description:	Flush WriteBooster Buffer during hibernate state.
 
 		== =================================================

@@ -1351,7 +1351,7 @@ Description: Flush WriteBooster Buffer during hibernate state.
-What:		/sys/bus/platform/drivers/ufshcd/*/attributes/wb_avail_buf
+What:		/sys/bus/platform/devices/*.ufs/attributes/wb_avail_buf
 Date:		June 2020
-Contact:	Asutosh Das <asutoshd@codeaurora.org>
+Contact:	Asutosh Das <quic_asutoshd@quicinc.com>
 Description:	This entry shows the amount of unused WriteBooster buffer
 		available.

@@ -1360,7 +1360,7 @@ Description: This entry shows the amount of unused WriteBooster buffer
-What:		/sys/bus/platform/drivers/ufshcd/*/attributes/wb_cur_buf
+What:		/sys/bus/platform/devices/*.ufs/attributes/wb_cur_buf
 Date:		June 2020
-Contact:	Asutosh Das <asutoshd@codeaurora.org>
+Contact:	Asutosh Das <quic_asutoshd@quicinc.com>
 Description:	This entry shows the amount of unused current buffer.
 
 		The file is read only.

@@ -1368,7 +1368,7 @@ Description: This entry shows the amount of unused current buffer.
-What:		/sys/bus/platform/drivers/ufshcd/*/attributes/wb_flush_status
+What:		/sys/bus/platform/devices/*.ufs/attributes/wb_flush_status
 Date:		June 2020
-Contact:	Asutosh Das <asutoshd@codeaurora.org>
+Contact:	Asutosh Das <quic_asutoshd@quicinc.com>
 Description:	This entry shows the flush operation status.
 

@@ -1385,7 +1385,7 @@ Description: This entry shows the flush operation status.
-What:		/sys/bus/platform/drivers/ufshcd/*/attributes/wb_life_time_est
+What:		/sys/bus/platform/devices/*.ufs/attributes/wb_life_time_est
 Date:		June 2020
-Contact:	Asutosh Das <asutoshd@codeaurora.org>
+Contact:	Asutosh Das <quic_asutoshd@quicinc.com>
 Description:	This entry shows an indication of the WriteBooster Buffer
 		lifetime based on the amount of performed program/erase cycles

@@ -1399,7 +1399,7 @@ Description: This entry shows an indication of the WriteBooster Buffer
 
 What:		/sys/class/scsi_device/*/device/unit_descriptor/wb_buf_alloc_units
 Date:		June 2020
-Contact:	Asutosh Das <asutoshd@codeaurora.org>
+Contact:	Asutosh Das <quic_asutoshd@quicinc.com>
 Description:	This entry shows the configured size of WriteBooster buffer.
 		0400h corresponds to 4GB.
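The ABI hunks above only relocate the documented entries from the ufshcd driver directory to the per-device directory; the attributes themselves and their semantics are unchanged. As a quick illustration, a userspace reader now globs the device path instead of the driver path. A minimal sketch, assuming glibc's glob(3); matched device names such as "1d84000.ufs" are hypothetical and vary by platform devicetree:

/* Minimal sketch: read rpm_lvl through the new per-device sysfs path. */
#include <glob.h>
#include <stdio.h>

int main(void)
{
	glob_t g;
	char buf[16];

	/* Expand the *.ufs wildcard used by the updated ABI entries. */
	if (glob("/sys/bus/platform/devices/*.ufs/rpm_lvl", 0, NULL, &g))
		return 1;

	for (size_t i = 0; i < g.gl_pathc; i++) {
		FILE *f = fopen(g.gl_pathv[i], "r");

		if (!f)
			continue;
		if (fgets(buf, sizeof(buf), f))
			printf("%s: %s", g.gl_pathv[i], buf);
		fclose(f);
	}
	globfree(&g);
	return 0;
}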
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/loongson,ls1x-wdt.yaml
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/watchdog/loongson,ls1x-wdt.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Loongson-1 Watchdog Timer
+
+maintainers:
+  - Keguang Zhang <keguang.zhang@gmail.com>
+
+allOf:
+  - $ref: watchdog.yaml#
+
+properties:
+  compatible:
+    enum:
+      - loongson,ls1b-wdt
+      - loongson,ls1c-wdt
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - clocks
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/loongson,ls1x-clk.h>
+    watchdog: watchdog@1fe5c060 {
+        compatible = "loongson,ls1b-wdt";
+        reg = <0x1fe5c060 0xc>;
+
+        clocks = <&clkc LS1X_CLKID_APB>;
+    };
@@ -98,7 +98,7 @@ If you aren't subscribed to netdev and/or are simply unsure if
 repository link above for any new networking-related commits. You may
 also check the following website for the current status:
 
-  http://vger.kernel.org/~davem/net-next.html
+  https://patchwork.hopto.org/net-next.html
 
 The ``net`` tree continues to collect fixes for the vX.Y content, and is
 fed back to Linus at regular (~weekly) intervals. Meaning that the
@@ -49,7 +49,7 @@ The following keys are defined:
   privileged ISA, with the following known exceptions (more exceptions may be
   added, but only if it can be demonstrated that the user ABI is not broken):
 
-  * The :fence.i: instruction cannot be directly executed by userspace
+  * The ``fence.i`` instruction cannot be directly executed by userspace
     programs (it may still be executed in userspace via a
     kernel-controlled mechanism such as the vDSO).
@@ -187,7 +187,8 @@ WMI method BatteryeRawAnalytics()
 
 Returns a buffer usually containg 12 blocks of analytics data.
 Those blocks contain:
-- block number starting with 0 (u8)
+
+- a block number starting with 0 (u8)
 - 31 bytes of unknown data
 
 .. note::
 MAINTAINERS | 15
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4121,6 +4121,13 @@ F:	Documentation/devicetree/bindings/spi/brcm,bcm63xx-hsspi.yaml
 F:	drivers/spi/spi-bcm63xx-hsspi.c
 F:	drivers/spi/spi-bcmbca-hsspi.c
 
+BROADCOM BCM6348/BCM6358 SPI controller DRIVER
+M:	Jonas Gorski <jonas.gorski@gmail.com>
+L:	linux-spi@vger.kernel.org
+S:	Odd Fixes
+F:	Documentation/devicetree/bindings/spi/spi-bcm63xx.txt
+F:	drivers/spi/spi-bcm63xx.c
+
 BROADCOM ETHERNET PHY DRIVERS
 M:	Florian Fainelli <florian.fainelli@broadcom.com>
 R:	Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>

@@ -8672,8 +8679,11 @@ S:	Maintained
 F:	drivers/input/touchscreen/resistive-adc-touch.c
 
 GENERIC STRING LIBRARY
+M:	Kees Cook <keescook@chromium.org>
 R:	Andy Shevchenko <andy@kernel.org>
-S:	Maintained
+L:	linux-hardening@vger.kernel.org
+S:	Supported
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening
 F:	include/linux/string.h
 F:	include/linux/string_choices.h
 F:	include/linux/string_helpers.h

@@ -13968,7 +13978,7 @@ T:	git https://git.kernel.org/pub/scm/linux/kernel/git/conor/linux.git/
 F:	drivers/soc/microchip/
 
 MICROCHIP SPI DRIVER
-M:	Tudor Ambarus <tudor.ambarus@linaro.org>
+M:	Ryan Wanner <ryan.wanner@microchip.com>
 S:	Supported
 F:	drivers/spi/spi-atmel.*

@@ -17543,6 +17553,7 @@ QUALCOMM ETHQOS ETHERNET DRIVER
 M:	Vinod Koul <vkoul@kernel.org>
 R:	Bhupesh Sharma <bhupesh.sharma@linaro.org>
+L:	netdev@vger.kernel.org
 L:	linux-arm-msm@vger.kernel.org
 S:	Maintained
 F:	Documentation/devicetree/bindings/net/qcom,ethqos.yaml
 F:	drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
 Makefile | 2
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 5
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Hurr durr I'ma ninja sloth
 
 # *DOCUMENTATION*
@@ -197,6 +197,8 @@ config ARM64
 		    !CC_OPTIMIZE_FOR_SIZE)
 	select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY \
 		if DYNAMIC_FTRACE_WITH_ARGS
+	select HAVE_SAMPLE_FTRACE_DIRECT
+	select HAVE_SAMPLE_FTRACE_DIRECT_MULTI
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS
 	select HAVE_FAST_GUP
 	select HAVE_FTRACE_MCOUNT_RECORD
@@ -211,6 +211,10 @@ static inline unsigned long fgraph_ret_regs_frame_pointer(struct fgraph_ret_regs
 {
 	return ret_regs->fp;
 }
+
+void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
+			   unsigned long frame_pointer);
+
 #endif /* ifdef CONFIG_FUNCTION_GRAPH_TRACER */
 #endif
@@ -85,4 +85,7 @@ static inline int syscall_get_arch(struct task_struct *task)
 	return AUDIT_ARCH_AARCH64;
 }
 
+int syscall_trace_enter(struct pt_regs *regs);
+void syscall_trace_exit(struct pt_regs *regs);
+
 #endif /* __ASM_SYSCALL_H */
@@ -75,9 +75,6 @@ static inline bool has_syscall_work(unsigned long flags)
 	return unlikely(flags & _TIF_SYSCALL_WORK);
 }
 
-int syscall_trace_enter(struct pt_regs *regs);
-void syscall_trace_exit(struct pt_regs *regs);
-
 static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
 			   const syscall_fn_t syscall_table[])
 {
@@ -28,8 +28,10 @@
 
 struct sigcontext {
 	struct user_regs_struct regs;  /* needs to be first */
-	struct __or1k_fpu_state fpu;
-	unsigned long oldmask;
+	union {
+		unsigned long fpcsr;
+		unsigned long oldmask;	/* unused */
+	};
 };
 
 #endif /* __ASM_OPENRISC_SIGCONTEXT_H */
@@ -50,7 +50,7 @@ static int restore_sigcontext(struct pt_regs *regs,
 	err |= __copy_from_user(regs, sc->regs.gpr, 32 * sizeof(unsigned long));
 	err |= __copy_from_user(&regs->pc, &sc->regs.pc, sizeof(unsigned long));
 	err |= __copy_from_user(&regs->sr, &sc->regs.sr, sizeof(unsigned long));
-	err |= __copy_from_user(&regs->fpcsr, &sc->fpu.fpcsr, sizeof(unsigned long));
+	err |= __copy_from_user(&regs->fpcsr, &sc->fpcsr, sizeof(unsigned long));
 
 	/* make sure the SM-bit is cleared so user-mode cannot fool us */
 	regs->sr &= ~SPR_SR_SM;

@@ -113,7 +113,7 @@ static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
 	err |= __copy_to_user(sc->regs.gpr, regs, 32 * sizeof(unsigned long));
 	err |= __copy_to_user(&sc->regs.pc, &regs->pc, sizeof(unsigned long));
 	err |= __copy_to_user(&sc->regs.sr, &regs->sr, sizeof(unsigned long));
-	err |= __copy_to_user(&sc->fpu.fpcsr, &regs->fpcsr, sizeof(unsigned long));
+	err |= __copy_to_user(&sc->fpcsr, &regs->fpcsr, sizeof(unsigned long));
 
 	return err;
 }
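The sigcontext change works because the union overlays the new fpcsr word on the slot previously declared as oldmask, so the structure userspace sees keeps its size and field offsets. A toy model of that reasoning — regs[35] is an invented stand-in for the real user_regs_struct, used only to make the assertion self-contained:

/* Overlaying fpcsr on the legacy oldmask word preserves the layout
 * that existing userspace was built against. */
#include <assert.h>
#include <stddef.h>

struct old_sigcontext {			/* pre-FPU ABI */
	unsigned long regs[35];		/* stand-in for user_regs_struct */
	unsigned long oldmask;
};

struct new_sigcontext {			/* after this commit */
	unsigned long regs[35];
	union {
		unsigned long fpcsr;
		unsigned long oldmask;	/* unused */
	};
};

static_assert(sizeof(struct new_sigcontext) == sizeof(struct old_sigcontext),
	      "union keeps the userspace-visible size");
static_assert(offsetof(struct new_sigcontext, fpcsr) ==
	      offsetof(struct old_sigcontext, oldmask),
	      "fpcsr reuses the oldmask word");

int main(void) { return 0; }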
@@ -136,12 +136,6 @@ static inline int hash__pmd_trans_huge(pmd_t pmd)
 	return 0;
 }
 
-static inline int hash__pmd_same(pmd_t pmd_a, pmd_t pmd_b)
-{
-	BUG();
-	return 0;
-}
-
 static inline pmd_t hash__pmd_mkhuge(pmd_t pmd)
 {
 	BUG();

@@ -263,11 +263,6 @@ static inline int hash__pmd_trans_huge(pmd_t pmd)
 		  (_PAGE_PTE | H_PAGE_THP_HUGE));
 }
 
-static inline int hash__pmd_same(pmd_t pmd_a, pmd_t pmd_b)
-{
-	return (((pmd_raw(pmd_a) ^ pmd_raw(pmd_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0);
-}
-
 static inline pmd_t hash__pmd_mkhuge(pmd_t pmd)
 {
 	return __pmd(pmd_val(pmd) | (_PAGE_PTE | H_PAGE_THP_HUGE));

@@ -132,6 +132,11 @@ static inline int get_region_id(unsigned long ea)
 	return region_id;
 }
 
+static inline int hash__pmd_same(pmd_t pmd_a, pmd_t pmd_b)
+{
+	return (((pmd_raw(pmd_a) ^ pmd_raw(pmd_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0);
+}
+
 #define hash__pmd_bad(pmd)	(pmd_val(pmd) & H_PMD_BAD_BITS)
 #define hash__pud_bad(pud)	(pud_val(pud) & H_PUD_BAD_BITS)
 static inline int hash__p4d_bad(p4d_t p4d)
@@ -5,6 +5,7 @@
  * Copyright (C) 2007 Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp.
  */
 
+#include <linux/linkage.h>
 #include <linux/threads.h>
 #include <asm/reg.h>
 #include <asm/page.h>

@@ -66,7 +67,7 @@
 #define SPECIAL_EXC_LOAD(reg, name) \
 	ld	reg, (SPECIAL_EXC_##name * 8 + SPECIAL_EXC_FRAME_OFFS)(r1)
 
-special_reg_save:
+SYM_CODE_START_LOCAL(special_reg_save)
 	/*
 	 * We only need (or have stack space) to save this stuff if
 	 * we interrupted the kernel.

@@ -131,8 +132,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
 	SPECIAL_EXC_STORE(r10,CSRR1)
 
 	blr
+SYM_CODE_END(special_reg_save)
 
-ret_from_level_except:
+SYM_CODE_START_LOCAL(ret_from_level_except)
 	ld	r3,_MSR(r1)
 	andi.	r3,r3,MSR_PR
 	beq	1f

@@ -206,6 +208,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
 	mtxer	r11
 
 	blr
+SYM_CODE_END(ret_from_level_except)
 
 .macro ret_from_level srr0 srr1 paca_ex scratch
 	bl	ret_from_level_except

@@ -232,13 +235,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
 	mfspr	r13,\scratch
 .endm
 
-ret_from_crit_except:
+SYM_CODE_START_LOCAL(ret_from_crit_except)
 	ret_from_level SPRN_CSRR0 SPRN_CSRR1 PACA_EXCRIT SPRN_SPRG_CRIT_SCRATCH
 	rfci
+SYM_CODE_END(ret_from_crit_except)
 
-ret_from_mc_except:
+SYM_CODE_START_LOCAL(ret_from_mc_except)
 	ret_from_level SPRN_MCSRR0 SPRN_MCSRR1 PACA_EXMC SPRN_SPRG_MC_SCRATCH
 	rfmci
+SYM_CODE_END(ret_from_mc_except)
 
 /* Exception prolog code for all exceptions */
 #define EXCEPTION_PROLOG(n, intnum, type, addition)	\

@@ -978,20 +983,22 @@ masked_interrupt_book3e_0x2c0:
 * r14 and r15 containing the fault address and error code, with the
 * original values stashed away in the PACA
 */
-storage_fault_common:
+SYM_CODE_START_LOCAL(storage_fault_common)
 	addi	r3,r1,STACK_INT_FRAME_REGS
 	bl	do_page_fault
 	b	interrupt_return
+SYM_CODE_END(storage_fault_common)
 
 /*
 * Alignment exception doesn't fit entirely in the 0x100 bytes so it
 * continues here.
 */
-alignment_more:
+SYM_CODE_START_LOCAL(alignment_more)
 	addi	r3,r1,STACK_INT_FRAME_REGS
 	bl	alignment_exception
 	REST_NVGPRS(r1)
 	b	interrupt_return
+SYM_CODE_END(alignment_more)
 
 /*
 * Trampolines used when spotting a bad kernel stack pointer in

@@ -1030,8 +1037,7 @@ BAD_STACK_TRAMPOLINE(0xe00)
 BAD_STACK_TRAMPOLINE(0xf00)
 BAD_STACK_TRAMPOLINE(0xf20)
 
-.globl	bad_stack_book3e
-bad_stack_book3e:
+_GLOBAL(bad_stack_book3e)
 	/* XXX: Needs to make SPRN_SPRG_GEN depend on exception type */
 	mfspr	r10,SPRN_SRR0;	/* read SRR0 before touching stack */
 	ld	r1,PACAEMERGSP(r13)

@@ -1285,8 +1291,7 @@ have_hes:
 * ever takes any parameters, the SCOM code must also be updated to
 * provide them.
 */
-.globl a2_tlbinit_code_start
-a2_tlbinit_code_start:
+_GLOBAL(a2_tlbinit_code_start)
 
 	ori	r11,r3,MAS0_WQ_ALLWAYS
 	oris	r11,r11,MAS0_ESEL(3)@h /* Use way 3: workaround A2 erratum 376 */

@@ -1479,8 +1484,7 @@ _GLOBAL(book3e_secondary_thread_init)
 	mflr	r28
 	b	3b
 
-.globl init_core_book3e
-init_core_book3e:
+_GLOBAL(init_core_book3e)
 	/* Establish the interrupt vector base */
 	tovirt(r2,r2)
 	LOAD_REG_ADDR(r3, interrupt_base_book3e)

@@ -1488,7 +1492,7 @@ init_core_book3e:
 	sync
 	blr
 
-init_thread_book3e:
+SYM_CODE_START_LOCAL(init_thread_book3e)
 	lis	r3,(SPRN_EPCR_ICM | SPRN_EPCR_GICM)@h
 	mtspr	SPRN_EPCR,r3
 

@@ -1502,6 +1506,7 @@ init_thread_book3e:
 	mtspr	SPRN_TSR,r3
 
 	blr
+SYM_CODE_END(init_thread_book3e)
 
 _GLOBAL(__setup_base_ivors)
 	SET_IVOR(0, 0x020)	/* Critical Input */
@@ -364,26 +364,27 @@ ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *
 
 static int ssb_prctl_get(struct task_struct *task)
 {
-	if (stf_enabled_flush_types == STF_BARRIER_NONE)
-		/*
-		 * We don't have an explicit signal from firmware that we're
-		 * vulnerable or not, we only have certain CPU revisions that
-		 * are known to be vulnerable.
-		 *
-		 * We assume that if we're on another CPU, where the barrier is
-		 * NONE, then we are not vulnerable.
-		 */
+	/*
+	 * The STF_BARRIER feature is on by default, so if it's off that means
+	 * firmware has explicitly said the CPU is not vulnerable via either
+	 * the hypercall or device tree.
+	 */
+	if (!security_ftr_enabled(SEC_FTR_STF_BARRIER))
 		return PR_SPEC_NOT_AFFECTED;
-	else
-		/*
-		 * If we do have a barrier type then we are vulnerable. The
-		 * barrier is not a global or per-process mitigation, so the
-		 * only value we can report here is PR_SPEC_ENABLE, which
-		 * appears as "vulnerable" in /proc.
-		 */
-		return PR_SPEC_ENABLE;
-
-	return -EINVAL;
+
+	/*
+	 * If the system's CPU has no known barrier (see setup_stf_barrier())
+	 * then assume that the CPU is not vulnerable.
+	 */
+	if (stf_enabled_flush_types == STF_BARRIER_NONE)
+		return PR_SPEC_NOT_AFFECTED;
+
+	/*
+	 * Otherwise the CPU is vulnerable. The barrier is not a global or
+	 * per-process mitigation, so the only value that can be reported here
+	 * is PR_SPEC_ENABLE, which appears as "vulnerable" in /proc.
+	 */
+	return PR_SPEC_ENABLE;
 }
 
 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
@@ -328,10 +328,12 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
 
 static long native_hpte_remove(unsigned long hpte_group)
 {
+	unsigned long hpte_v, flags;
 	struct hash_pte *hptep;
 	int i;
 	int slot_offset;
-	unsigned long hpte_v;
+
+	local_irq_save(flags);
 
 	DBG_LOW("    remove(group=%lx)\n", hpte_group);
 

@@ -356,13 +358,16 @@ static long native_hpte_remove(unsigned long hpte_group)
 		slot_offset &= 0x7;
 	}
 
-	if (i == HPTES_PER_GROUP)
-		return -1;
+	if (i == HPTES_PER_GROUP) {
+		i = -1;
+		goto out;
+	}
 
 	/* Invalidate the hpte. NOTE: this also unlocks it */
 	release_hpte_lock();
 	hptep->v = 0;
 
+out:
+	local_irq_restore(flags);
 	return i;
 }
@@ -317,19 +317,14 @@ void __init riscv_fill_hwcap(void)
 #undef SET_ISA_EXT_MAP
 		}
 
-		/*
-		 * Linux requires the following extensions, so we may as well
-		 * always set them.
-		 */
-		set_bit(RISCV_ISA_EXT_ZICSR, isainfo->isa);
-		set_bit(RISCV_ISA_EXT_ZIFENCEI, isainfo->isa);
-
 		/*
 		 * These ones were as they were part of the base ISA when the
 		 * port & dt-bindings were upstreamed, and so can be set
 		 * unconditionally where `i` is in riscv,isa on DT systems.
 		 */
 		if (acpi_disabled) {
+			set_bit(RISCV_ISA_EXT_ZICSR, isainfo->isa);
+			set_bit(RISCV_ISA_EXT_ZIFENCEI, isainfo->isa);
 			set_bit(RISCV_ISA_EXT_ZICNTR, isainfo->isa);
 			set_bit(RISCV_ISA_EXT_ZIHPM, isainfo->isa);
 		}
@@ -1346,7 +1346,7 @@ static void __init reserve_crashkernel(void)
 	 */
 	crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE,
 					       search_start,
-					       min(search_end, (unsigned long) SZ_4G));
+					       min(search_end, (unsigned long)(SZ_4G - 1)));
 	if (crash_base == 0) {
 		/* Try again without restricting region to 32bit addressible memory */
 		crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE,
@@ -69,7 +69,7 @@ struct rv_jit_context {
 	struct bpf_prog *prog;
 	u16 *insns;		/* RV insns */
 	int ninsns;
-	int body_len;
+	int prologue_len;
 	int epilogue_offset;
 	int *offset;		/* BPF to RV */
 	int nexentries;

@@ -216,8 +216,8 @@ static inline int rv_offset(int insn, int off, struct rv_jit_context *ctx)
 	int from, to;
 
 	off++; /* BPF branch is from PC+1, RV is from PC */
-	from = (insn > 0) ? ctx->offset[insn - 1] : 0;
-	to = (insn + off > 0) ? ctx->offset[insn + off - 1] : 0;
+	from = (insn > 0) ? ctx->offset[insn - 1] : ctx->prologue_len;
+	to = (insn + off > 0) ? ctx->offset[insn + off - 1] : ctx->prologue_len;
 	return ninsns_rvoff(to - from);
 }
@@ -44,7 +44,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	unsigned int prog_size = 0, extable_size = 0;
 	bool tmp_blinded = false, extra_pass = false;
 	struct bpf_prog *tmp, *orig_prog = prog;
-	int pass = 0, prev_ninsns = 0, prologue_len, i;
+	int pass = 0, prev_ninsns = 0, i;
 	struct rv_jit_data *jit_data;
 	struct rv_jit_context *ctx;
 

@@ -83,6 +83,12 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		prog = orig_prog;
 		goto out_offset;
 	}
+
+	if (build_body(ctx, extra_pass, NULL)) {
+		prog = orig_prog;
+		goto out_offset;
+	}
+
 	for (i = 0; i < prog->len; i++) {
 		prev_ninsns += 32;
 		ctx->offset[i] = prev_ninsns;

@@ -91,12 +97,15 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	for (i = 0; i < NR_JIT_ITERATIONS; i++) {
 		pass++;
 		ctx->ninsns = 0;
+
+		bpf_jit_build_prologue(ctx);
+		ctx->prologue_len = ctx->ninsns;
+
 		if (build_body(ctx, extra_pass, ctx->offset)) {
 			prog = orig_prog;
 			goto out_offset;
 		}
-		ctx->body_len = ctx->ninsns;
-		bpf_jit_build_prologue(ctx);
+
 		ctx->epilogue_offset = ctx->ninsns;
 		bpf_jit_build_epilogue(ctx);
 

@@ -162,10 +171,8 @@ skip_init_ctx:
 
 	if (!prog->is_func || extra_pass) {
 		bpf_jit_binary_lock_ro(jit_data->header);
-		prologue_len = ctx->epilogue_offset - ctx->body_len;
 		for (i = 0; i < prog->len; i++)
-			ctx->offset[i] = ninsns_rvoff(prologue_len +
-						      ctx->offset[i]);
+			ctx->offset[i] = ninsns_rvoff(ctx->offset[i]);
 		bpf_prog_fill_jited_linfo(prog, ctx->offset);
 out_offset:
 		kfree(ctx->offset);
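With these JIT changes, the per-instruction offsets recorded in ctx->offset[] already include the prologue, so the fallback used for the first BPF instruction must be prologue_len rather than 0 — otherwise any branch targeting instruction 0 is short by the prologue size. A standalone sketch of the offset arithmetic, with all numbers invented for illustration:

/* Toy recomputation of a branch displacement the way the fixed JIT
 * does it. */
#include <stdio.h>

static int ninsns_rvoff(int ninsns)
{
	return ninsns << 1;	/* 2 bytes per RVC instruction unit */
}

int main(void)
{
	int prologue_len = 8;		/* RV insns emitted before BPF insn 0 */
	int offset[] = { 10, 13, 17 };	/* end offset of each BPF insn, in RV insns */
	int insn = 0, off = 2;		/* branch from insn 0, forward by 2 */

	off++;	/* BPF branches are relative to PC+1, RV to PC */
	int from = (insn > 0) ? offset[insn - 1] : prologue_len;
	int to = (insn + off > 0) ? offset[insn + off - 1] : prologue_len;

	printf("jump displacement: %d bytes\n", ninsns_rvoff(to - from));
	return 0;
}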
@@ -108,13 +108,13 @@ int systemasic_irq_demux(int irq)
 	__u32 j, bit;
 
 	switch (irq) {
-	case 13:
+	case 13 + 16:
 		level = 0;
 		break;
-	case 11:
+	case 11 + 16:
 		level = 1;
 		break;
-	case 9:
+	case 9 + 16:
 		level = 2;
 		break;
 	default:
@@ -389,10 +389,10 @@ static unsigned char irl2irq[HL_NR_IRL];
 
 static int highlander_irq_demux(int irq)
 {
-	if (irq >= HL_NR_IRL || irq < 0 || !irl2irq[irq])
+	if (irq >= HL_NR_IRL + 16 || irq < 16 || !irl2irq[irq - 16])
 		return irq;
 
-	return irl2irq[irq];
+	return irl2irq[irq - 16];
 }
 
 static void __init highlander_init_irq(void)
@@ -117,10 +117,10 @@ static unsigned char irl2irq[R2D_NR_IRL];
 
 int rts7751r2d_irq_demux(int irq)
 {
-	if (irq >= R2D_NR_IRL || irq < 0 || !irl2irq[irq])
+	if (irq >= R2D_NR_IRL + 16 || irq < 16 || !irl2irq[irq - 16])
 		return irq;
 
-	return irl2irq[irq];
+	return irl2irq[irq - 16];
 }
 
 /*
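The three SH demux fixes above apply the same transformation: the board's IRL vectors now sit above the 16 legacy IRQs, so both the range check and the table index must account for the 16-entry shift. A self-contained sketch with an invented table size and contents (NR_IRL and the mapped values are illustrative, not the real board data):

/* Toy model of the shifted IRL lookup. */
#include <stdio.h>

#define NR_IRL 13
static unsigned char irl2irq[NR_IRL] = { [0] = 80, [1] = 81 }; /* sparse demo table */

static int irq_demux(int irq)
{
	if (irq >= NR_IRL + 16 || irq < 16 || !irl2irq[irq - 16])
		return irq;	/* not a mapped IRL vector; pass through */

	return irl2irq[irq - 16];
}

int main(void)
{
	printf("%d\n", irq_demux(16));	/* -> 80: first IRL entry */
	printf("%d\n", irq_demux(5));	/* -> 5: below the legacy block */
	return 0;
}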
@@ -29,9 +29,9 @@ endchoice
 config HD64461_IRQ
 	int "HD64461 IRQ"
 	depends on HD64461
-	default "36"
+	default "52"
 	help
-	  The default setting of the HD64461 IRQ is 36.
+	  The default setting of the HD64461 IRQ is 52.
 
 	  Do not change this unless you know what you are doing.
@@ -229,7 +229,7 @@
 #define HD64461_NIMR		HD64461_IO_OFFSET(0x5002)
 
 #define HD64461_IRQBASE		OFFCHIP_IRQ_BASE
-#define OFFCHIP_IRQ_BASE	64
+#define OFFCHIP_IRQ_BASE	(64 + 16)
 #define HD64461_IRQ_NUM		16
 
 #define HD64461_IRQ_UART	(HD64461_IRQBASE+5)
@@ -15,7 +15,7 @@
 unsigned long __xchg_u32(volatile u32 *m, u32 new);
 void __xchg_called_with_bad_pointer(void);
 
-static inline unsigned long __arch_xchg(unsigned long x, __volatile__ void * ptr, int size)
+static __always_inline unsigned long __arch_xchg(unsigned long x, __volatile__ void * ptr, int size)
 {
 	switch (size) {
 	case 4:
@@ -87,7 +87,7 @@ xchg16(__volatile__ unsigned short *m, unsigned short val)
 	return (load32 & mask) >> bit_shift;
 }
 
-static inline unsigned long
+static __always_inline unsigned long
 __arch_xchg(unsigned long x, __volatile__ void * ptr, int size)
 {
 	switch (size) {
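Forcing __arch_xchg to __always_inline matters because the size switch is meant to constant-fold at compile time, leaving the call to the deliberately undefined __xchg_called_with_bad_pointer() as dead code; if the compiler declines to inline, that call can survive. A sketch of the idiom under hypothetical names (arch_xchg_demo, bad_xchg_size); the link-error trick assumes building with optimization, e.g. -O2:

/* Size-dispatch idiom: an undefined helper turns a bad size into a
 * link-time error once the switch constant-folds. */
void bad_xchg_size(void);	/* deliberately never defined */

static __attribute__((always_inline)) inline unsigned long
arch_xchg_demo(unsigned long x, volatile void *ptr, int size)
{
	switch (size) {
	case 4:
		/* real code would emit the 32-bit exchange here */
		return x;
	}
	bad_xchg_size();	/* unreachable when size is a known 4 */
	return x;
}

int main(void)
{
	volatile unsigned int v = 1;

	return (int)arch_xchg_demo(2, &v, sizeof(v));
}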
@@ -437,7 +437,7 @@ void __init arch_cpu_finalize_init(void)
 	os_check_bugs();
 }
 
-void apply_ibt_endbr(s32 *start, s32 *end)
+void apply_seal_endbr(s32 *start, s32 *end)
 {
 }
@@ -719,26 +719,6 @@ SYM_CODE_START(__switch_to_asm)
 SYM_CODE_END(__switch_to_asm)
 .popsection
 
-/*
- * The unwinder expects the last frame on the stack to always be at the same
- * offset from the end of the page, which allows it to validate the stack.
- * Calling schedule_tail() directly would break that convention because its an
- * asmlinkage function so its argument has to be pushed on the stack. This
- * wrapper creates a proper "end of stack" frame header before the call.
- */
-.pushsection .text, "ax"
-SYM_FUNC_START(schedule_tail_wrapper)
-	FRAME_BEGIN
-
-	pushl	%eax
-	call	schedule_tail
-	popl	%eax
-
-	FRAME_END
-	RET
-SYM_FUNC_END(schedule_tail_wrapper)
-.popsection
-
 /*
  * A newly forked process directly context switches into this address.
  *

@@ -747,29 +727,22 @@ SYM_FUNC_END(schedule_tail_wrapper)
  * edi: kernel thread arg
  */
 .pushsection .text, "ax"
-SYM_CODE_START(ret_from_fork)
-	call	schedule_tail_wrapper
+SYM_CODE_START(ret_from_fork_asm)
+	movl	%esp, %edx	/* regs */
 
-	testl	%ebx, %ebx
-	jnz	1f		/* kernel threads are uncommon */
+	/* return address for the stack unwinder */
+	pushl	$.Lsyscall_32_done
 
-2:
-	/* When we fork, we trace the syscall return in the child, too. */
-	movl	%esp, %eax
-	call	syscall_exit_to_user_mode
-	jmp	.Lsyscall_32_done
+	FRAME_BEGIN
+	/* prev already in EAX */
+	movl	%ebx, %ecx	/* fn */
+	pushl	%edi		/* fn_arg */
+	call	ret_from_fork
+	addl	$4, %esp
+	FRAME_END
 
-	/* kernel thread */
-1:	movl	%edi, %eax
-	CALL_NOSPEC ebx
-	/*
-	 * A kernel thread is allowed to return here after successfully
-	 * calling kernel_execve(). Exit to userspace to complete the execve()
-	 * syscall.
-	 */
-	movl	$0, PT_EAX(%esp)
-	jmp	2b
-SYM_CODE_END(ret_from_fork)
+	RET
+SYM_CODE_END(ret_from_fork_asm)
 .popsection
 
 SYM_ENTRY(__begin_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE)
@@ -284,36 +284,19 @@ SYM_FUNC_END(__switch_to_asm)
  * r12: kernel thread arg
  */
 .pushsection .text, "ax"
-	__FUNC_ALIGN
-SYM_CODE_START_NOALIGN(ret_from_fork)
-	UNWIND_HINT_END_OF_STACK
+SYM_CODE_START(ret_from_fork_asm)
+	UNWIND_HINT_REGS
 	ANNOTATE_NOENDBR // copy_thread
 	CALL_DEPTH_ACCOUNT
-	movq	%rax, %rdi
-	call	schedule_tail			/* rdi: 'prev' task parameter */
 
-	testq	%rbx, %rbx			/* from kernel_thread? */
-	jnz	1f				/* kernel threads are uncommon */
+	movq	%rax, %rdi		/* prev */
+	movq	%rsp, %rsi		/* regs */
+	movq	%rbx, %rdx		/* fn */
+	movq	%r12, %rcx		/* fn_arg */
+	call	ret_from_fork
 
-2:
-	UNWIND_HINT_REGS
-	movq	%rsp, %rdi
-	call	syscall_exit_to_user_mode	/* returns with IRQs disabled */
 	jmp	swapgs_restore_regs_and_return_to_usermode
-
-1:
-	/* kernel thread */
-	UNWIND_HINT_END_OF_STACK
-	movq	%r12, %rdi
-	CALL_NOSPEC rbx
-	/*
-	 * A kernel thread is allowed to return here after successfully
-	 * calling kernel_execve(). Exit to userspace to complete the execve()
-	 * syscall.
-	 */
-	movq	$0, RAX(%rsp)
-	jmp	2b
-SYM_CODE_END(ret_from_fork)
+SYM_CODE_END(ret_from_fork_asm)
 .popsection
 
 .macro DEBUG_ENTRY_ASSERT_IRQS_OFF
@@ -3993,6 +3993,13 @@ static int intel_pmu_hw_config(struct perf_event *event)
 		struct perf_event *leader = event->group_leader;
 		struct perf_event *sibling = NULL;
 
+		/*
+		 * When this memload event is also the first event (no group
+		 * exists yet), then there is no aux event before it.
+		 */
+		if (leader == event)
+			return -ENODATA;
+
 		if (!is_mem_loads_aux_event(leader)) {
 			for_each_sibling_event(sibling, leader) {
 				if (is_mem_loads_aux_event(sibling))
@@ -96,7 +96,7 @@ extern void alternative_instructions(void);
 extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
 extern void apply_retpolines(s32 *start, s32 *end);
 extern void apply_returns(s32 *start, s32 *end);
-extern void apply_ibt_endbr(s32 *start, s32 *end);
+extern void apply_seal_endbr(s32 *start, s32 *end);
 extern void apply_fineibt(s32 *start_retpoline, s32 *end_retpoine,
 			  s32 *start_cfi, s32 *end_cfi);
@@ -34,7 +34,7 @@
 /*
  * Create a dummy function pointer reference to prevent objtool from marking
  * the function as needing to be "sealed" (i.e. ENDBR converted to NOP by
- * apply_ibt_endbr()).
+ * apply_seal_endbr()).
  */
 #define IBT_NOSEAL(fname)				\
 	".pushsection .discard.ibt_endbr_noseal\n\t"	\
@@ -234,6 +234,10 @@
  * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
  * indirect jmp/call which may be susceptible to the Spectre variant 2
  * attack.
+ *
+ * NOTE: these do not take kCFI into account and are thus not comparable to C
+ * indirect calls, take care when using. The target of these should be an ENDBR
+ * instruction irrespective of kCFI.
  */
 .macro JMP_NOSPEC reg:req
 #ifdef CONFIG_RETPOLINE
@@ -12,7 +12,9 @@ struct task_struct *__switch_to_asm(struct task_struct *prev,
 __visible struct task_struct *__switch_to(struct task_struct *prev,
 					  struct task_struct *next);
 
-asmlinkage void ret_from_fork(void);
+asmlinkage void ret_from_fork_asm(void);
+__visible void ret_from_fork(struct task_struct *prev, struct pt_regs *regs,
+			     int (*fn)(void *), void *fn_arg);
 
 /*
  * This is the structure pointed to by thread.sp for an inactive task. The
@@ -778,6 +778,8 @@ void __init_or_module noinline apply_returns(s32 *start, s32 *end) { }
 
 #ifdef CONFIG_X86_KERNEL_IBT
 
+static void poison_cfi(void *addr);
+
 static void __init_or_module poison_endbr(void *addr, bool warn)
 {
 	u32 endbr, poison = gen_endbr_poison();

@@ -802,8 +804,11 @@ static void __init_or_module poison_endbr(void *addr, bool warn)
 
 /*
  * Generated by: objtool --ibt
+ *
+ * Seal the functions for indirect calls by clobbering the ENDBR instructions
+ * and the kCFI hash value.
  */
-void __init_or_module noinline apply_ibt_endbr(s32 *start, s32 *end)
+void __init_or_module noinline apply_seal_endbr(s32 *start, s32 *end)
 {
 	s32 *s;
 

@@ -812,13 +817,13 @@ void __init_or_module noinline apply_ibt_endbr(s32 *start, s32 *end)
 
 		poison_endbr(addr, true);
 		if (IS_ENABLED(CONFIG_FINEIBT))
-			poison_endbr(addr - 16, false);
+			poison_cfi(addr - 16);
 	}
 }
 
 #else
 
-void __init_or_module apply_ibt_endbr(s32 *start, s32 *end) { }
+void __init_or_module apply_seal_endbr(s32 *start, s32 *end) { }
 
 #endif /* CONFIG_X86_KERNEL_IBT */

@@ -1063,6 +1068,17 @@ static int cfi_rewrite_preamble(s32 *start, s32 *end)
 	return 0;
 }
 
+static void cfi_rewrite_endbr(s32 *start, s32 *end)
+{
+	s32 *s;
+
+	for (s = start; s < end; s++) {
+		void *addr = (void *)s + *s;
+
+		poison_endbr(addr+16, false);
+	}
+}
+
 /* .retpoline_sites */
 static int cfi_rand_callers(s32 *start, s32 *end)
 {

@@ -1157,14 +1173,19 @@ static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
 		return;
 
 	case CFI_FINEIBT:
+		/* place the FineIBT preamble at func()-16 */
 		ret = cfi_rewrite_preamble(start_cfi, end_cfi);
 		if (ret)
 			goto err;
 
+		/* rewrite the callers to target func()-16 */
 		ret = cfi_rewrite_callers(start_retpoline, end_retpoline);
 		if (ret)
 			goto err;
 
+		/* now that nobody targets func()+0, remove ENDBR there */
+		cfi_rewrite_endbr(start_cfi, end_cfi);
+
 		if (builtin)
 			pr_info("Using FineIBT CFI\n");
 		return;

@@ -1177,6 +1198,41 @@ err:
 	pr_err("Something went horribly wrong trying to rewrite the CFI implementation.\n");
 }
 
+static inline void poison_hash(void *addr)
+{
+	*(u32 *)addr = 0;
+}
+
+static void poison_cfi(void *addr)
+{
+	switch (cfi_mode) {
+	case CFI_FINEIBT:
+		/*
+		 * __cfi_\func:
+		 *	osp nopl (%rax)
+		 *	subl	$0, %r10d
+		 *	jz	1f
+		 *	ud2
+		 * 1:	nop
+		 */
+		poison_endbr(addr, false);
+		poison_hash(addr + fineibt_preamble_hash);
+		break;
+
+	case CFI_KCFI:
+		/*
+		 * __cfi_\func:
+		 *	movl	$0, %eax
+		 *	.skip	11, 0x90
+		 */
+		poison_hash(addr + 1);
+		break;
+
+	default:
+		break;
+	}
+}
+
 #else
 
 static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,

@@ -1184,6 +1240,10 @@ static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
 {
 }
 
+#ifdef CONFIG_X86_KERNEL_IBT
+static void poison_cfi(void *addr) { }
+#endif
+
 #endif
 
 void apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,

@@ -1565,7 +1625,10 @@ void __init alternative_instructions(void)
 	 */
 	callthunks_patch_builtin_calls();
 
-	apply_ibt_endbr(__ibt_endbr_seal, __ibt_endbr_seal_end);
+	/*
+	 * Seal all functions that do not have their address taken.
+	 */
+	apply_seal_endbr(__ibt_endbr_seal, __ibt_endbr_seal_end);
 
 #ifdef CONFIG_SMP
 	/* Patch to UP if other cpus not imminent. */
@@ -282,7 +282,6 @@ static inline void tramp_free(void *tramp) { }
 
 /* Defined as markers to the end of the ftrace default trampolines */
 extern void ftrace_regs_caller_end(void);
-extern void ftrace_regs_caller_ret(void);
 extern void ftrace_caller_end(void);
 extern void ftrace_caller_op_ptr(void);
 extern void ftrace_regs_caller_op_ptr(void);
@@ -358,7 +358,7 @@ int module_finalize(const Elf_Ehdr *hdr,
 	}
 	if (ibt_endbr) {
 		void *iseg = (void *)ibt_endbr->sh_addr;
-		apply_ibt_endbr(iseg, iseg + ibt_endbr->sh_size);
+		apply_seal_endbr(iseg, iseg + ibt_endbr->sh_size);
 	}
 	if (locks) {
 		void *lseg = (void *)locks->sh_addr;
@@ -28,6 +28,7 @@
 #include <linux/static_call.h>
 #include <trace/events/power.h>
 #include <linux/hw_breakpoint.h>
+#include <linux/entry-common.h>
 #include <asm/cpu.h>
 #include <asm/apic.h>
 #include <linux/uaccess.h>

@@ -134,6 +135,25 @@ static int set_new_tls(struct task_struct *p, unsigned long tls)
 	return do_set_thread_area_64(p, ARCH_SET_FS, tls);
 }
 
+__visible void ret_from_fork(struct task_struct *prev, struct pt_regs *regs,
+			     int (*fn)(void *), void *fn_arg)
+{
+	schedule_tail(prev);
+
+	/* Is this a kernel thread? */
+	if (unlikely(fn)) {
+		fn(fn_arg);
+		/*
+		 * A kernel thread is allowed to return here after successfully
+		 * calling kernel_execve(). Exit to userspace to complete the
+		 * execve() syscall.
+		 */
+		regs->ax = 0;
+	}
+
+	syscall_exit_to_user_mode(regs);
+}
+
 int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
 	unsigned long clone_flags = args->flags;

@@ -149,7 +169,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 	frame = &fork_frame->frame;
 
 	frame->bp = encode_frame_pointer(childregs);
-	frame->ret_addr = (unsigned long) ret_from_fork;
+	frame->ret_addr = (unsigned long) ret_from_fork_asm;
 	p->thread.sp = (unsigned long) fork_frame;
 	p->thread.io_bitmap = NULL;
 	p->thread.iopl_warn = 0;
@@ -90,30 +90,35 @@ SYM_CODE_END(xen_cpu_bringup_again)
 	ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS,       .asciz "linux")
 	ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION,  .asciz "2.6")
 	ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION,    .asciz "xen-3.0")
-#ifdef CONFIG_X86_32
-	ELFNOTE(Xen, XEN_ELFNOTE_VIRT_BASE,      _ASM_PTR __PAGE_OFFSET)
-#else
+#ifdef CONFIG_XEN_PV
 	ELFNOTE(Xen, XEN_ELFNOTE_VIRT_BASE,      _ASM_PTR __START_KERNEL_map)
 	/* Map the p2m table to a 512GB-aligned user address. */
 	ELFNOTE(Xen, XEN_ELFNOTE_INIT_P2M,       .quad (PUD_SIZE * PTRS_PER_PUD))
-#endif
-#ifdef CONFIG_XEN_PV
 	ELFNOTE(Xen, XEN_ELFNOTE_ENTRY,          _ASM_PTR startup_xen)
-#endif
-	ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, _ASM_PTR hypercall_page)
-	ELFNOTE(Xen, XEN_ELFNOTE_FEATURES,
-		.ascii "!writable_page_tables|pae_pgdir_above_4gb")
-	ELFNOTE(Xen, XEN_ELFNOTE_SUPPORTED_FEATURES,
-		.long (1 << XENFEAT_writable_page_tables) |       \
-		      (1 << XENFEAT_dom0) |                       \
-		      (1 << XENFEAT_linux_rsdp_unrestricted))
+	ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, .ascii "!writable_page_tables")
 	ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE,       .asciz "yes")
-	ELFNOTE(Xen, XEN_ELFNOTE_LOADER,         .asciz "generic")
 	ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID,
 		.quad _PAGE_PRESENT; .quad _PAGE_PRESENT)
-	ELFNOTE(Xen, XEN_ELFNOTE_SUSPEND_CANCEL, .long 1)
 	ELFNOTE(Xen, XEN_ELFNOTE_MOD_START_PFN,  .long 1)
 	ELFNOTE(Xen, XEN_ELFNOTE_HV_START_LOW,   _ASM_PTR __HYPERVISOR_VIRT_START)
 	ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET,   _ASM_PTR 0)
+# define FEATURES_PV (1 << XENFEAT_writable_page_tables)
+#else
+# define FEATURES_PV 0
+#endif
+#ifdef CONFIG_XEN_PVH
+# define FEATURES_PVH (1 << XENFEAT_linux_rsdp_unrestricted)
+#else
+# define FEATURES_PVH 0
+#endif
+#ifdef CONFIG_XEN_DOM0
+# define FEATURES_DOM0 (1 << XENFEAT_dom0)
+#else
+# define FEATURES_DOM0 0
+#endif
+	ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, _ASM_PTR hypercall_page)
+	ELFNOTE(Xen, XEN_ELFNOTE_SUPPORTED_FEATURES,
+		.long FEATURES_PV | FEATURES_PVH | FEATURES_DOM0)
+	ELFNOTE(Xen, XEN_ELFNOTE_LOADER,         .asciz "generic")
+	ELFNOTE(Xen, XEN_ELFNOTE_SUSPEND_CANCEL, .long 1)
 
 #endif /*CONFIG_XEN */
@@ -1,7 +1,7 @@
 /*
  * arch/xtensa/kernel/align.S
  *
- * Handle unalignment exceptions in kernel space.
+ * Handle unalignment and load/store exceptions.
  *
  * This file is subject to the terms and conditions of the GNU General
  * Public License.  See the file "COPYING" in the main directory of

@@ -26,20 +26,18 @@
 #define LOAD_EXCEPTION_HANDLER
 #endif
 
-#if XCHAL_UNALIGNED_STORE_EXCEPTION || defined LOAD_EXCEPTION_HANDLER
+#if XCHAL_UNALIGNED_STORE_EXCEPTION || defined CONFIG_XTENSA_LOAD_STORE
 #define STORE_EXCEPTION_HANDLER
 #endif
 
 #if defined LOAD_EXCEPTION_HANDLER || defined STORE_EXCEPTION_HANDLER
 #define ANY_EXCEPTION_HANDLER
 #endif
 
-#if XCHAL_HAVE_WINDOWED
+#if XCHAL_HAVE_WINDOWED && defined CONFIG_MMU
 #define UNALIGNED_USER_EXCEPTION
 #endif
 
-/* First-level exception handler for unaligned exceptions.
- *
- * Note: This handler works only for kernel exceptions. Unaligned user
- *       access should get a seg fault.
- */
-
 /* Big and little endian 16-bit values are located in
  * different halves of a register. HWORD_START helps to
  * abstract the notion of extracting a 16-bit value from a

@@ -228,8 +226,6 @@ ENDPROC(fast_load_store)
 #ifdef ANY_EXCEPTION_HANDLER
 ENTRY(fast_unaligned)
 
-#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
-
 	call0	.Lsave_and_load_instruction
 
 	/* Analyze the instruction (load or store?). */

@@ -244,8 +240,7 @@ ENTRY(fast_unaligned)
 	/* 'store indicator bit' not set, jump */
 	_bbci.l	a4, OP1_SI_BIT + INSN_OP1, .Lload
 
-#endif
-#if XCHAL_UNALIGNED_STORE_EXCEPTION
+#ifdef STORE_EXCEPTION_HANDLER
 
 	/* Store: Jump to table entry to get the value in the source register.*/
 

@@ -254,7 +249,7 @@ ENTRY(fast_unaligned)
 	addx8	a5, a6, a5
 	jx	a5			# jump into table
 #endif
-#if XCHAL_UNALIGNED_LOAD_EXCEPTION
+#ifdef LOAD_EXCEPTION_HANDLER
 
 	/* Load: Load memory address. */
 

@@ -328,7 +323,7 @@ ENTRY(fast_unaligned)
 	mov	a14, a3		;	_j .Lexit;	.align 8
 	mov	a15, a3		;	_j .Lexit;	.align 8
 #endif
-#if XCHAL_UNALIGNED_STORE_EXCEPTION
+#ifdef STORE_EXCEPTION_HANDLER
 .Lstore_table:
 	l32i	a3, a2, PT_AREG0;	_j .Lstore_w;	.align 8
 	mov	a3, a1;			_j .Lstore_w;	.align 8	# fishy??

@@ -348,7 +343,6 @@ ENTRY(fast_unaligned)
 	mov	a3, a15		;	_j .Lstore_w;	.align 8
 #endif
 
-#ifdef ANY_EXCEPTION_HANDLER
 	/* We cannot handle this exception. */
 
 	.extern _kernel_exception

@@ -377,8 +371,8 @@ ENTRY(fast_unaligned)
 
 2:	movi	a0, _user_exception
 	jx	a0
-#endif
-#if XCHAL_UNALIGNED_STORE_EXCEPTION
 
+#ifdef STORE_EXCEPTION_HANDLER
+
 	# a7: instruction pointer, a4: instruction, a3: value
 .Lstore_w:

@@ -444,7 +438,7 @@ ENTRY(fast_unaligned)
 	s32i	a6, a4, 4
 #endif
 #endif
-#ifdef ANY_EXCEPTION_HANDLER
+
 .Lexit:
 #if XCHAL_HAVE_LOOPS
 	rsr	a4, lend		# check if we reached LEND

@@ -539,7 +533,7 @@ ENTRY(fast_unaligned)
 	__src_b	a4, a4, a5	# a4 has the instruction
 
 	ret
-#endif
+
 ENDPROC(fast_unaligned)
 
 ENTRY(fast_unaligned_fixup)
@ -102,7 +102,8 @@ static dispatch_init_table_t __initdata dispatch_init_table[] = {
|
|||
#endif
|
||||
{ EXCCAUSE_INTEGER_DIVIDE_BY_ZERO, 0, do_div0 },
|
||||
/* EXCCAUSE_PRIVILEGED unhandled */
|
||||
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
|
||||
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION || \
|
||||
IS_ENABLED(CONFIG_XTENSA_LOAD_STORE)
|
||||
#ifdef CONFIG_XTENSA_UNALIGNED_USER
|
||||
{ EXCCAUSE_UNALIGNED, USER, fast_unaligned },
|
||||
#endif
|
||||
|
|
|
@@ -237,7 +237,7 @@ static int tuntap_probe(struct iss_net_private *lp, int index, char *init)

    init += sizeof(TRANSPORT_TUNTAP_NAME) - 1;
    if (*init == ',') {
        rem = split_if_spec(init + 1, &mac_str, &dev_name);
        rem = split_if_spec(init + 1, &mac_str, &dev_name, NULL);
        if (rem != NULL) {
            pr_err("%s: extra garbage on specification : '%s'\n",
                   dev->name, rem);
@@ -540,6 +540,7 @@ static void iss_net_configure(int index, char *init)
        rtnl_unlock();
        pr_err("%s: error registering net device!\n", dev->name);
        platform_device_unregister(&lp->pdev);
        /* dev is freed by the iss_net_pdev_release callback */
        return;
    }
    rtnl_unlock();
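
The one-word fix above appends a NULL sentinel to the split_if_spec() call; the callee walks its variadic argument list until it sees NULL, so a missing terminator sends it reading past the real arguments. A minimal userspace sketch of such a sentinel-terminated splitter (the parsing is simplified and split_spec() is a hypothetical stand-in, not the kernel function):

#include <stdarg.h>
#include <stdio.h>
#include <string.h>

/* Split "a,b,c" into the char** slots passed as varargs, stopping at the
 * NULL sentinel. Returns leftover text, or NULL when everything fit. */
static char *split_spec(char *str, ...)
{
    char **arg;
    va_list ap;

    va_start(ap, str);
    while ((arg = va_arg(ap, char **)) != NULL) {
        char *comma = strchr(str, ',');

        *arg = str;
        if (!comma)
            break;          /* string exhausted before the slots */
        *comma = '\0';
        str = comma + 1;
    }
    va_end(ap);
    return arg ? NULL : str;    /* slots exhausted -> leftover text */
}

int main(void)
{
    char spec[] = "00:11:22:33:44:55,tap0";
    char *mac, *dev;

    /* Without the trailing NULL the callee would keep calling va_arg()
     * into garbage, which is exactly what the hunk above fixes. */
    split_spec(spec, &mac, &dev, NULL);
    printf("mac=%s dev=%s\n", mac, dev);
    return 0;
}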
@@ -79,7 +79,14 @@ int blk_crypto_profile_init(struct blk_crypto_profile *profile,
    unsigned int slot_hashtable_size;

    memset(profile, 0, sizeof(*profile));
    init_rwsem(&profile->lock);

    /*
     * profile->lock of an underlying device can nest inside profile->lock
     * of a device-mapper device, so use a dynamic lock class to avoid
     * false-positive lockdep reports.
     */
    lockdep_register_key(&profile->lockdep_key);
    __init_rwsem(&profile->lock, "&profile->lock", &profile->lockdep_key);

    if (num_slots == 0)
        return 0;
@@ -89,7 +96,7 @@ int blk_crypto_profile_init(struct blk_crypto_profile *profile,
    profile->slots = kvcalloc(num_slots, sizeof(profile->slots[0]),
                              GFP_KERNEL);
    if (!profile->slots)
        return -ENOMEM;
        goto err_destroy;

    profile->num_slots = num_slots;

@@ -435,6 +442,7 @@ void blk_crypto_profile_destroy(struct blk_crypto_profile *profile)
{
    if (!profile)
        return;
    lockdep_unregister_key(&profile->lockdep_key);
    kvfree(profile->slot_hashtable);
    kvfree_sensitive(profile->slots,
                     sizeof(profile->slots[0]) * profile->num_slots);

@@ -189,7 +189,7 @@ static void blk_flush_complete_seq(struct request *rq,
    case REQ_FSEQ_DATA:
        list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
        spin_lock(&q->requeue_lock);
        list_add_tail(&rq->queuelist, &q->flush_list);
        list_add(&rq->queuelist, &q->requeue_list);
        spin_unlock(&q->requeue_lock);
        blk_mq_kick_requeue_list(q);
        break;
@@ -328,8 +328,24 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
}
EXPORT_SYMBOL(blk_rq_init);

/* Set start and alloc time when the allocated request is actually used */
static inline void blk_mq_rq_time_init(struct request *rq, u64 alloc_time_ns)
{
    if (blk_mq_need_time_stamp(rq))
        rq->start_time_ns = ktime_get_ns();
    else
        rq->start_time_ns = 0;

#ifdef CONFIG_BLK_RQ_ALLOC_TIME
    if (blk_queue_rq_alloc_time(rq->q))
        rq->alloc_time_ns = alloc_time_ns ?: rq->start_time_ns;
    else
        rq->alloc_time_ns = 0;
#endif
}

static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
        struct blk_mq_tags *tags, unsigned int tag, u64 alloc_time_ns)
        struct blk_mq_tags *tags, unsigned int tag)
{
    struct blk_mq_ctx *ctx = data->ctx;
    struct blk_mq_hw_ctx *hctx = data->hctx;
@@ -356,14 +372,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
    }
    rq->timeout = 0;

    if (blk_mq_need_time_stamp(rq))
        rq->start_time_ns = ktime_get_ns();
    else
        rq->start_time_ns = 0;
    rq->part = NULL;
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
    rq->alloc_time_ns = alloc_time_ns;
#endif
    rq->io_start_time_ns = 0;
    rq->stats_sectors = 0;
    rq->nr_phys_segments = 0;
@@ -393,8 +402,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
}

static inline struct request *
__blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data,
        u64 alloc_time_ns)
__blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data)
{
    unsigned int tag, tag_offset;
    struct blk_mq_tags *tags;
@@ -413,7 +421,7 @@ __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data,
        tag = tag_offset + i;
        prefetch(tags->static_rqs[tag]);
        tag_mask &= ~(1UL << i);
        rq = blk_mq_rq_ctx_init(data, tags, tag, alloc_time_ns);
        rq = blk_mq_rq_ctx_init(data, tags, tag);
        rq_list_add(data->cached_rq, rq);
        nr++;
    }
@@ -474,9 +482,11 @@ retry:
     * Try batched alloc if we want more than 1 tag.
     */
    if (data->nr_tags > 1) {
        rq = __blk_mq_alloc_requests_batch(data, alloc_time_ns);
        if (rq)
        rq = __blk_mq_alloc_requests_batch(data);
        if (rq) {
            blk_mq_rq_time_init(rq, alloc_time_ns);
            return rq;
        }
        data->nr_tags = 1;
    }

@@ -499,8 +509,9 @@ retry:
        goto retry;
    }

    return blk_mq_rq_ctx_init(data, blk_mq_tags_from_data(data), tag,
                    alloc_time_ns);
    rq = blk_mq_rq_ctx_init(data, blk_mq_tags_from_data(data), tag);
    blk_mq_rq_time_init(rq, alloc_time_ns);
    return rq;
}

static struct request *blk_mq_rq_cache_fill(struct request_queue *q,
@@ -555,6 +566,7 @@ static struct request *blk_mq_alloc_cached_request(struct request_queue *q,
            return NULL;

        plug->cached_rq = rq_list_next(rq);
        blk_mq_rq_time_init(rq, 0);
    }

    rq->cmd_flags = opf;
@@ -656,8 +668,8 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
    tag = blk_mq_get_tag(&data);
    if (tag == BLK_MQ_NO_TAG)
        goto out_queue_exit;
    rq = blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag,
                    alloc_time_ns);
    rq = blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag);
    blk_mq_rq_time_init(rq, alloc_time_ns);
    rq->__data_len = 0;
    rq->__sector = (sector_t) -1;
    rq->bio = rq->biotail = NULL;
@@ -2896,6 +2908,7 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
    plug->cached_rq = rq_list_next(rq);
    rq_qos_throttle(q, *bio);

    blk_mq_rq_time_init(rq, 0);
    rq->cmd_flags = (*bio)->bi_opf;
    INIT_LIST_HEAD(&rq->queuelist);
    return rq;
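
The refactor above moves timestamping out of blk_mq_rq_ctx_init() into blk_mq_rq_time_init(), which runs when a request is actually handed out, so pre-allocated requests sitting in the plug cache are stamped at use time rather than at fill time. A userspace sketch of the same idea; this is plain C modeling the pattern, not kernel code:

#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

struct req {
    uint64_t start_time_ns;
    uint64_t alloc_time_ns;
};

static uint64_t now_ns(void)
{
    struct timespec ts;

    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Called when a (possibly cached) request is handed out for real work;
 * alloc_ns == 0 means "no separate allocation time, reuse start time",
 * mirroring the alloc_time_ns ?: start_time_ns fallback above. */
static void req_time_init(struct req *rq, uint64_t alloc_ns)
{
    rq->start_time_ns = now_ns();
    rq->alloc_time_ns = alloc_ns ? alloc_ns : rq->start_time_ns;
}

int main(void)
{
    struct req cached = { 0, 0 };   /* pre-allocated, not yet stamped */

    usleep(10000);                  /* request sits in the cache... */
    req_time_init(&cached, 0);      /* ...and is stamped on first use */
    printf("start=%llu alloc=%llu\n",
           (unsigned long long)cached.start_time_ns,
           (unsigned long long)cached.alloc_time_ns);
    return 0;
}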
@@ -442,7 +442,6 @@ struct blk_revalidate_zone_args {
    unsigned long *conv_zones_bitmap;
    unsigned long *seq_zones_wlock;
    unsigned int nr_zones;
    sector_t zone_sectors;
    sector_t sector;
};

@@ -456,38 +455,34 @@ static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
    struct gendisk *disk = args->disk;
    struct request_queue *q = disk->queue;
    sector_t capacity = get_capacity(disk);
    sector_t zone_sectors = q->limits.chunk_sectors;

    /* Check for bad zones and holes in the zone report */
    if (zone->start != args->sector) {
        pr_warn("%s: Zone gap at sectors %llu..%llu\n",
            disk->disk_name, args->sector, zone->start);
        return -ENODEV;
    }

    if (zone->start >= capacity || !zone->len) {
        pr_warn("%s: Invalid zone start %llu, length %llu\n",
            disk->disk_name, zone->start, zone->len);
        return -ENODEV;
    }

    /*
     * All zones must have the same size, with the exception on an eventual
     * smaller last zone.
     */
    if (zone->start == 0) {
        if (zone->len == 0 || !is_power_of_2(zone->len)) {
            pr_warn("%s: Invalid zoned device with non power of two zone size (%llu)\n",
                disk->disk_name, zone->len);
            return -ENODEV;
        }

        args->zone_sectors = zone->len;
        args->nr_zones = (capacity + zone->len - 1) >> ilog2(zone->len);
    } else if (zone->start + args->zone_sectors < capacity) {
        if (zone->len != args->zone_sectors) {
    if (zone->start + zone->len < capacity) {
        if (zone->len != zone_sectors) {
            pr_warn("%s: Invalid zoned device with non constant zone size\n",
                disk->disk_name);
            return -ENODEV;
        }
    } else {
        if (zone->len > args->zone_sectors) {
            pr_warn("%s: Invalid zoned device with larger last zone size\n",
                disk->disk_name);
            return -ENODEV;
        }
    }

    /* Check for holes in the zone report */
    if (zone->start != args->sector) {
        pr_warn("%s: Zone gap at sectors %llu..%llu\n",
            disk->disk_name, args->sector, zone->start);
    } else if (zone->len > zone_sectors) {
        pr_warn("%s: Invalid zoned device with larger last zone size\n",
            disk->disk_name);
        return -ENODEV;
    }

@@ -526,11 +521,13 @@ static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
 * @disk: Target disk
 * @update_driver_data: Callback to update driver data on the frozen disk
 *
 * Helper function for low-level device drivers to (re) allocate and initialize
 * a disk request queue zone bitmaps. This functions should normally be called
 * within the disk ->revalidate method for blk-mq based drivers. For BIO based
 * drivers only q->nr_zones needs to be updated so that the sysfs exposed value
 * is correct.
 * Helper function for low-level device drivers to check and (re) allocate and
 * initialize a disk request queue zone bitmaps. This functions should normally
 * be called within the disk ->revalidate method for blk-mq based drivers.
 * Before calling this function, the device driver must already have set the
 * device zone size (chunk_sector limit) and the max zone append limit.
 * For BIO based drivers, this function cannot be used. BIO based device drivers
 * only need to set disk->nr_zones so that the sysfs exposed value is correct.
 * If the @update_driver_data callback function is not NULL, the callback is
 * executed with the device request queue frozen after all zones have been
 * checked.
@@ -539,9 +536,9 @@ int blk_revalidate_disk_zones(struct gendisk *disk,
                  void (*update_driver_data)(struct gendisk *disk))
{
    struct request_queue *q = disk->queue;
    struct blk_revalidate_zone_args args = {
        .disk = disk,
    };
    sector_t zone_sectors = q->limits.chunk_sectors;
    sector_t capacity = get_capacity(disk);
    struct blk_revalidate_zone_args args = { };
    unsigned int noio_flag;
    int ret;

@@ -550,13 +547,31 @@ int blk_revalidate_disk_zones(struct gendisk *disk,
    if (WARN_ON_ONCE(!queue_is_mq(q)))
        return -EIO;

    if (!get_capacity(disk))
        return -EIO;
    if (!capacity)
        return -ENODEV;

    /*
     * Checks that the device driver indicated a valid zone size and that
     * the max zone append limit is set.
     */
    if (!zone_sectors || !is_power_of_2(zone_sectors)) {
        pr_warn("%s: Invalid non power of two zone size (%llu)\n",
            disk->disk_name, zone_sectors);
        return -ENODEV;
    }

    if (!q->limits.max_zone_append_sectors) {
        pr_warn("%s: Invalid 0 maximum zone append limit\n",
            disk->disk_name);
        return -ENODEV;
    }

    /*
     * Ensure that all memory allocations in this context are done as if
     * GFP_NOIO was specified.
     */
    args.disk = disk;
    args.nr_zones = (capacity + zone_sectors - 1) >> ilog2(zone_sectors);
    noio_flag = memalloc_noio_save();
    ret = disk->fops->report_zones(disk, 0, UINT_MAX,
                       blk_revalidate_zone_cb, &args);
@@ -570,7 +585,7 @@ int blk_revalidate_disk_zones(struct gendisk *disk,
     * If zones where reported, make sure that the entire disk capacity
     * has been checked.
     */
    if (ret > 0 && args.sector != get_capacity(disk)) {
    if (ret > 0 && args.sector != capacity) {
        pr_warn("%s: Missing zones from sector %llu\n",
            disk->disk_name, args.sector);
        ret = -ENODEV;
@@ -583,7 +598,6 @@ int blk_revalidate_disk_zones(struct gendisk *disk,
     */
    blk_mq_freeze_queue(q);
    if (ret > 0) {
        blk_queue_chunk_sectors(q, args.zone_sectors);
        disk->nr_zones = args.nr_zones;
        swap(disk->seq_zones_wlock, args.seq_zones_wlock);
        swap(disk->conv_zones_bitmap, args.conv_zones_bitmap);
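
blk_revalidate_disk_zones() now derives the zone count up front from the chunk_sectors limit; with a power-of-two zone size the round-up division reduces to a shift by ilog2(). A small standalone sketch of that calculation, with illustrative numbers:

#include <stdio.h>
#include <stdint.h>

static int is_power_of_2(uint64_t n)
{
    return n != 0 && (n & (n - 1)) == 0;
}

static unsigned int ilog2_u64(uint64_t n)
{
    unsigned int l = 0;

    while (n >>= 1)
        l++;
    return l;
}

int main(void)
{
    uint64_t capacity = 1000000;    /* sectors, illustrative */
    uint64_t zone_sectors = 524288; /* 256 MiB zones at 512 B sectors */

    if (!is_power_of_2(zone_sectors)) {
        fprintf(stderr, "zone size must be a power of two\n");
        return 1;
    }
    /* (capacity + zone_sectors - 1) >> ilog2(zone_sectors):
     * rounds up, so a smaller last zone still counts as a zone. */
    printf("nr_zones = %llu\n",
           (unsigned long long)((capacity + zone_sectors - 1)
                                >> ilog2_u64(zone_sectors)));
    return 0;
}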
@@ -176,7 +176,7 @@ static inline struct request *deadline_from_pos(struct dd_per_prio *per_prio,
     * zoned writes, start searching from the start of a zone.
     */
    if (blk_rq_is_seq_zoned_write(rq))
        pos -= round_down(pos, rq->q->limits.chunk_sectors);
        pos = round_down(pos, rq->q->limits.chunk_sectors);

    while (node) {
        rq = rb_entry_rq(node);
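
The one-character change above matters: for a sequential zoned write the search should start at the zone start, which is round_down(pos, chunk_sectors); the old expression subtracted that value from pos and left the offset within the zone instead. A tiny demo (round_down is simplified to a modulo here; the kernel macro assumes a power-of-two divisor):

#include <stdio.h>

#define round_down(x, y) ((x) - ((x) % (y)))

int main(void)
{
    unsigned long pos = 150, chunk_sectors = 100;

    printf("old: pos -= round_down(...) -> %lu\n",
           pos - round_down(pos, chunk_sectors)); /* 50: offset in zone */
    printf("new: pos  = round_down(...) -> %lu\n",
           round_down(pos, chunk_sectors));       /* 100: zone start */
    return 0;
}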
@@ -90,7 +90,7 @@ int amiga_partition(struct parsed_partitions *state)
    }
    blk = be32_to_cpu(rdb->rdb_PartitionList);
    put_dev_sector(sect);
    for (part = 1; blk>0 && part<=16; part++, put_dev_sector(sect)) {
    for (part = 1; (s32) blk>0 && part<=16; part++, put_dev_sector(sect)) {
        /* Read in terms partition table understands */
        if (check_mul_overflow(blk, (sector_t) blksize, &blk)) {
            pr_err("Dev %s: overflow calculating partition block %llu! Skipping partitions %u and beyond\n",
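
Two details in this hunk are worth spelling out: the RDB partition list is terminated by 0xFFFFFFFF, which only reads as negative after the (s32) cast, and the block arithmetic is now overflow-checked. A sketch of both, with values chosen to trigger the overflow path; __builtin_mul_overflow is the GCC/Clang equivalent of the kernel's check_mul_overflow():

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t blk = 0xFFFFFFFFu;     /* end-of-list marker */
    uint64_t blksize = 16, byte_blk;

    printf("blk > 0 (u32): %d\n", blk > 0);             /* 1: keeps looping */
    printf("(s32)blk > 0:  %d\n", (int32_t)blk > 0);    /* 0: terminates */

    /* overflow-checked multiply: 2^62 * 16 does not fit in 64 bits */
    if (__builtin_mul_overflow((uint64_t)1 << 62, blksize, &byte_blk))
        printf("multiplication would overflow, skip partition\n");
    return 0;
}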
@@ -992,7 +992,7 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
        ssize_t plen;

        /* use the existing memory in an allocated page */
        if (ctx->merge) {
        if (ctx->merge && !(msg->msg_flags & MSG_SPLICE_PAGES)) {
            sgl = list_entry(ctx->tsgl_list.prev,
                             struct af_alg_tsgl, list);
            sg = sgl->sg + sgl->cur - 1;
@@ -1054,6 +1054,7 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
            ctx->used += plen;
            copied += plen;
            size -= plen;
            ctx->merge = 0;
        } else {
            do {
                struct page *pg;
@@ -1085,12 +1086,12 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
                size -= plen;
                sgl->cur++;
            } while (len && sgl->cur < MAX_SGL_ENTS);

            ctx->merge = plen & (PAGE_SIZE - 1);
        }

        if (!size)
            sg_mark_end(sg + sgl->cur - 1);

        ctx->merge = plen & (PAGE_SIZE - 1);
    }

    err = 0;

@@ -68,13 +68,15 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
    struct hash_ctx *ctx = ask->private;
    ssize_t copied = 0;
    size_t len, max_pages, npages;
    bool continuing = ctx->more, need_init = false;
    bool continuing, need_init = false;
    int err;

    max_pages = min_t(size_t, ALG_MAX_PAGES,
                      DIV_ROUND_UP(sk->sk_sndbuf, PAGE_SIZE));

    lock_sock(sk);
    continuing = ctx->more;

    if (!continuing) {
        /* Discard a previous request that wasn't marked MSG_MORE. */
        hash_free_result(sk, ctx);
@@ -185,8 +185,10 @@ static int software_key_query(const struct kernel_pkey_params *params,

    if (issig) {
        sig = crypto_alloc_sig(alg_name, 0, 0);
        if (IS_ERR(sig))
        if (IS_ERR(sig)) {
            ret = PTR_ERR(sig);
            goto error_free_key;
        }

        if (pkey->key_is_private)
            ret = crypto_sig_set_privkey(sig, key, pkey->keylen);
@@ -208,8 +210,10 @@ static int software_key_query(const struct kernel_pkey_params *params,
        }
    } else {
        tfm = crypto_alloc_akcipher(alg_name, 0, 0);
        if (IS_ERR(tfm))
        if (IS_ERR(tfm)) {
            ret = PTR_ERR(tfm);
            goto error_free_key;
        }

        if (pkey->key_is_private)
            ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen);
@@ -300,8 +304,10 @@ static int software_key_eds_op(struct kernel_pkey_params *params,

    if (issig) {
        sig = crypto_alloc_sig(alg_name, 0, 0);
        if (IS_ERR(sig))
        if (IS_ERR(sig)) {
            ret = PTR_ERR(sig);
            goto error_free_key;
        }

        if (pkey->key_is_private)
            ret = crypto_sig_set_privkey(sig, key, pkey->keylen);
@@ -313,8 +319,10 @@ static int software_key_eds_op(struct kernel_pkey_params *params,
        ksz = crypto_sig_maxsize(sig);
    } else {
        tfm = crypto_alloc_akcipher(alg_name, 0, 0);
        if (IS_ERR(tfm))
        if (IS_ERR(tfm)) {
            ret = PTR_ERR(tfm);
            goto error_free_key;
        }

        if (pkey->key_is_private)
            ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen);
@@ -411,8 +419,10 @@ int public_key_verify_signature(const struct public_key *pkey,

    key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen,
                  GFP_KERNEL);
    if (!key)
    if (!key) {
        ret = -ENOMEM;
        goto error_free_tfm;
    }

    memcpy(key, pkey->key, pkey->keylen);
    ptr = key + pkey->keylen;
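
All five hunks adopt the same pattern: when an allocator fails, the caller must capture the real error code (PTR_ERR() for an ERR_PTR-encoded pointer, -ENOMEM for a NULL) before jumping to the error label, instead of leaving a stale ret behind. A userspace rendering of the convention; the IS_ERR threshold is the usual last-page trick, and alloc_sig() is a hypothetical stand-in:

#include <errno.h>
#include <stdio.h>

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
    /* errno values are encoded as pointers into the top page */
    return (unsigned long)p >= (unsigned long)-4095;
}

static void *alloc_sig(int fail)
{
    return fail ? ERR_PTR(-ENOENT) : (void *)"sig";
}

int main(void)
{
    int ret = 0;
    void *sig = alloc_sig(1);

    if (IS_ERR(sig)) {
        ret = PTR_ERR(sig);     /* forward the real error code */
        goto out;
    }
out:
    printf("ret = %d\n", ret);  /* -2 (ENOENT), not a stale 0 */
    return 0;
}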
@@ -75,6 +75,7 @@ struct ivpu_wa_table {
    bool punit_disabled;
    bool clear_runtime_mem;
    bool d3hot_after_power_off;
    bool interrupt_clear_with_0;
};

struct ivpu_hw_info;

@@ -101,6 +101,9 @@ static void ivpu_hw_wa_init(struct ivpu_device *vdev)
    vdev->wa.punit_disabled = ivpu_is_fpga(vdev);
    vdev->wa.clear_runtime_mem = false;
    vdev->wa.d3hot_after_power_off = true;

    if (ivpu_device_id(vdev) == PCI_DEVICE_ID_MTL && ivpu_revision(vdev) < 4)
        vdev->wa.interrupt_clear_with_0 = true;
}

static void ivpu_hw_timeouts_init(struct ivpu_device *vdev)
@@ -885,7 +888,7 @@ static void ivpu_hw_mtl_irq_disable(struct ivpu_device *vdev)
    REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x1);
    REGB_WR32(MTL_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_DISABLE_MASK);
    REGV_WR64(MTL_VPU_HOST_SS_ICB_ENABLE_0, 0x0ull);
    REGB_WR32(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, 0x0);
    REGV_WR32(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, 0x0);
}

static void ivpu_hw_mtl_irq_wdt_nce_handler(struct ivpu_device *vdev)
@@ -973,12 +976,15 @@ static u32 ivpu_hw_mtl_irqb_handler(struct ivpu_device *vdev, int irq)
        schedule_recovery = true;
    }

    /*
     * Clear local interrupt status by writing 0 to all bits.
     * This must be done after interrupts are cleared at the source.
     * Writing 1 triggers an interrupt, so we can't perform read update write.
     */
    REGB_WR32(MTL_BUTTRESS_INTERRUPT_STAT, 0x0);
    /* This must be done after interrupts are cleared at the source. */
    if (IVPU_WA(interrupt_clear_with_0))
        /*
         * Writing 1 triggers an interrupt, so we can't perform read update write.
         * Clear local interrupt status by writing 0 to all bits.
         */
        REGB_WR32(MTL_BUTTRESS_INTERRUPT_STAT, 0x0);
    else
        REGB_WR32(MTL_BUTTRESS_INTERRUPT_STAT, status);

    /* Re-enable global interrupt */
    REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x0);
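
The workaround keys off the interrupt-status semantics: on the quirky early silicon writing 1 re-triggers the interrupt, so the driver must clear by writing all zeroes, while fixed silicon uses the usual write-1-to-clear and gets the read status written back. A toy simulation of the two register behaviours; the hardware model here is invented purely for illustration:

#include <stdio.h>
#include <stdint.h>

static uint32_t stat = 0x0000000a;  /* two pending interrupt bits */

/* usual write-1-to-clear: writing back the read status clears it */
static void write_stat_w1c(uint32_t v)   { stat &= ~v; }
/* quirky register: the written value replaces the status outright,
 * so the only safe clearing write is all-zeroes */
static void write_stat_quirk(uint32_t v) { stat = v; }

int main(void)
{
    uint32_t status = stat;         /* read at the top of the handler */

    write_stat_w1c(status);         /* clears only the serviced bits */
    printf("after W1C write:   0x%x\n", stat);

    stat = 0x0000000a;
    write_stat_quirk(0);            /* quirk path: write 0 to all bits */
    printf("after quirk write: 0x%x\n", stat);
    return 0;
}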
@@ -717,7 +717,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
        if (!d->config_buf)
            goto err_alloc;

        for (i = 0; i < chip->num_config_regs; i++) {
        for (i = 0; i < chip->num_config_bases; i++) {
            d->config_buf[i] = kcalloc(chip->num_config_regs,
                                       sizeof(**d->config_buf),
                                       GFP_KERNEL);

@@ -162,21 +162,15 @@ int null_register_zoned_dev(struct nullb *nullb)
    disk_set_zoned(nullb->disk, BLK_ZONED_HM);
    blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
    blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);

    if (queue_is_mq(q)) {
        int ret = blk_revalidate_disk_zones(nullb->disk, NULL);

        if (ret)
            return ret;
    } else {
        blk_queue_chunk_sectors(q, dev->zone_size_sects);
        nullb->disk->nr_zones = bdev_nr_zones(nullb->disk->part0);
    }

    blk_queue_chunk_sectors(q, dev->zone_size_sects);
    nullb->disk->nr_zones = bdev_nr_zones(nullb->disk->part0);
    blk_queue_max_zone_append_sectors(q, dev->zone_size_sects);
    disk_set_max_open_zones(nullb->disk, dev->zone_max_open);
    disk_set_max_active_zones(nullb->disk, dev->zone_max_active);

    if (queue_is_mq(q))
        return blk_revalidate_disk_zones(nullb->disk, NULL);

    return 0;
}

@@ -751,7 +751,6 @@ static int virtblk_probe_zoned_device(struct virtio_device *vdev,
{
    u32 v, wg;
    u8 model;
    int ret;

    virtio_cread(vdev, struct virtio_blk_config,
                 zoned.model, &model);
@@ -806,6 +805,7 @@ static int virtblk_probe_zoned_device(struct virtio_device *vdev,
            vblk->zone_sectors);
        return -ENODEV;
    }
    blk_queue_chunk_sectors(q, vblk->zone_sectors);
    dev_dbg(&vdev->dev, "zone sectors = %u\n", vblk->zone_sectors);

    if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
@@ -814,26 +814,22 @@ static int virtblk_probe_zoned_device(struct virtio_device *vdev,
        blk_queue_max_discard_sectors(q, 0);
    }

    ret = blk_revalidate_disk_zones(vblk->disk, NULL);
    if (!ret) {
        virtio_cread(vdev, struct virtio_blk_config,
                     zoned.max_append_sectors, &v);
        if (!v) {
            dev_warn(&vdev->dev, "zero max_append_sectors reported\n");
            return -ENODEV;
        }
        if ((v << SECTOR_SHIFT) < wg) {
            dev_err(&vdev->dev,
                "write granularity %u exceeds max_append_sectors %u limit\n",
                wg, v);
            return -ENODEV;
        }

        blk_queue_max_zone_append_sectors(q, v);
        dev_dbg(&vdev->dev, "max append sectors = %u\n", v);
    virtio_cread(vdev, struct virtio_blk_config,
                 zoned.max_append_sectors, &v);
    if (!v) {
        dev_warn(&vdev->dev, "zero max_append_sectors reported\n");
        return -ENODEV;
    }
    if ((v << SECTOR_SHIFT) < wg) {
        dev_err(&vdev->dev,
            "write granularity %u exceeds max_append_sectors %u limit\n",
            wg, v);
        return -ENODEV;
    }
    blk_queue_max_zone_append_sectors(q, v);
    dev_dbg(&vdev->dev, "max append sectors = %u\n", v);

    return ret;
    return blk_revalidate_disk_zones(vblk->disk, NULL);
}

#else

@@ -269,7 +269,7 @@ static int us2e_freq_target(struct cpufreq_policy *policy, unsigned int index)
    return smp_call_function_single(cpu, __us2e_freq_target, &index, 1);
}

static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
static int us2e_freq_cpu_init(struct cpufreq_policy *policy)
{
    unsigned int cpu = policy->cpu;
    unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;

@@ -117,7 +117,7 @@ static int us3_freq_target(struct cpufreq_policy *policy, unsigned int index)
    return smp_call_function_single(cpu, update_safari_cfg, &new_bits, 1);
}

static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
static int us3_freq_cpu_init(struct cpufreq_policy *policy)
{
    unsigned int cpu = policy->cpu;
    unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;
@@ -66,18 +66,36 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
{
    struct dma_fence_array *result;
    struct dma_fence *tmp, **array;
    ktime_t timestamp;
    unsigned int i;
    size_t count;

    count = 0;
    timestamp = ns_to_ktime(0);
    for (i = 0; i < num_fences; ++i) {
        dma_fence_unwrap_for_each(tmp, &iter[i], fences[i])
            if (!dma_fence_is_signaled(tmp))
        dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) {
            if (!dma_fence_is_signaled(tmp)) {
                ++count;
            } else if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT,
                                &tmp->flags)) {
                if (ktime_after(tmp->timestamp, timestamp))
                    timestamp = tmp->timestamp;
            } else {
                /*
                 * Use the current time if the fence is
                 * currently signaling.
                 */
                timestamp = ktime_get();
            }
        }
    }

    /*
     * If we couldn't find a pending fence just return a private signaled
     * fence with the timestamp of the last signaled one.
     */
    if (count == 0)
        return dma_fence_get_stub();
        return dma_fence_allocate_private_stub(timestamp);

    array = kmalloc_array(count, sizeof(*array), GFP_KERNEL);
    if (!array)
@@ -138,7 +156,7 @@ restart:
    } while (tmp);

    if (count == 0) {
        tmp = dma_fence_get_stub();
        tmp = dma_fence_allocate_private_stub(ktime_get());
        goto return_tmp;
    }
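
The merge path now tracks the newest signal time across all already-signaled fences, falling back to the current time for a fence whose timestamp has not been published yet, and hands that to the stub fence. A standalone sketch of that selection logic, with plain structs in place of dma_fence:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct fence { int signaled; int has_ts; int64_t ts_ns; };

static int64_t now_ns(void)
{
    struct timespec ts;

    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (int64_t)ts.tv_sec * 1000000000 + ts.tv_nsec;
}

static int64_t merged_stub_timestamp(const struct fence *f, int n)
{
    int64_t stamp = 0;

    for (int i = 0; i < n; i++) {
        if (!f[i].signaled)
            continue;
        if (f[i].has_ts) {
            if (f[i].ts_ns > stamp)     /* keep the newest timestamp */
                stamp = f[i].ts_ns;
        } else {
            stamp = now_ns();           /* still signaling: use now */
        }
    }
    return stamp;
}

int main(void)
{
    struct fence f[] = {
        { 1, 1, 1000 },
        { 1, 1, 5000 },
    };

    printf("stub timestamp = %lld\n",
           (long long)merged_stub_timestamp(f, 2));     /* 5000 */
    return 0;
}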
@@ -150,16 +150,17 @@ EXPORT_SYMBOL(dma_fence_get_stub);

/**
 * dma_fence_allocate_private_stub - return a private, signaled fence
 * @timestamp: timestamp when the fence was signaled
 *
 * Return a newly allocated and signaled stub fence.
 */
struct dma_fence *dma_fence_allocate_private_stub(void)
struct dma_fence *dma_fence_allocate_private_stub(ktime_t timestamp)
{
    struct dma_fence *fence;

    fence = kzalloc(sizeof(*fence), GFP_KERNEL);
    if (fence == NULL)
        return ERR_PTR(-ENOMEM);
        return NULL;

    dma_fence_init(fence,
                   &dma_fence_stub_ops,
@@ -169,7 +170,7 @@ struct dma_fence *dma_fence_allocate_private_stub(void)
    set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
            &fence->flags);

    dma_fence_signal(fence);
    dma_fence_signal_timestamp(fence, timestamp);

    return fence;
}

@@ -1296,6 +1296,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
int amdgpu_device_pci_reset(struct amdgpu_device *adev);
bool amdgpu_device_need_post(struct amdgpu_device *adev);
bool amdgpu_device_pcie_dynamic_switching_supported(void);
bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);
bool amdgpu_device_aspm_support_quirk(void);

@@ -2881,6 +2881,9 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
        if (!attachment->is_mapped)
            continue;

        if (attachment->bo_va->base.bo->tbo.pin_count)
            continue;

        kfd_mem_dmaunmap_attachment(mem, attachment);
        ret = update_gpuvm_pte(mem, attachment, &sync_obj);
        if (ret) {

@@ -1458,6 +1458,25 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
    return true;
}

/*
 * Intel hosts such as Raptor Lake and Sapphire Rapids don't support dynamic
 * speed switching. Until we have confirmation from Intel that a specific host
 * supports it, it's safer that we keep it disabled for all.
 *
 * https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/
 * https://gitlab.freedesktop.org/drm/amd/-/issues/2663
 */
bool amdgpu_device_pcie_dynamic_switching_supported(void)
{
#if IS_ENABLED(CONFIG_X86)
    struct cpuinfo_x86 *c = &cpu_data(0);

    if (c->x86_vendor == X86_VENDOR_INTEL)
        return false;
#endif
    return true;
}

/**
 * amdgpu_device_should_use_aspm - check if the device should program ASPM
 *

@@ -295,5 +295,9 @@ int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu,
                                        uint32_t *size,
                                        uint32_t pptable_id);

int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
                                     uint32_t pcie_gen_cap,
                                     uint32_t pcie_width_cap);

#endif
#endif
|
|||
}
|
||||
mutex_lock(&adev->pm.mutex);
|
||||
r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
|
||||
mutex_unlock(&adev->pm.mutex);
|
||||
if (r)
|
||||
goto fail;
|
||||
|
||||
|
@ -2130,6 +2129,7 @@ static int arcturus_i2c_xfer(struct i2c_adapter *i2c_adap,
|
|||
}
|
||||
r = num_msgs;
|
||||
fail:
|
||||
mutex_unlock(&adev->pm.mutex);
|
||||
kfree(req);
|
||||
return r;
|
||||
}
|
||||
|
|
|
@ -3021,7 +3021,6 @@ static int navi10_i2c_xfer(struct i2c_adapter *i2c_adap,
|
|||
}
|
||||
mutex_lock(&adev->pm.mutex);
|
||||
r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
|
||||
mutex_unlock(&adev->pm.mutex);
|
||||
if (r)
|
||||
goto fail;
|
||||
|
||||
|
@ -3038,6 +3037,7 @@ static int navi10_i2c_xfer(struct i2c_adapter *i2c_adap,
|
|||
}
|
||||
r = num_msgs;
|
||||
fail:
|
||||
mutex_unlock(&adev->pm.mutex);
|
||||
kfree(req);
|
||||
return r;
|
||||
}
|
||||
|
|
|
@ -2077,89 +2077,36 @@ static int sienna_cichlid_display_disable_memory_clock_switch(struct smu_context
|
|||
return ret;
|
||||
}
|
||||
|
||||
static void sienna_cichlid_get_override_pcie_settings(struct smu_context *smu,
|
||||
uint32_t *gen_speed_override,
|
||||
uint32_t *lane_width_override)
|
||||
{
|
||||
struct amdgpu_device *adev = smu->adev;
|
||||
|
||||
*gen_speed_override = 0xff;
|
||||
*lane_width_override = 0xff;
|
||||
|
||||
switch (adev->pdev->device) {
|
||||
case 0x73A0:
|
||||
case 0x73A1:
|
||||
case 0x73A2:
|
||||
case 0x73A3:
|
||||
case 0x73AB:
|
||||
case 0x73AE:
|
||||
/* Bit 7:0: PCIE lane width, 1 to 7 corresponds is x1 to x32 */
|
||||
*lane_width_override = 6;
|
||||
break;
|
||||
case 0x73E0:
|
||||
case 0x73E1:
|
||||
case 0x73E3:
|
||||
*lane_width_override = 4;
|
||||
break;
|
||||
case 0x7420:
|
||||
case 0x7421:
|
||||
case 0x7422:
|
||||
case 0x7423:
|
||||
case 0x7424:
|
||||
*lane_width_override = 3;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
#define MAX(a, b) ((a) > (b) ? (a) : (b))
|
||||
|
||||
static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
|
||||
uint32_t pcie_gen_cap,
|
||||
uint32_t pcie_width_cap)
|
||||
{
|
||||
struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
|
||||
struct smu_11_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table;
|
||||
uint32_t gen_speed_override, lane_width_override;
|
||||
uint8_t *table_member1, *table_member2;
|
||||
uint32_t min_gen_speed, max_gen_speed;
|
||||
uint32_t min_lane_width, max_lane_width;
|
||||
uint32_t smu_pcie_arg;
|
||||
u32 smu_pcie_arg;
|
||||
int ret, i;
|
||||
|
||||
GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1);
|
||||
GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2);
|
||||
/* PCIE gen speed and lane width override */
|
||||
if (!amdgpu_device_pcie_dynamic_switching_supported()) {
|
||||
if (pcie_table->pcie_gen[NUM_LINK_LEVELS - 1] < pcie_gen_cap)
|
||||
pcie_gen_cap = pcie_table->pcie_gen[NUM_LINK_LEVELS - 1];
|
||||
|
||||
sienna_cichlid_get_override_pcie_settings(smu,
|
||||
&gen_speed_override,
|
||||
&lane_width_override);
|
||||
if (pcie_table->pcie_lane[NUM_LINK_LEVELS - 1] < pcie_width_cap)
|
||||
pcie_width_cap = pcie_table->pcie_lane[NUM_LINK_LEVELS - 1];
|
||||
|
||||
/* PCIE gen speed override */
|
||||
if (gen_speed_override != 0xff) {
|
||||
min_gen_speed = MIN(pcie_gen_cap, gen_speed_override);
|
||||
max_gen_speed = MIN(pcie_gen_cap, gen_speed_override);
|
||||
/* Force all levels to use the same settings */
|
||||
for (i = 0; i < NUM_LINK_LEVELS; i++) {
|
||||
pcie_table->pcie_gen[i] = pcie_gen_cap;
|
||||
pcie_table->pcie_lane[i] = pcie_width_cap;
|
||||
}
|
||||
} else {
|
||||
min_gen_speed = MAX(0, table_member1[0]);
|
||||
max_gen_speed = MIN(pcie_gen_cap, table_member1[1]);
|
||||
min_gen_speed = min_gen_speed > max_gen_speed ?
|
||||
max_gen_speed : min_gen_speed;
|
||||
for (i = 0; i < NUM_LINK_LEVELS; i++) {
|
||||
if (pcie_table->pcie_gen[i] > pcie_gen_cap)
|
||||
pcie_table->pcie_gen[i] = pcie_gen_cap;
|
||||
if (pcie_table->pcie_lane[i] > pcie_width_cap)
|
||||
pcie_table->pcie_lane[i] = pcie_width_cap;
|
||||
}
|
||||
}
|
||||
pcie_table->pcie_gen[0] = min_gen_speed;
|
||||
pcie_table->pcie_gen[1] = max_gen_speed;
|
||||
|
||||
/* PCIE lane width override */
|
||||
if (lane_width_override != 0xff) {
|
||||
min_lane_width = MIN(pcie_width_cap, lane_width_override);
|
||||
max_lane_width = MIN(pcie_width_cap, lane_width_override);
|
||||
} else {
|
||||
min_lane_width = MAX(1, table_member2[0]);
|
||||
max_lane_width = MIN(pcie_width_cap, table_member2[1]);
|
||||
min_lane_width = min_lane_width > max_lane_width ?
|
||||
max_lane_width : min_lane_width;
|
||||
}
|
||||
pcie_table->pcie_lane[0] = min_lane_width;
|
||||
pcie_table->pcie_lane[1] = max_lane_width;
|
||||
|
||||
for (i = 0; i < NUM_LINK_LEVELS; i++) {
|
||||
smu_pcie_arg = (i << 16 |
|
||||
|
@ -3842,7 +3789,6 @@ static int sienna_cichlid_i2c_xfer(struct i2c_adapter *i2c_adap,
|
|||
}
|
||||
mutex_lock(&adev->pm.mutex);
|
||||
r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
|
||||
mutex_unlock(&adev->pm.mutex);
|
||||
if (r)
|
||||
goto fail;
|
||||
|
||||
|
@ -3859,6 +3805,7 @@ static int sienna_cichlid_i2c_xfer(struct i2c_adapter *i2c_adap,
|
|||
}
|
||||
r = num_msgs;
|
||||
fail:
|
||||
mutex_unlock(&adev->pm.mutex);
|
||||
kfree(req);
|
||||
return r;
|
||||
}
|
||||
|
|
|
@ -1525,7 +1525,6 @@ static int aldebaran_i2c_xfer(struct i2c_adapter *i2c_adap,
|
|||
}
|
||||
mutex_lock(&adev->pm.mutex);
|
||||
r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
|
||||
mutex_unlock(&adev->pm.mutex);
|
||||
if (r)
|
||||
goto fail;
|
||||
|
||||
|
@ -1542,6 +1541,7 @@ static int aldebaran_i2c_xfer(struct i2c_adapter *i2c_adap,
|
|||
}
|
||||
r = num_msgs;
|
||||
fail:
|
||||
mutex_unlock(&adev->pm.mutex);
|
||||
kfree(req);
|
||||
return r;
|
||||
}
|
||||
|
|
|
@ -2424,3 +2424,51 @@ int smu_v13_0_mode1_reset(struct smu_context *smu)
|
|||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
|
||||
uint32_t pcie_gen_cap,
|
||||
uint32_t pcie_width_cap)
|
||||
{
|
||||
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
|
||||
struct smu_13_0_pcie_table *pcie_table =
|
||||
&dpm_context->dpm_tables.pcie_table;
|
||||
int num_of_levels = pcie_table->num_of_link_levels;
|
||||
uint32_t smu_pcie_arg;
|
||||
int ret, i;
|
||||
|
||||
if (!amdgpu_device_pcie_dynamic_switching_supported()) {
|
||||
if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap)
|
||||
pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1];
|
||||
|
||||
if (pcie_table->pcie_lane[num_of_levels - 1] < pcie_width_cap)
|
||||
pcie_width_cap = pcie_table->pcie_lane[num_of_levels - 1];
|
||||
|
||||
/* Force all levels to use the same settings */
|
||||
for (i = 0; i < num_of_levels; i++) {
|
||||
pcie_table->pcie_gen[i] = pcie_gen_cap;
|
||||
pcie_table->pcie_lane[i] = pcie_width_cap;
|
||||
}
|
||||
} else {
|
||||
for (i = 0; i < num_of_levels; i++) {
|
||||
if (pcie_table->pcie_gen[i] > pcie_gen_cap)
|
||||
pcie_table->pcie_gen[i] = pcie_gen_cap;
|
||||
if (pcie_table->pcie_lane[i] > pcie_width_cap)
|
||||
pcie_table->pcie_lane[i] = pcie_width_cap;
|
||||
}
|
||||
}
|
||||
|
||||
for (i = 0; i < num_of_levels; i++) {
|
||||
smu_pcie_arg = i << 16;
|
||||
smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
|
||||
smu_pcie_arg |= pcie_table->pcie_lane[i];
|
||||
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu,
|
||||
SMU_MSG_OverridePcieParameters,
|
||||
smu_pcie_arg,
|
||||
NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
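
The per-level message argument packs three fields into one 32-bit word: link level in bits 31:16, PCIe generation in bits 15:8, and lane-width code in bits 7:0. A sketch of the encoding and its decode; the gen and width codes below are illustrative values, not taken from the firmware spec:

#include <stdio.h>
#include <stdint.h>

static uint32_t pack_pcie_arg(unsigned level, unsigned gen, unsigned lanes)
{
    /* level << 16 | gen << 8 | lanes, as in the loop above */
    return (uint32_t)level << 16 | (gen & 0xff) << 8 | (lanes & 0xff);
}

int main(void)
{
    /* level 1; width code 6 follows the "1 to 7 corresponds is x1 to
     * x32" encoding mentioned in the removed sienna_cichlid comment */
    uint32_t arg = pack_pcie_arg(1, 3, 6);

    printf("smu_pcie_arg = 0x%06x\n", arg);     /* 0x010306 */
    printf("level=%u gen=%u lanes=%u\n",
           arg >> 16, (arg >> 8) & 0xff, arg & 0xff);
    return 0;
}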
@@ -1645,37 +1645,6 @@ static int smu_v13_0_0_force_clk_levels(struct smu_context *smu,
    return ret;
}

static int smu_v13_0_0_update_pcie_parameters(struct smu_context *smu,
                                              uint32_t pcie_gen_cap,
                                              uint32_t pcie_width_cap)
{
    struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
    struct smu_13_0_pcie_table *pcie_table =
                &dpm_context->dpm_tables.pcie_table;
    uint32_t smu_pcie_arg;
    int ret, i;

    for (i = 0; i < pcie_table->num_of_link_levels; i++) {
        if (pcie_table->pcie_gen[i] > pcie_gen_cap)
            pcie_table->pcie_gen[i] = pcie_gen_cap;
        if (pcie_table->pcie_lane[i] > pcie_width_cap)
            pcie_table->pcie_lane[i] = pcie_width_cap;

        smu_pcie_arg = i << 16;
        smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
        smu_pcie_arg |= pcie_table->pcie_lane[i];

        ret = smu_cmn_send_smc_msg_with_param(smu,
                                              SMU_MSG_OverridePcieParameters,
                                              smu_pcie_arg,
                                              NULL);
        if (ret)
            return ret;
    }

    return 0;
}

static const struct smu_temperature_range smu13_thermal_policy[] = {
    {-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
    { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
@@ -2320,7 +2289,6 @@ static int smu_v13_0_0_i2c_xfer(struct i2c_adapter *i2c_adap,
    }
    mutex_lock(&adev->pm.mutex);
    r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
    mutex_unlock(&adev->pm.mutex);
    if (r)
        goto fail;

@@ -2337,6 +2305,7 @@ static int smu_v13_0_0_i2c_xfer(struct i2c_adapter *i2c_adap,
    }
    r = num_msgs;
fail:
    mutex_unlock(&adev->pm.mutex);
    kfree(req);
    return r;
}
@@ -2654,7 +2623,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
    .feature_is_enabled = smu_cmn_feature_is_enabled,
    .print_clk_levels = smu_v13_0_0_print_clk_levels,
    .force_clk_levels = smu_v13_0_0_force_clk_levels,
    .update_pcie_parameters = smu_v13_0_0_update_pcie_parameters,
    .update_pcie_parameters = smu_v13_0_update_pcie_parameters,
    .get_thermal_temperature_range = smu_v13_0_0_get_thermal_temperature_range,
    .register_irq_handler = smu_v13_0_register_irq_handler,
    .enable_thermal_alert = smu_v13_0_enable_thermal_alert,

@@ -1763,7 +1763,6 @@ static int smu_v13_0_6_i2c_xfer(struct i2c_adapter *i2c_adap,
    }
    mutex_lock(&adev->pm.mutex);
    r = smu_v13_0_6_request_i2c_xfer(smu, req);
    mutex_unlock(&adev->pm.mutex);
    if (r)
        goto fail;

@@ -1780,6 +1779,7 @@ static int smu_v13_0_6_i2c_xfer(struct i2c_adapter *i2c_adap,
    }
    r = num_msgs;
fail:
    mutex_unlock(&adev->pm.mutex);
    kfree(req);
    return r;
}

@@ -1635,37 +1635,6 @@ static int smu_v13_0_7_force_clk_levels(struct smu_context *smu,
    return ret;
}

static int smu_v13_0_7_update_pcie_parameters(struct smu_context *smu,
                                              uint32_t pcie_gen_cap,
                                              uint32_t pcie_width_cap)
{
    struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
    struct smu_13_0_pcie_table *pcie_table =
                &dpm_context->dpm_tables.pcie_table;
    uint32_t smu_pcie_arg;
    int ret, i;

    for (i = 0; i < pcie_table->num_of_link_levels; i++) {
        if (pcie_table->pcie_gen[i] > pcie_gen_cap)
            pcie_table->pcie_gen[i] = pcie_gen_cap;
        if (pcie_table->pcie_lane[i] > pcie_width_cap)
            pcie_table->pcie_lane[i] = pcie_width_cap;

        smu_pcie_arg = i << 16;
        smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
        smu_pcie_arg |= pcie_table->pcie_lane[i];

        ret = smu_cmn_send_smc_msg_with_param(smu,
                                              SMU_MSG_OverridePcieParameters,
                                              smu_pcie_arg,
                                              NULL);
        if (ret)
            return ret;
    }

    return 0;
}

static const struct smu_temperature_range smu13_thermal_policy[] =
{
    {-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
@@ -2234,7 +2203,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
    .feature_is_enabled = smu_cmn_feature_is_enabled,
    .print_clk_levels = smu_v13_0_7_print_clk_levels,
    .force_clk_levels = smu_v13_0_7_force_clk_levels,
    .update_pcie_parameters = smu_v13_0_7_update_pcie_parameters,
    .update_pcie_parameters = smu_v13_0_update_pcie_parameters,
    .get_thermal_temperature_range = smu_v13_0_7_get_thermal_temperature_range,
    .register_irq_handler = smu_v13_0_register_irq_handler,
    .enable_thermal_alert = smu_v13_0_enable_thermal_alert,

@@ -209,10 +209,6 @@ void armada_fbdev_setup(struct drm_device *dev)
        goto err_drm_client_init;
    }

    ret = armada_fbdev_client_hotplug(&fbh->client);
    if (ret)
        drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);

    drm_client_register(&fbh->client);

    return;

@@ -1426,9 +1426,9 @@ void dw_hdmi_set_high_tmds_clock_ratio(struct dw_hdmi *hdmi,
    /* Control for TMDS Bit Period/TMDS Clock-Period Ratio */
    if (dw_hdmi_support_scdc(hdmi, display)) {
        if (mtmdsclock > HDMI14_MAX_TMDSCLK)
            drm_scdc_set_high_tmds_clock_ratio(&hdmi->connector, 1);
            drm_scdc_set_high_tmds_clock_ratio(hdmi->curr_conn, 1);
        else
            drm_scdc_set_high_tmds_clock_ratio(&hdmi->connector, 0);
            drm_scdc_set_high_tmds_clock_ratio(hdmi->curr_conn, 0);
    }
}
EXPORT_SYMBOL_GPL(dw_hdmi_set_high_tmds_clock_ratio);
@@ -2116,7 +2116,7 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
                   min_t(u8, bytes, SCDC_MIN_SOURCE_VERSION));

            /* Enabled Scrambling in the Sink */
            drm_scdc_set_scrambling(&hdmi->connector, 1);
            drm_scdc_set_scrambling(hdmi->curr_conn, 1);

            /*
             * To activate the scrambler feature, you must ensure
@@ -2132,7 +2132,7 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
            hdmi_writeb(hdmi, 0, HDMI_FC_SCRAMBLER_CTRL);
            hdmi_writeb(hdmi, (u8)~HDMI_MC_SWRSTZ_TMDSSWRST_REQ,
                        HDMI_MC_SWRSTZ);
            drm_scdc_set_scrambling(&hdmi->connector, 0);
            drm_scdc_set_scrambling(hdmi->curr_conn, 0);
        }
    }

@@ -3553,6 +3553,7 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
    hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID
                     | DRM_BRIDGE_OP_HPD;
    hdmi->bridge.interlace_allowed = true;
    hdmi->bridge.ddc = hdmi->ddc;
#ifdef CONFIG_OF
    hdmi->bridge.of_node = pdev->dev.of_node;
#endif
@@ -170,10 +170,10 @@
 * @pwm_refclk_freq: Cache for the reference clock input to the PWM.
 */
struct ti_sn65dsi86 {
    struct auxiliary_device bridge_aux;
    struct auxiliary_device gpio_aux;
    struct auxiliary_device aux_aux;
    struct auxiliary_device pwm_aux;
    struct auxiliary_device *bridge_aux;
    struct auxiliary_device *gpio_aux;
    struct auxiliary_device *aux_aux;
    struct auxiliary_device *pwm_aux;

    struct device *dev;
    struct regmap *regmap;
@@ -468,27 +468,34 @@ static void ti_sn65dsi86_delete_aux(void *data)
    auxiliary_device_delete(data);
}

/*
 * AUX bus docs say that a non-NULL release is mandatory, but it makes no
 * sense for the model used here where all of the aux devices are allocated
 * in the single shared structure. We'll use this noop as a workaround.
 */
static void ti_sn65dsi86_noop(struct device *dev) {}
static void ti_sn65dsi86_aux_device_release(struct device *dev)
{
    struct auxiliary_device *aux = container_of(dev, struct auxiliary_device, dev);

    kfree(aux);
}

static int ti_sn65dsi86_add_aux_device(struct ti_sn65dsi86 *pdata,
                                       struct auxiliary_device *aux,
                                       struct auxiliary_device **aux_out,
                                       const char *name)
{
    struct device *dev = pdata->dev;
    struct auxiliary_device *aux;
    int ret;

    aux = kzalloc(sizeof(*aux), GFP_KERNEL);
    if (!aux)
        return -ENOMEM;

    aux->name = name;
    aux->dev.parent = dev;
    aux->dev.release = ti_sn65dsi86_noop;
    aux->dev.release = ti_sn65dsi86_aux_device_release;
    device_set_of_node_from_dev(&aux->dev, dev);
    ret = auxiliary_device_init(aux);
    if (ret)
    if (ret) {
        kfree(aux);
        return ret;
    }
    ret = devm_add_action_or_reset(dev, ti_sn65dsi86_uninit_aux, aux);
    if (ret)
        return ret;
@@ -497,6 +504,8 @@ static int ti_sn65dsi86_add_aux_device(struct ti_sn65dsi86 *pdata,
    if (ret)
        return ret;
    ret = devm_add_action_or_reset(dev, ti_sn65dsi86_delete_aux, aux);
    if (!ret)
        *aux_out = aux;

    return ret;
}
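
The fix gives every aux device its own heap allocation whose release callback recovers the containing object with container_of() and frees it, which is the lifetime model the device core expects. A userspace sketch of that pattern:

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct device { void (*release)(struct device *dev); };
struct aux_device { struct device dev; const char *name; };

/* Runs when the last reference to the embedded device goes away;
 * frees the whole containing allocation, as in the hunk above. */
static void aux_device_release(struct device *dev)
{
    struct aux_device *aux = container_of(dev, struct aux_device, dev);

    printf("freeing %s\n", aux->name);
    free(aux);
}

int main(void)
{
    struct aux_device *aux = calloc(1, sizeof(*aux));

    if (!aux)
        return 1;
    aux->name = "bridge";
    aux->dev.release = aux_device_release;
    /* ...device is used; when the core drops the last reference: */
    aux->dev.release(&aux->dev);
    return 0;
}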
@@ -122,13 +122,34 @@ EXPORT_SYMBOL(drm_client_init);
 * drm_client_register() it is no longer permissible to call drm_client_release()
 * directly (outside the unregister callback), instead cleanup will happen
 * automatically on driver unload.
 *
 * Registering a client generates a hotplug event that allows the client
 * to set up its display from pre-existing outputs. The client must have
 * initialized its state to able to handle the hotplug event successfully.
 */
void drm_client_register(struct drm_client_dev *client)
{
    struct drm_device *dev = client->dev;
    int ret;

    mutex_lock(&dev->clientlist_mutex);
    list_add(&client->list, &dev->clientlist);

    if (client->funcs && client->funcs->hotplug) {
        /*
         * Perform an initial hotplug event to pick up the
         * display configuration for the client. This step
         * has to be performed *after* registering the client
         * in the list of clients, or a concurrent hotplug
         * event might be lost; leaving the display off.
         *
         * Hold the clientlist_mutex as for a regular hotplug
         * event.
         */
        ret = client->funcs->hotplug(client);
        if (ret)
            drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
    }
    mutex_unlock(&dev->clientlist_mutex);
}
EXPORT_SYMBOL(drm_client_register);

@@ -217,7 +217,7 @@ static const struct drm_client_funcs drm_fbdev_dma_client_funcs = {
 * drm_fbdev_dma_setup() - Setup fbdev emulation for GEM DMA helpers
 * @dev: DRM device
 * @preferred_bpp: Preferred bits per pixel for the device.
 *                 @dev->mode_config.preferred_depth is used if this is zero.
 *                 32 is used if this is zero.
 *
 * This function sets up fbdev emulation for GEM DMA drivers that support
 * dumb buffers with a virtual address and that can be mmap'ed.
@@ -252,10 +252,6 @@ void drm_fbdev_dma_setup(struct drm_device *dev, unsigned int preferred_bpp)
        goto err_drm_client_init;
    }

    ret = drm_fbdev_dma_client_hotplug(&fb_helper->client);
    if (ret)
        drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);

    drm_client_register(&fb_helper->client);

    return;

@@ -339,10 +339,6 @@ void drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
        goto err_drm_client_init;
    }

    ret = drm_fbdev_generic_client_hotplug(&fb_helper->client);
    if (ret)
        drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);

    drm_client_register(&fb_helper->client);

    return;

@@ -353,10 +353,10 @@ EXPORT_SYMBOL(drm_syncobj_replace_fence);
 */
static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
{
    struct dma_fence *fence = dma_fence_allocate_private_stub();
    struct dma_fence *fence = dma_fence_allocate_private_stub(ktime_get());

    if (IS_ERR(fence))
        return PTR_ERR(fence);
    if (!fence)
        return -ENOMEM;

    drm_syncobj_replace_fence(syncobj, fence);
    dma_fence_put(fence);

@@ -215,10 +215,6 @@ void exynos_drm_fbdev_setup(struct drm_device *dev)
    if (ret)
        goto err_drm_client_init;

    ret = exynos_drm_fbdev_client_hotplug(&fb_helper->client);
    if (ret)
        drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);

    drm_client_register(&fb_helper->client);

    return;

@@ -328,10 +328,6 @@ void psb_fbdev_setup(struct drm_psb_private *dev_priv)
        goto err_drm_fb_helper_unprepare;
    }

    ret = psb_fbdev_client_hotplug(&fb_helper->client);
    if (ret)
        drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);

    drm_client_register(&fb_helper->client);

    return;

@@ -4564,7 +4564,6 @@ copy_bigjoiner_crtc_state_modeset(struct intel_atomic_state *state,
    saved_state->uapi = slave_crtc_state->uapi;
    saved_state->scaler_state = slave_crtc_state->scaler_state;
    saved_state->shared_dpll = slave_crtc_state->shared_dpll;
    saved_state->dpll_hw_state = slave_crtc_state->dpll_hw_state;
    saved_state->crc_enabled = slave_crtc_state->crc_enabled;

    intel_crtc_free_hw_state(slave_crtc_state);

@@ -37,9 +37,6 @@ static u64 gen8_pte_encode(dma_addr_t addr,
    if (unlikely(flags & PTE_READ_ONLY))
        pte &= ~GEN8_PAGE_RW;

    if (flags & PTE_LM)
        pte |= GEN12_PPGTT_PTE_LM;

    /*
     * For pre-gen12 platforms pat_index is the same as enum
     * i915_cache_level, so the switch-case here is still valid.

@@ -670,7 +670,7 @@ __vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size)
    if (IS_ERR(obj))
        return ERR_CAST(obj);

    i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);
    i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

    vma = i915_vma_instance(obj, vm, NULL);
    if (IS_ERR(vma)) {
@@ -868,8 +868,17 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
            oa_report_id_clear(stream, report32);
            oa_timestamp_clear(stream, report32);
        } else {
            u8 *oa_buf_end = stream->oa_buffer.vaddr +
                             OA_BUFFER_SIZE;
            u32 part = oa_buf_end - (u8 *)report32;

            /* Zero out the entire report */
            memset(report32, 0, report_size);
            if (report_size <= part) {
                memset(report32, 0, report_size);
            } else {
                memset(report32, 0, part);
                memset(oa_buf_base, 0, report_size - part);
            }
        }
    }
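
The new code handles an OA report that straddles the end of the circular buffer: zero the bytes up to the buffer end first, then the remainder from the buffer base. The same split-memset shape in a standalone sketch:

#include <stdio.h>
#include <string.h>

#define BUF_SIZE 16

static char buf[BUF_SIZE];

static void clear_record(size_t offset, size_t record_size)
{
    size_t part = BUF_SIZE - offset;    /* bytes until the buffer end */

    if (record_size <= part) {
        memset(buf + offset, 0, record_size);
    } else {
        memset(buf + offset, 0, part);
        memset(buf, 0, record_size - part); /* wrapped tail */
    }
}

int main(void)
{
    memset(buf, 'x', sizeof(buf));
    clear_record(12, 8);    /* 4 bytes at the end + 4 at the start */
    for (int i = 0; i < BUF_SIZE; i++)
        putchar(buf[i] ? buf[i] : '.');
    putchar('\n');          /* ....xxxxxxxx.... */
    return 0;
}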
@@ -246,10 +246,6 @@ void msm_fbdev_setup(struct drm_device *dev)
        goto err_drm_fb_helper_unprepare;
    }

    ret = msm_fbdev_client_hotplug(&helper->client);
    if (ret)
        drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);

    drm_client_register(&helper->client);

    return;

@@ -910,15 +910,19 @@ nv50_msto_prepare(struct drm_atomic_state *state,
    struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
    struct nv50_mstc *mstc = msto->mstc;
    struct nv50_mstm *mstm = mstc->mstm;
    struct drm_dp_mst_atomic_payload *payload;
    struct drm_dp_mst_topology_state *old_mst_state;
    struct drm_dp_mst_atomic_payload *payload, *old_payload;

    NV_ATOMIC(drm, "%s: msto prepare\n", msto->encoder.name);

    old_mst_state = drm_atomic_get_old_mst_topology_state(state, mgr);

    payload = drm_atomic_get_mst_payload_state(mst_state, mstc->port);
    old_payload = drm_atomic_get_mst_payload_state(old_mst_state, mstc->port);

    // TODO: Figure out if we want to do a better job of handling VCPI allocation failures here?
    if (msto->disabled) {
        drm_dp_remove_payload(mgr, mst_state, payload, payload);
        drm_dp_remove_payload(mgr, mst_state, old_payload, payload);

        nvif_outp_dp_mst_vcpi(&mstm->outp->outp, msto->head->base.index, 0, 0, 0, 0);
    } else {

@@ -90,6 +90,7 @@ nouveau_channel_del(struct nouveau_channel **pchan)
        if (cli)
            nouveau_svmm_part(chan->vmm->svmm, chan->inst);

        nvif_object_dtor(&chan->blit);
        nvif_object_dtor(&chan->nvsw);
        nvif_object_dtor(&chan->gart);
        nvif_object_dtor(&chan->vram);

@@ -53,6 +53,7 @@ struct nouveau_channel {
    u32 user_put;

    struct nvif_object user;
    struct nvif_object blit;

    struct nvif_event kill;
    atomic_t killed;

@@ -375,15 +375,29 @@ nouveau_accel_gr_init(struct nouveau_drm *drm)
    ret = nvif_object_ctor(&drm->channel->user, "drmNvsw",
                           NVDRM_NVSW, nouveau_abi16_swclass(drm),
                           NULL, 0, &drm->channel->nvsw);

    if (ret == 0 && device->info.chipset >= 0x11) {
        ret = nvif_object_ctor(&drm->channel->user, "drmBlit",
                               0x005f, 0x009f,
                               NULL, 0, &drm->channel->blit);
    }

    if (ret == 0) {
        struct nvif_push *push = drm->channel->chan.push;
        ret = PUSH_WAIT(push, 2);
        if (ret == 0)
        ret = PUSH_WAIT(push, 8);
        if (ret == 0) {
            if (device->info.chipset >= 0x11) {
                PUSH_NVSQ(push, NV05F, 0x0000, drm->channel->blit.handle);
                PUSH_NVSQ(push, NV09F, 0x0120, 0,
                                       0x0124, 1,
                                       0x0128, 2);
            }
            PUSH_NVSQ(push, NV_SW, 0x0000, drm->channel->nvsw.handle);
        }
    }

    if (ret) {
        NV_ERROR(drm, "failed to allocate sw class, %d\n", ret);
        NV_ERROR(drm, "failed to allocate sw or blit class, %d\n", ret);
        nouveau_accel_gr_fini(drm);
        return;
    }

@@ -295,6 +295,7 @@ g94_sor = {
    .clock = nv50_sor_clock,
    .war_2 = g94_sor_war_2,
    .war_3 = g94_sor_war_3,
    .hdmi = &g84_sor_hdmi,
    .dp = &g94_sor_dp,
};

@@ -125,7 +125,7 @@ gt215_sor_hdmi_infoframe_avi(struct nvkm_ior *ior, int head, void *data, u32 size)
    pack_hdmi_infoframe(&avi, data, size);

    nvkm_mask(device, 0x61c520 + soff, 0x00000001, 0x00000000);
    if (size)
    if (!size)
        return;

    nvkm_wr32(device, 0x61c528 + soff, avi.header);

@@ -224,7 +224,7 @@ nvkm_acr_oneinit(struct nvkm_subdev *subdev)
    u64 falcons;
    int ret, i;

    if (list_empty(&acr->hsfw)) {
    if (list_empty(&acr->hsfw) || !acr->func || !acr->func->wpr_layout) {
        nvkm_debug(subdev, "No HSFW(s)\n");
        nvkm_acr_cleanup(acr);
        return 0;

@@ -318,10 +318,6 @@ void omap_fbdev_setup(struct drm_device *dev)

    INIT_WORK(&fbdev->work, pan_worker);

    ret = omap_fbdev_client_hotplug(&helper->client);
    if (ret)
        drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);

    drm_client_register(&helper->client);

    return;

@@ -2178,6 +2178,7 @@ static const struct panel_desc innolux_at043tn24 = {
        .height = 54,
    },
    .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
    .connector_type = DRM_MODE_CONNECTOR_DPI,
    .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
};

@@ -3202,6 +3203,7 @@ static const struct drm_display_mode powertip_ph800480t013_idf02_mode = {
    .vsync_start = 480 + 49,
    .vsync_end = 480 + 49 + 2,
    .vtotal = 480 + 49 + 2 + 22,
    .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};

static const struct panel_desc powertip_ph800480t013_idf02 = {