Merge 6.3-rc6 into usb-next

We need the USB fixes in here for testing, and this resolves two merge
conflicts, one pointed out by linux-next:

	drivers/usb/dwc3/dwc3-pci.c
	drivers/usb/host/xhci-pci.c

Reported-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit 8e86652e3e
Author: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Date:   2023-04-10 08:56:59 +02:00

401 changed files with 3820 additions and 1922 deletions


@@ -265,7 +265,9 @@ Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski@samsung.com>
 Krzysztof Kozlowski <krzk@kernel.org> <krzysztof.kozlowski@canonical.com>
 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
 Kuogee Hsieh <quic_khsieh@quicinc.com> <khsieh@codeaurora.org>
+Leonard Crestez <leonard.crestez@nxp.com> Leonard Crestez <cdleonard@gmail.com>
 Leonardo Bras <leobras.c@gmail.com> <leonardo@linux.ibm.com>
+Leonard Göhrs <l.goehrs@pengutronix.de>
 Leonid I Ananiev <leonid.i.ananiev@intel.com>
 Leon Romanovsky <leon@kernel.org> <leon@leon.nu>
 Leon Romanovsky <leon@kernel.org> <leonro@mellanox.com>


@@ -36,7 +36,6 @@ problems and bugs in particular.
    reporting-issues
    reporting-regressions
-   security-bugs
    bug-hunting
    bug-bisect
    tainted-kernels


@@ -395,7 +395,7 @@ might want to be aware of; it for example explains how to add your issue to the
 list of tracked regressions, to ensure it won't fall through the cracks.

 What qualifies as security issue is left to your judgment. Consider reading
-Documentation/admin-guide/security-bugs.rst before proceeding, as it
+Documentation/process/security-bugs.rst before proceeding, as it
 provides additional details how to best handle security issues.

 An issue is a 'really severe problem' when something totally unacceptably bad
@@ -1269,7 +1269,7 @@ them when sending the report by mail. If you filed it in a bug tracker, forward
 the report's text to these addresses; but on top of it put a small note where
 you mention that you filed it with a link to the ticket.

-See Documentation/admin-guide/security-bugs.rst for more information.
+See Documentation/process/security-bugs.rst for more information.

 Duties after the report went out


@@ -76,6 +76,13 @@ properties:
     If "broken-flash-reset" is present then having this property does not
     make any difference.

+  spi-cpol: true
+  spi-cpha: true
+
+dependencies:
+  spi-cpol: [ spi-cpha ]
+  spi-cpha: [ spi-cpol ]
+
 unevaluatedProperties: false

 examples:


@@ -96,9 +96,11 @@ $defs:
         2: Lower Slew rate (slower edges)
         3: Reserved (No adjustments)

+  bias-bus-hold: true
   bias-pull-down: true
   bias-pull-up: true
   bias-disable: true
+  input-enable: true
   output-high: true
   output-low: true


@@ -92,7 +92,7 @@ properties:
       - description: Error interrupt
       - description: Receive buffer full interrupt
       - description: Transmit buffer empty interrupt
-      - description: Transmit End interrupt
+      - description: Break interrupt
   - items:
       - description: Error interrupt
       - description: Receive buffer full interrupt
@@ -107,7 +107,7 @@ properties:
       - const: eri
       - const: rxi
      - const: txi
-     - const: tei
+     - const: bri
   - items:
      - const: eri
      - const: rxi


@@ -39,13 +39,12 @@ With CONFIG_ZSMALLOC_STAT, we could see zsmalloc internal information via
 # cat /sys/kernel/debug/zsmalloc/zram0/classes

-class size almost_full almost_empty obj_allocated obj_used pages_used pages_per_zspage
+class size 10% 20% 30% 40% 50% 60% 70% 80% 90% 99% 100% obj_allocated obj_used pages_used pages_per_zspage freeable
 ...
 ...
-    9   176    0    1    186    129    8    4
-   10   192    1    0   2880   2872  135    3
-   11   208    0    1    819    795   42    2
-   12   224    0    1    219    159   12    4
+   30   512    0   12    4    1    0    1    0    0    1    0    414   3464   3346    433    1   14
+   31   528    2    7    2    2    1    0    1    0    0    2    117   4154   3793    536    4   44
+   32   544    6    3    4    1    2    1    0    0    0    1    260   4170   3965    556    2   26
 ...
 ...
@@ -54,10 +53,28 @@ class
 index
 size
         object size zspage stores
-almost_empty
-        the number of ZS_ALMOST_EMPTY zspages(see below)
-almost_full
-        the number of ZS_ALMOST_FULL zspages(see below)
+10%
+        the number of zspages with usage ratio less than 10% (see below)
+20%
+        the number of zspages with usage ratio between 10% and 20%
+30%
+        the number of zspages with usage ratio between 20% and 30%
+40%
+        the number of zspages with usage ratio between 30% and 40%
+50%
+        the number of zspages with usage ratio between 40% and 50%
+60%
+        the number of zspages with usage ratio between 50% and 60%
+70%
+        the number of zspages with usage ratio between 60% and 70%
+80%
+        the number of zspages with usage ratio between 70% and 80%
+90%
+        the number of zspages with usage ratio between 80% and 90%
+99%
+        the number of zspages with usage ratio between 90% and 99%
+100%
+        the number of zspages with usage ratio 100%
 obj_allocated
         the number of objects allocated
 obj_used
@@ -66,19 +83,14 @@ pages_used
         the number of pages allocated for the class
 pages_per_zspage
         the number of 0-order pages to make a zspage
+freeable
+        the approximate number of pages class compaction can free

-We assign a zspage to ZS_ALMOST_EMPTY fullness group when n <= N / f, where
-
-* n = number of allocated objects
-* N = total number of objects zspage can store
-* f = fullness_threshold_frac(ie, 4 at the moment)
-
-Similarly, we assign zspage to:
-
-* ZS_ALMOST_FULL  when n > N / f
-* ZS_EMPTY        when n == 0
-* ZS_FULL         when n == N
+Each zspage maintains inuse counter which keeps track of the number of
+objects stored in the zspage. The inuse counter determines the zspage's
+"fullness group" which is calculated as the ratio of the "inuse" objects to
+the total number of objects the zspage can hold (objs_per_zspage). The
+closer the inuse counter is to objs_per_zspage, the better.

 Internals
 =========
@@ -94,10 +106,10 @@ of objects that each zspage can store.
 For instance, consider the following size classes:::

-class size almost_full almost_empty obj_allocated obj_used pages_used pages_per_zspage freeable
+class size 10% .... 100% obj_allocated obj_used pages_used pages_per_zspage freeable
 ...
-   94  1536    0    0    0    0    0    3    0
-  100  1632    0    0    0    0    0    2    0
+   94  1536    0 ....    0    0    0    0    3    0
+  100  1632    0 ....    0    0    0    0    2    0
 ...
@@ -134,10 +146,11 @@ reduces memory wastage.
 Let's take a closer look at the bottom of `/sys/kernel/debug/zsmalloc/zramX/classes`:::

-class size almost_full almost_empty obj_allocated obj_used pages_used pages_per_zspage freeable
+class size 10% .... 100% obj_allocated obj_used pages_used pages_per_zspage freeable
 ...
-  202  3264    0    0    0    0    0    4    0
-  254  4096    0    0    0    0    0    1    0
+  202  3264    0 ..    0    0    0    0    4    0
+  254  4096    0 ..    0    0    0    0    1    0
 ...

 Size class #202 stores objects of size 3264 bytes and has a maximum of 4 pages
@@ -151,40 +164,42 @@ efficient storage of large objects.
 For zspage chain size of 8, huge class watermark becomes 3632 bytes:::

-class size almost_full almost_empty obj_allocated obj_used pages_used pages_per_zspage freeable
+class size 10% .... 100% obj_allocated obj_used pages_used pages_per_zspage freeable
 ...
-  202  3264    0    0    0    0    0    4    0
-  211  3408    0    0    0    0    0    5    0
-  217  3504    0    0    0    0    0    6    0
-  222  3584    0    0    0    0    0    7    0
-  225  3632    0    0    0    0    0    8    0
-  254  4096    0    0    0    0    0    1    0
+  202  3264    0 ..    0    0    0    0    4    0
+  211  3408    0 ..    0    0    0    0    5    0
+  217  3504    0 ..    0    0    0    0    6    0
+  222  3584    0 ..    0    0    0    0    7    0
+  225  3632    0 ..    0    0    0    0    8    0
+  254  4096    0 ..    0    0    0    0    1    0
 ...

 For zspage chain size of 16, huge class watermark becomes 3840 bytes:::

-class size almost_full almost_empty obj_allocated obj_used pages_used pages_per_zspage freeable
+class size 10% .... 100% obj_allocated obj_used pages_used pages_per_zspage freeable
 ...
-  202  3264    0    0    0    0    0    4    0
-  206  3328    0    0    0    0    0   13    0
-  207  3344    0    0    0    0    0    9    0
-  208  3360    0    0    0    0    0   14    0
-  211  3408    0    0    0    0    0    5    0
-  212  3424    0    0    0    0    0   16    0
-  214  3456    0    0    0    0    0   11    0
-  217  3504    0    0    0    0    0    6    0
-  219  3536    0    0    0    0    0   13    0
-  222  3584    0    0    0    0    0    7    0
-  223  3600    0    0    0    0    0   15    0
-  225  3632    0    0    0    0    0    8    0
-  228  3680    0    0    0    0    0    9    0
-  230  3712    0    0    0    0    0   10    0
-  232  3744    0    0    0    0    0   11    0
-  234  3776    0    0    0    0    0   12    0
-  235  3792    0    0    0    0    0   13    0
-  236  3808    0    0    0    0    0   14    0
-  238  3840    0    0    0    0    0   15    0
-  254  4096    0    0    0    0    0    1    0
+  202  3264    0 ..    0    0    0    0    4    0
+  206  3328    0 ..    0    0    0    0   13    0
+  207  3344    0 ..    0    0    0    0    9    0
+  208  3360    0 ..    0    0    0    0   14    0
+  211  3408    0 ..    0    0    0    0    5    0
+  212  3424    0 ..    0    0    0    0   16    0
+  214  3456    0 ..    0    0    0    0   11    0
+  217  3504    0 ..    0    0    0    0    6    0
+  219  3536    0 ..    0    0    0    0   13    0
+  222  3584    0 ..    0    0    0    0    7    0
+  223  3600    0 ..    0    0    0    0   15    0
+  225  3632    0 ..    0    0    0    0    8    0
+  228  3680    0 ..    0    0    0    0    9    0
+  230  3712    0 ..    0    0    0    0   10    0
+  232  3744    0 ..    0    0    0    0   11    0
+  234  3776    0 ..    0    0    0    0   12    0
+  235  3792    0 ..    0    0    0    0   13    0
+  236  3808    0 ..    0    0    0    0   14    0
+  238  3840    0 ..    0    0    0    0   15    0
+  254  4096    0 ..    0    0    0    0    1    0
 ...

 Overall the combined zspage chain size effect on zsmalloc pool configuration:::
@@ -214,9 +229,10 @@ zram as a build artifacts storage (Linux kernel compilation).
 zsmalloc classes stats:::

-class size almost_full almost_empty obj_allocated obj_used pages_used pages_per_zspage freeable
+class size 10% .... 100% obj_allocated obj_used pages_used pages_per_zspage freeable
 ...
-Total      13          51        413836  412973    159955                  3
+Total      13 ..       51        413836  412973    159955                  3

 zram mm_stat:::
@@ -227,9 +243,10 @@ zram as a build artifacts storage (Linux kernel compilation).
 zsmalloc classes stats:::

-class size almost_full almost_empty obj_allocated obj_used pages_used pages_per_zspage freeable
+class size 10% .... 100% obj_allocated obj_used pages_used pages_per_zspage freeable
 ...
-Total      18          87        414852  412978    156666                  0
+Total      18 ..       87        414852  412978    156666                  0

 zram mm_stat:::


@@ -138,7 +138,7 @@ required reading:
     philosophy and is very important for people moving to Linux from
     development on other Operating Systems.

-  :ref:`Documentation/admin-guide/security-bugs.rst <securitybugs>`
+  :ref:`Documentation/process/security-bugs.rst <securitybugs>`
     If you feel you have found a security problem in the Linux kernel,
     please follow the steps in this document to help notify the kernel
     developers, and help solve the issue.


@@ -35,6 +35,14 @@ Below are the essential guides that every developer should read.
    kernel-enforcement-statement
    kernel-driver-statement

+For security issues, see:
+
+.. toctree::
+   :maxdepth: 1
+
+   security-bugs
+   embargoed-hardware-issues
+
 Other guides to the community that are of interest to most developers are:

 .. toctree::
@@ -47,7 +55,6 @@ Other guides to the community that are of interest to most developers are:
    submit-checklist
    kernel-docs
    deprecated
-   embargoed-hardware-issues
    maintainers
    researcher-guidelines


@@ -68,7 +68,7 @@ Before contributing, carefully read the appropriate documentation:
 * Documentation/process/development-process.rst
 * Documentation/process/submitting-patches.rst
 * Documentation/admin-guide/reporting-issues.rst
-* Documentation/admin-guide/security-bugs.rst
+* Documentation/process/security-bugs.rst

 Then send a patch (including a commit log with all the details listed
 below) and follow up on any feedback from other developers.


@@ -39,7 +39,7 @@ Procedure for submitting patches to the -stable tree
 Security patches should not be handled (solely) by the -stable review
 process but should follow the procedures in
-:ref:`Documentation/admin-guide/security-bugs.rst <securitybugs>`.
+:ref:`Documentation/process/security-bugs.rst <securitybugs>`.

 For all other submissions, choose one of the following procedures
 -----------------------------------------------------------------


@@ -254,7 +254,7 @@ If you have a patch that fixes an exploitable security bug, send that patch
 to security@kernel.org. For severe bugs, a short embargo may be considered
 to allow distributors to get the patch out to users; in such cases,
 obviously, the patch should not be sent to any public lists. See also
-Documentation/admin-guide/security-bugs.rst.
+Documentation/process/security-bugs.rst.

 Patches that fix a severe bug in a released kernel should be directed
 toward the stable maintainers by putting a line like this::


@ -1,6 +1,6 @@
.. include:: ../disclaimer-ita.rst .. include:: ../disclaimer-ita.rst
:Original: :ref:`Documentation/admin-guide/security-bugs.rst <securitybugs>` :Original: :ref:`Documentation/process/security-bugs.rst <securitybugs>`
.. _it_securitybugs: .. _it_securitybugs:


@@ -272,7 +272,7 @@ embargo potrebbe essere preso in considerazione per dare il tempo alle
 distribuzioni di prendere la patch e renderla disponibile ai loro utenti;
 in questo caso, ovviamente, la patch non dovrebbe essere inviata su alcuna
 lista di discussione pubblica. Leggete anche
-Documentation/admin-guide/security-bugs.rst.
+Documentation/process/security-bugs.rst.

 Patch che correggono bachi importanti su un kernel già rilasciato, dovrebbero
 essere inviate ai manutentori dei kernel stabili aggiungendo la seguente riga::


@@ -167,7 +167,7 @@ linux-api@vger.kernel.org に送ることを勧めます。
 このドキュメントは Linux 開発の思想を理解するのに非常に重要です。
 そして、他のOSでの開発者が Linux に移る時にとても重要です。

-:ref:`Documentation/admin-guide/security-bugs.rst <securitybugs>`
+:ref:`Documentation/process/security-bugs.rst <securitybugs>`
 もし Linux カーネルでセキュリティ問題を発見したように思ったら、こ
 のドキュメントのステップに従ってカーネル開発者に連絡し、問題解決を
 支援してください。


@@ -157,7 +157,7 @@ mtk.manpages@gmail.com의 메인테이너에게 보낼 것을 권장한다.
 리눅스로 전향하는 사람들에게는 매우 중요하다.

-:ref:`Documentation/admin-guide/security-bugs.rst <securitybugs>`
+:ref:`Documentation/process/security-bugs.rst <securitybugs>`
 여러분들이 리눅스 커널의 보안 문제를 발견했다고 생각한다면 이 문서에
 나온 단계에 따라서 커널 개발자들에게 알리고 그 문제를 해결할 수 있도록
 도와 달라.


@@ -135,7 +135,7 @@ de obligada lectura:
 de Linux y es muy importante para las personas que se mudan a Linux
 tras desarrollar otros sistemas operativos.

-:ref:`Documentation/admin-guide/security-bugs.rst <securitybugs>`
+:ref:`Documentation/process/security-bugs.rst <securitybugs>`
 Si cree que ha encontrado un problema de seguridad en el kernel de
 Linux, siga los pasos de este documento para ayudar a notificar a los
 desarrolladores del kernel y ayudar a resolver el problema.


@@ -276,7 +276,7 @@ parche a security@kernel.org. Para errores graves, se debe mantener un
 poco de discreción y permitir que los distribuidores entreguen el parche a
 los usuarios; en esos casos, obviamente, el parche no debe enviarse a
 ninguna lista pública. Revise también
-Documentation/admin-guide/security-bugs.rst.
+Documentation/process/security-bugs.rst.

 Los parches que corrigen un error grave en un kernel en uso deben dirigirse
 hacia los maintainers estables poniendo una línea como esta::


@@ -1,6 +1,6 @@
 .. include:: ../disclaimer-zh_CN.rst

-:Original: :doc:`../../../admin-guide/security-bugs`
+:Original: :doc:`../../../process/security-bugs`

 :译者:


@@ -125,7 +125,7 @@ Linux内核代码中包含有大量的文档。这些文档对于学习如何与
 这篇文档对于理解Linux的开发哲学至关重要。对于将开发平台从其他操作系
 统转移到Linux的人来说也很重要。

-:ref:`Documentation/admin-guide/security-bugs.rst <securitybugs>`
+:ref:`Documentation/process/security-bugs.rst <securitybugs>`
 如果你认为自己发现了Linux内核的安全性问题请根据这篇文档中的步骤来
 提醒其他内核开发者并帮助解决这个问题。


@@ -2,7 +2,7 @@

 .. include:: ../disclaimer-zh_TW.rst

-:Original: :doc:`../../../admin-guide/security-bugs`
+:Original: :doc:`../../../process/security-bugs`

 :譯者:


@@ -128,7 +128,7 @@ Linux內核代碼中包含有大量的文檔。這些文檔對於學習如何與
 這篇文檔對於理解Linux的開發哲學至關重要。對於將開發平台從其他操作系
 統轉移到Linux的人來說也很重要。

-:ref:`Documentation/admin-guide/security-bugs.rst <securitybugs>`
+:ref:`Documentation/process/security-bugs.rst <securitybugs>`
 如果你認爲自己發現了Linux內核的安全性問題請根據這篇文檔中的步驟來
 提醒其他內核開發者並幫助解決這個問題。


@@ -8296,11 +8296,11 @@ ENOSYS for the others.
 8.35 KVM_CAP_PMU_CAPABILITY
 ---------------------------

-:Capability KVM_CAP_PMU_CAPABILITY
+:Capability: KVM_CAP_PMU_CAPABILITY
 :Architectures: x86
 :Type: vm
 :Parameters: arg[0] is bitmask of PMU virtualization capabilities.
-:Returns 0 on success, -EINVAL when arg[0] contains invalid bits
+:Returns: 0 on success, -EINVAL when arg[0] contains invalid bits

 This capability alters PMU virtualization in KVM.


@@ -73,7 +73,7 @@ Tips for patch submitters
    and ideally, should come with a patch proposal. Please do not send
    automated reports to this list either. Such bugs will be handled
    better and faster in the usual public places. See
-   Documentation/admin-guide/security-bugs.rst for details.
+   Documentation/process/security-bugs.rst for details.

 8. Happy hacking.
@@ -8216,6 +8216,7 @@ F: drivers/net/ethernet/freescale/dpaa
 FREESCALE QORIQ DPAA FMAN DRIVER
 M: Madalin Bucur <madalin.bucur@nxp.com>
+R: Sean Anderson <sean.anderson@seco.com>
 L: netdev@vger.kernel.org
 S: Maintained
 F: Documentation/devicetree/bindings/net/fsl-fman.txt
@@ -14656,10 +14657,8 @@ F: net/ipv4/nexthop.c

 NFC SUBSYSTEM
 M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
-L: linux-nfc@lists.01.org (subscribers-only)
 L: netdev@vger.kernel.org
 S: Maintained
-B: mailto:linux-nfc@lists.01.org
 F: Documentation/devicetree/bindings/net/nfc/
 F: drivers/nfc/
 F: include/linux/platform_data/nfcmrvl.h
@@ -14670,7 +14669,6 @@ F: net/nfc/
 NFC VIRTUAL NCI DEVICE DRIVER
 M: Bongsu Jeon <bongsu.jeon@samsung.com>
 L: netdev@vger.kernel.org
-L: linux-nfc@lists.01.org (subscribers-only)
 S: Supported
 F: drivers/nfc/virtual_ncidev.c
 F: tools/testing/selftests/nci/
@@ -15042,7 +15040,6 @@ F: Documentation/devicetree/bindings/sound/nxp,tfa989x.yaml
 F: sound/soc/codecs/tfa989x.c

 NXP-NCI NFC DRIVER
-L: linux-nfc@lists.01.org (subscribers-only)
 S: Orphan
 F: Documentation/devicetree/bindings/net/nfc/nxp,nci.yaml
 F: drivers/nfc/nxp-nci
@@ -18291,8 +18288,9 @@ F: drivers/s390/block/dasd*
 F: include/linux/dasd_mod.h

 S390 IOMMU (PCI)
+M: Niklas Schnelle <schnelle@linux.ibm.com>
 M: Matthew Rosato <mjrosato@linux.ibm.com>
-M: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
+R: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
 L: linux-s390@vger.kernel.org
 S: Supported
 F: drivers/iommu/s390-iommu.c
@@ -18487,7 +18485,6 @@ F: include/media/drv-intf/s3c_camif.h

 SAMSUNG S3FWRN5 NFC DRIVER
 M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
-L: linux-nfc@lists.01.org (subscribers-only)
 S: Maintained
 F: Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml
 F: drivers/nfc/s3fwrn5
@@ -18802,7 +18799,7 @@ F: include/uapi/linux/sed*
 SECURITY CONTACT
 M: Security Officers <security@kernel.org>
 S: Supported
-F: Documentation/admin-guide/security-bugs.rst
+F: Documentation/process/security-bugs.rst

 SECURITY SUBSYSTEM
 M: Paul Moore <paul@paul-moore.com>
@@ -20645,7 +20642,6 @@ F: sound/soc/codecs/tscs*.h
 TENSILICA XTENSA PORT (xtensa)
 M: Chris Zankel <chris@zankel.net>
 M: Max Filippov <jcmvbkbc@gmail.com>
-L: linux-xtensa@linux-xtensa.org
 S: Maintained
 T: git https://github.com/jcmvbkbc/linux-xtensa.git
 F: arch/xtensa/
@@ -20981,7 +20977,6 @@ F: drivers/iio/magnetometer/tmag5273.c
 TI TRF7970A NFC DRIVER
 M: Mark Greer <mgreer@animalcreek.com>
 L: linux-wireless@vger.kernel.org
-L: linux-nfc@lists.01.org (subscribers-only)
 S: Supported
 F: Documentation/devicetree/bindings/net/nfc/ti,trf7970a.yaml
 F: drivers/nfc/trf7970a.c
@@ -23038,7 +23033,6 @@ F: drivers/gpio/gpio-xra1403.c

 XTENSA XTFPGA PLATFORM SUPPORT
 M: Max Filippov <jcmvbkbc@gmail.com>
-L: linux-xtensa@linux-xtensa.org
 S: Maintained
 F: drivers/spi/spi-xtensa-xtfpga.c
 F: sound/soc/xtensa/xtfpga-i2s.c


@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 3
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
 NAME = Hurr durr I'ma ninja sloth

 # *DOCUMENTATION*


@@ -314,36 +314,32 @@ int do_compat_alignment_fixup(unsigned long addr, struct pt_regs *regs)
     int (*handler)(unsigned long addr, u32 instr, struct pt_regs *regs);
     unsigned int type;
     u32 instr = 0;
-    u16 tinstr = 0;
     int isize = 4;
     int thumb2_32b = 0;
-    int fault;

     instrptr = instruction_pointer(regs);

     if (compat_thumb_mode(regs)) {
         __le16 __user *ptr = (__le16 __user *)(instrptr & ~1);
+        u16 tinstr, tinst2;

-        fault = alignment_get_thumb(regs, ptr, &tinstr);
-        if (!fault) {
-            if (IS_T32(tinstr)) {
-                /* Thumb-2 32-bit */
-                u16 tinst2;
-                fault = alignment_get_thumb(regs, ptr + 1, &tinst2);
+        if (alignment_get_thumb(regs, ptr, &tinstr))
+            return 1;
+
+        if (IS_T32(tinstr)) { /* Thumb-2 32-bit */
+            if (alignment_get_thumb(regs, ptr + 1, &tinst2))
+                return 1;
             instr = ((u32)tinstr << 16) | tinst2;
             thumb2_32b = 1;
         } else {
             isize = 2;
             instr = thumb2arm(tinstr);
-            }
         }
     } else {
-        fault = alignment_get_arm(regs, (__le32 __user *)instrptr, &instr);
+        if (alignment_get_arm(regs, (__le32 __user *)instrptr, &instr))
+            return 1;
     }

-    if (fault)
-        return 1;
-
     switch (CODING_BITS(instr)) {
     case 0x00000000:    /* 3.13.4 load/store instruction extensions */
         if (LDSTHD_I_BIT(instr))


@@ -220,6 +220,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
     case KVM_CAP_VCPU_ATTRIBUTES:
     case KVM_CAP_PTP_KVM:
     case KVM_CAP_ARM_SYSTEM_SUSPEND:
+    case KVM_CAP_IRQFD_RESAMPLE:
         r = 1;
         break;
     case KVM_CAP_SET_GUEST_DEBUG2:


@@ -666,14 +666,33 @@ static int get_user_mapping_size(struct kvm *kvm, u64 addr)
                        CONFIG_PGTABLE_LEVELS),
         .mm_ops = &kvm_user_mm_ops,
     };
+    unsigned long flags;
     kvm_pte_t pte = 0;  /* Keep GCC quiet... */
     u32 level = ~0;
     int ret;

+    /*
+     * Disable IRQs so that we hazard against a concurrent
+     * teardown of the userspace page tables (which relies on
+     * IPI-ing threads).
+     */
+    local_irq_save(flags);
     ret = kvm_pgtable_get_leaf(&pgt, addr, &pte, &level);
-    VM_BUG_ON(ret);
-    VM_BUG_ON(level >= KVM_PGTABLE_MAX_LEVELS);
-    VM_BUG_ON(!(pte & PTE_VALID));
+    local_irq_restore(flags);
+
+    if (ret)
+        return ret;
+
+    /*
+     * Not seeing an error, but not updating level? Something went
+     * deeply wrong...
+     */
+    if (WARN_ON(level >= KVM_PGTABLE_MAX_LEVELS))
+        return -EFAULT;
+
+    /* Oops, the userspace PTs are gone... Replay the fault */
+    if (!kvm_pte_valid(pte))
+        return -EAGAIN;

     return BIT(ARM64_HW_PGTABLE_LEVEL_SHIFT(level));
 }
@@ -1079,7 +1098,7 @@ static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
  *
  * Returns the size of the mapping.
  */
-static unsigned long
+static long
 transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot,
                 unsigned long hva, kvm_pfn_t *pfnp,
                 phys_addr_t *ipap)
@@ -1091,8 +1110,15 @@ transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot,
      * sure that the HVA and IPA are sufficiently aligned and that the
      * block map is contained within the memslot.
      */
-    if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE) &&
-        get_user_mapping_size(kvm, hva) >= PMD_SIZE) {
+    if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) {
+        int sz = get_user_mapping_size(kvm, hva);
+
+        if (sz < 0)
+            return sz;
+
+        if (sz < PMD_SIZE)
+            return PAGE_SIZE;
+
         /*
          * The address we faulted on is backed by a transparent huge
          * page. However, because we map the compound huge page and
@@ -1192,7 +1218,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 {
     int ret = 0;
     bool write_fault, writable, force_pte = false;
-    bool exec_fault;
+    bool exec_fault, mte_allowed;
     bool device = false;
     unsigned long mmu_seq;
     struct kvm *kvm = vcpu->kvm;
@@ -1203,7 +1229,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
     kvm_pfn_t pfn;
     bool logging_active = memslot_is_logging(memslot);
     unsigned long fault_level = kvm_vcpu_trap_get_fault_level(vcpu);
-    unsigned long vma_pagesize, fault_granule;
+    long vma_pagesize, fault_granule;
     enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
     struct kvm_pgtable *pgt;
@@ -1217,6 +1243,20 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         return -EFAULT;
     }

+    /*
+     * Permission faults just need to update the existing leaf entry,
+     * and so normally don't require allocations from the memcache. The
+     * only exception to this is when dirty logging is enabled at runtime
+     * and a write fault needs to collapse a block entry into a table.
+     */
+    if (fault_status != ESR_ELx_FSC_PERM ||
+        (logging_active && write_fault)) {
+        ret = kvm_mmu_topup_memory_cache(memcache,
+                         kvm_mmu_cache_min_pages(kvm));
+        if (ret)
+            return ret;
+    }
+
     /*
      * Let's check if we will get back a huge page backed by hugetlbfs, or
      * get block mapping for device MMIO region.
@@ -1269,37 +1309,21 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
     fault_ipa &= ~(vma_pagesize - 1);

     gfn = fault_ipa >> PAGE_SHIFT;
-    mmap_read_unlock(current->mm);
+    mte_allowed = kvm_vma_mte_allowed(vma);
+
+    /* Don't use the VMA after the unlock -- it may have vanished */
+    vma = NULL;

     /*
-     * Permission faults just need to update the existing leaf entry,
-     * and so normally don't require allocations from the memcache. The
-     * only exception to this is when dirty logging is enabled at runtime
-     * and a write fault needs to collapse a block entry into a table.
-     */
-    if (fault_status != ESR_ELx_FSC_PERM ||
-        (logging_active && write_fault)) {
-        ret = kvm_mmu_topup_memory_cache(memcache,
-                         kvm_mmu_cache_min_pages(kvm));
-        if (ret)
-            return ret;
-    }
-
-    mmu_seq = vcpu->kvm->mmu_invalidate_seq;
-    /*
-     * Ensure the read of mmu_invalidate_seq happens before we call
-     * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
-     * the page we just got a reference to gets unmapped before we have a
-     * chance to grab the mmu_lock, which ensure that if the page gets
-     * unmapped afterwards, the call to kvm_unmap_gfn will take it away
-     * from us again properly. This smp_rmb() interacts with the smp_wmb()
-     * in kvm_mmu_notifier_invalidate_<page|range_end>.
+     * Read mmu_invalidate_seq so that KVM can detect if the results of
+     * vma_lookup() or __gfn_to_pfn_memslot() become stale prior to
+     * acquiring kvm->mmu_lock.
      *
-     * Besides, __gfn_to_pfn_memslot() instead of gfn_to_pfn_prot() is
-     * used to avoid unnecessary overhead introduced to locate the memory
-     * slot because it's always fixed even @gfn is adjusted for huge pages.
+     * Rely on mmap_read_unlock() for an implicit smp_rmb(), which pairs
+     * with the smp_wmb() in kvm_mmu_invalidate_end().
      */
-    smp_rmb();
+    mmu_seq = vcpu->kvm->mmu_invalidate_seq;
+    mmap_read_unlock(current->mm);

     pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL,
                    write_fault, &writable, NULL);
@@ -1350,11 +1374,16 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         vma_pagesize = transparent_hugepage_adjust(kvm, memslot,
                                hva, &pfn,
                                &fault_ipa);
+
+        if (vma_pagesize < 0) {
+            ret = vma_pagesize;
+            goto out_unlock;
+        }
     }

     if (fault_status != ESR_ELx_FSC_PERM && !device && kvm_has_mte(kvm)) {
         /* Check the VMM hasn't introduced a new disallowed VMA */
-        if (kvm_vma_mte_allowed(vma)) {
+        if (mte_allowed) {
             sanitise_mte_tags(kvm, pfn, vma_pagesize);
         } else {
             ret = -EFAULT;


@@ -538,7 +538,8 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
     if (!kvm_pmu_is_3p5(vcpu))
         val &= ~ARMV8_PMU_PMCR_LP;

-    __vcpu_sys_reg(vcpu, PMCR_EL0) = val;
+    /* The reset bits don't indicate any state, and shouldn't be saved. */
+    __vcpu_sys_reg(vcpu, PMCR_EL0) = val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P);

     if (val & ARMV8_PMU_PMCR_E) {
         kvm_pmu_enable_counter_mask(vcpu,


@@ -856,6 +856,22 @@ static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
     return true;
 }

+static int get_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+              u64 *val)
+{
+    u64 idx;
+
+    if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0)
+        /* PMCCNTR_EL0 */
+        idx = ARMV8_PMU_CYCLE_IDX;
+    else
+        /* PMEVCNTRn_EL0 */
+        idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
+
+    *val = kvm_pmu_get_counter_value(vcpu, idx);
+    return 0;
+}
+
 static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
                   struct sys_reg_params *p,
                   const struct sys_reg_desc *r)
@@ -1072,7 +1088,7 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 /* Macro to expand the PMEVCNTRn_EL0 register */
 #define PMU_PMEVCNTR_EL0(n)                     \
     { PMU_SYS_REG(SYS_PMEVCNTRn_EL0(n)),                \
-      .reset = reset_pmevcntr,                  \
+      .reset = reset_pmevcntr, .get_user = get_pmu_evcntr,     \
       .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }

 /* Macro to expand the PMEVTYPERn_EL0 register */
@@ -1982,7 +1998,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
     { PMU_SYS_REG(SYS_PMCEID1_EL0),
       .access = access_pmceid, .reset = NULL },
     { PMU_SYS_REG(SYS_PMCCNTR_EL0),
-      .access = access_pmu_evcntr, .reset = reset_unknown, .reg = PMCCNTR_EL0 },
+      .access = access_pmu_evcntr, .reset = reset_unknown,
+      .reg = PMCCNTR_EL0, .get_user = get_pmu_evcntr},
     { PMU_SYS_REG(SYS_PMXEVTYPER_EL0),
       .access = access_pmu_evtyper, .reset = NULL },
     { PMU_SYS_REG(SYS_PMXEVCNTR_EL0),


@@ -5,6 +5,8 @@
 #include <asm/bmips.h>
 #include <asm/io.h>

+bool bmips_rac_flush_disable;
+
 void arch_sync_dma_for_cpu_all(void)
 {
     void __iomem *cbr = BMIPS_GET_CBR();
@@ -15,6 +17,9 @@ void arch_sync_dma_for_cpu_all(void)
         boot_cpu_type() != CPU_BMIPS4380)
         return;

+    if (unlikely(bmips_rac_flush_disable))
+        return;
+
     /* Flush stale data out of the readahead cache */
     cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG);
     __raw_writel(cfg | 0x100, cbr + BMIPS_RAC_CONFIG);


@@ -35,6 +35,8 @@
 #define REG_BCM6328_OTP     ((void __iomem *)CKSEG1ADDR(0x1000062c))
 #define BCM6328_TP1_DISABLED    BIT(9)

+extern bool bmips_rac_flush_disable;
+
 static const unsigned long kbase = VMLINUX_LOAD_ADDRESS & 0xfff00000;

 struct bmips_quirk {
@@ -104,6 +106,12 @@ static void bcm6358_quirks(void)
      * disable SMP for now
      */
     bmips_smp_enabled = 0;
+
+    /*
+     * RAC flush causes kernel panics on BCM6358 when booting from TP1
+     * because the bootloader is not initializing it properly.
+     */
+    bmips_rac_flush_disable = !!(read_c0_brcm_cmt_local() & (1 << 31));
 }

 static void bcm6368_quirks(void)


@@ -148,6 +148,11 @@ static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
      */
 }

+static inline bool __pte_protnone(unsigned long pte)
+{
+    return (pte & (pgprot_val(PAGE_NONE) | _PAGE_RWX)) == pgprot_val(PAGE_NONE);
+}
+
 static inline bool __pte_flags_need_flush(unsigned long oldval,
                       unsigned long newval)
 {
@@ -164,8 +169,8 @@ static inline bool __pte_flags_need_flush(unsigned long oldval,
     /*
      * We do not expect kernel mappings or non-PTEs or not-present PTEs.
      */
-    VM_WARN_ON_ONCE(oldval & _PAGE_PRIVILEGED);
-    VM_WARN_ON_ONCE(newval & _PAGE_PRIVILEGED);
+    VM_WARN_ON_ONCE(!__pte_protnone(oldval) && oldval & _PAGE_PRIVILEGED);
+    VM_WARN_ON_ONCE(!__pte_protnone(newval) && newval & _PAGE_PRIVILEGED);
     VM_WARN_ON_ONCE(!(oldval & _PAGE_PTE));
     VM_WARN_ON_ONCE(!(newval & _PAGE_PTE));
     VM_WARN_ON_ONCE(!(oldval & _PAGE_PRESENT));


@@ -290,6 +290,9 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset,
 static int ppr_get(struct task_struct *target, const struct user_regset *regset,
            struct membuf to)
 {
+    if (!target->thread.regs)
+        return -EINVAL;
+
     return membuf_write(&to, &target->thread.regs->ppr, sizeof(u64));
 }

@@ -297,6 +300,9 @@ static int ppr_set(struct task_struct *target, const struct user_regset *regset,
            unsigned int pos, unsigned int count, const void *kbuf,
            const void __user *ubuf)
 {
+    if (!target->thread.regs)
+        return -EINVAL;
+
     return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                   &target->thread.regs->ppr, 0, sizeof(u64));
 }


@@ -576,6 +576,12 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
         break;
 #endif

+#ifdef CONFIG_HAVE_KVM_IRQFD
+    case KVM_CAP_IRQFD_RESAMPLE:
+        r = !xive_enabled();
+        break;
+#endif
+
     case KVM_CAP_PPC_ALLOC_HTAB:
         r = hv_enabled;
         break;


@@ -856,6 +856,13 @@ int pseries_vas_dlpar_cpu(void)
 {
     int new_nr_creds, rc;

+    /*
+     * NX-GZIP is not enabled. Nothing to do for DLPAR event
+     */
+    if (!copypaste_feat)
+        return 0;
+
     rc = h_query_vas_capabilities(H_QUERY_VAS_CAPABILITIES,
                       vascaps[VAS_GZIP_DEF_FEAT_TYPE].feat,
                       (u64)virt_to_phys(&hv_cop_caps));
@@ -1012,6 +1019,7 @@ static int __init pseries_vas_init(void)
      * Linux supports user space COPY/PASTE only with Radix
      */
     if (!radix_enabled()) {
+        copypaste_feat = false;
         pr_err("API is supported only with radix page tables\n");
         return -ENOTSUPP;
     }


@@ -126,6 +126,7 @@ config RISCV
	select OF_IRQ
	select PCI_DOMAINS_GENERIC if PCI
	select PCI_MSI if PCI
+	select RISCV_ALTERNATIVE if !XIP_KERNEL
	select RISCV_INTC
	select RISCV_TIMER if RISCV_SBI
	select SIFIVE_PLIC
@@ -401,9 +402,8 @@ config RISCV_ISA_C
 config RISCV_ISA_SVPBMT
	bool "SVPBMT extension support"
	depends on 64BIT && MMU
-	depends on !XIP_KERNEL
+	depends on RISCV_ALTERNATIVE
	default y
-	select RISCV_ALTERNATIVE
	help
	  Adds support to dynamically detect the presence of the SVPBMT
	  ISA-extension (Supervisor-mode: page-based memory types) and
@@ -428,8 +428,8 @@ config TOOLCHAIN_HAS_ZBB
 config RISCV_ISA_ZBB
	bool "Zbb extension support for bit manipulation instructions"
	depends on TOOLCHAIN_HAS_ZBB
-	depends on !XIP_KERNEL && MMU
-	select RISCV_ALTERNATIVE
+	depends on MMU
+	depends on RISCV_ALTERNATIVE
	default y
	help
	  Adds support to dynamically detect the presence of the ZBB
@@ -443,9 +443,9 @@ config RISCV_ISA_ZBB
 config RISCV_ISA_ZICBOM
	bool "Zicbom extension support for non-coherent DMA operation"
-	depends on !XIP_KERNEL && MMU
+	depends on MMU
+	depends on RISCV_ALTERNATIVE
	default y
-	select RISCV_ALTERNATIVE
	select RISCV_DMA_NONCOHERENT
	help
	  Adds support to dynamically detect the presence of the ZICBOM


@@ -2,8 +2,7 @@ menu "CPU errata selection"

 config ERRATA_SIFIVE
	bool "SiFive errata"
-	depends on !XIP_KERNEL
-	select RISCV_ALTERNATIVE
+	depends on RISCV_ALTERNATIVE
	help
	  All SiFive errata Kconfig depend on this Kconfig. Disabling
	  this Kconfig will disable all SiFive errata. Please say "Y"
@@ -35,8 +34,7 @@ config ERRATA_SIFIVE_CIP_1200

 config ERRATA_THEAD
	bool "T-HEAD errata"
-	depends on !XIP_KERNEL
-	select RISCV_ALTERNATIVE
+	depends on RISCV_ALTERNATIVE
	help
	  All T-HEAD errata Kconfig depend on this Kconfig. Disabling
	  this Kconfig will disable all T-HEAD errata. Please say "Y"


@@ -57,18 +57,31 @@ struct riscv_isa_ext_data {
     unsigned int isa_ext_id;
 };

+unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap);
+
+#define riscv_isa_extension_mask(ext) BIT_MASK(RISCV_ISA_EXT_##ext)
+
+bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit);
+#define riscv_isa_extension_available(isa_bitmap, ext)  \
+    __riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_##ext)
+
 static __always_inline bool
 riscv_has_extension_likely(const unsigned long ext)
 {
     compiletime_assert(ext < RISCV_ISA_EXT_MAX,
                "ext must be < RISCV_ISA_EXT_MAX");

-    asm_volatile_goto(
-    ALTERNATIVE("j %l[l_no]", "nop", 0, %[ext], 1)
-    :
-    : [ext] "i" (ext)
-    :
-    : l_no);
+    if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
+        asm_volatile_goto(
+        ALTERNATIVE("j %l[l_no]", "nop", 0, %[ext], 1)
+        :
+        : [ext] "i" (ext)
+        :
+        : l_no);
+    } else {
+        if (!__riscv_isa_extension_available(NULL, ext))
+            goto l_no;
+    }

     return true;
 l_no:
@@ -81,26 +94,23 @@ riscv_has_extension_unlikely(const unsigned long ext)
     compiletime_assert(ext < RISCV_ISA_EXT_MAX,
                "ext must be < RISCV_ISA_EXT_MAX");

-    asm_volatile_goto(
-    ALTERNATIVE("nop", "j %l[l_yes]", 0, %[ext], 1)
-    :
-    : [ext] "i" (ext)
-    :
-    : l_yes);
+    if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
+        asm_volatile_goto(
+        ALTERNATIVE("nop", "j %l[l_yes]", 0, %[ext], 1)
+        :
+        : [ext] "i" (ext)
+        :
+        : l_yes);
+    } else {
+        if (__riscv_isa_extension_available(NULL, ext))
+            goto l_yes;
+    }

     return false;
 l_yes:
     return true;
 }

-unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap);
-
-#define riscv_isa_extension_mask(ext) BIT_MASK(RISCV_ISA_EXT_##ext)
-
-bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit);
-#define riscv_isa_extension_available(isa_bitmap, ext)  \
-    __riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_##ext)
-
 #endif

 #endif /* _ASM_RISCV_HWCAP_H */


@@ -147,10 +147,8 @@ static void kvm_riscv_vcpu_timer_blocking(struct kvm_vcpu *vcpu)
         return;

     delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
-    if (delta_ns) {
-        hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL);
-        t->next_set = true;
-    }
+    hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL);
+    t->next_set = true;
 }

 static void kvm_riscv_vcpu_timer_unblocking(struct kvm_vcpu *vcpu)


@@ -162,7 +162,7 @@ vdso_prepare: prepare0

 ifdef CONFIG_EXPOLINE_EXTERN
 modules_prepare: expoline_prepare
-expoline_prepare:
+expoline_prepare: scripts
	$(Q)$(MAKE) $(build)=arch/s390/lib/expoline arch/s390/lib/expoline/expoline.o
 endif
 endif


@@ -474,9 +474,7 @@ long arch_ptrace(struct task_struct *child, long request,
         }
         return 0;
     case PTRACE_GET_LAST_BREAK:
-        put_user(child->thread.last_break,
-             (unsigned long __user *) data);
-        return 0;
+        return put_user(child->thread.last_break, (unsigned long __user *)data);
     case PTRACE_ENABLE_TE:
         if (!MACHINE_HAS_TE)
             return -EIO;
@@ -824,9 +822,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
         }
         return 0;
     case PTRACE_GET_LAST_BREAK:
-        put_user(child->thread.last_break,
-             (unsigned int __user *) data);
-        return 0;
+        return put_user(child->thread.last_break, (unsigned int __user *)data);
     }
     return compat_ptrace_request(child, request, addr, data);
 }


@@ -271,10 +271,18 @@ static int handle_prog(struct kvm_vcpu *vcpu)
  * handle_external_interrupt - used for external interruption interceptions
  * @vcpu: virtual cpu
  *
- * This interception only occurs if the CPUSTAT_EXT_INT bit was set, or if
- * the new PSW does not have external interrupts disabled. In the first case,
- * we've got to deliver the interrupt manually, and in the second case, we
- * drop to userspace to handle the situation there.
+ * This interception occurs if:
+ * - the CPUSTAT_EXT_INT bit was already set when the external interrupt
+ *   occurred. In this case, the interrupt needs to be injected manually to
+ *   preserve interrupt priority.
+ * - the external new PSW has external interrupts enabled, which will cause an
+ *   interruption loop. We drop to userspace in this case.
+ *
+ * The latter case can be detected by inspecting the external mask bit in the
+ * external new psw.
+ *
+ * Under PV, only the latter case can occur, since interrupt priorities are
+ * handled in the ultravisor.
  */
 static int handle_external_interrupt(struct kvm_vcpu *vcpu)
 {
@@ -285,10 +293,18 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu)
     vcpu->stat.exit_external_interrupt++;

-    rc = read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &newpsw, sizeof(psw_t));
-    if (rc)
-        return rc;
-    /* We can not handle clock comparator or timer interrupt with bad PSW */
+    if (kvm_s390_pv_cpu_is_protected(vcpu)) {
+        newpsw = vcpu->arch.sie_block->gpsw;
+    } else {
+        rc = read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &newpsw, sizeof(psw_t));
+        if (rc)
+            return rc;
+    }
+
+    /*
+     * Clock comparator or timer interrupt with external interrupt enabled
+     * will cause interrupt loop. Drop to userspace.
+     */
     if ((eic == EXT_IRQ_CLK_COMP || eic == EXT_IRQ_CPU_TIMER) &&
         (newpsw.mask & PSW_MASK_EXT))
         return -EOPNOTSUPP;


@@ -573,6 +573,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
     case KVM_CAP_S390_VCPU_RESETS:
     case KVM_CAP_SET_GUEST_DEBUG:
     case KVM_CAP_S390_DIAG318:
+    case KVM_CAP_IRQFD_RESAMPLE:
         r = 1;
         break;
     case KVM_CAP_SET_GUEST_DEBUG2:
case KVM_CAP_SET_GUEST_DEBUG2: case KVM_CAP_SET_GUEST_DEBUG2:


@@ -172,7 +172,7 @@ unsigned long __clear_user(void __user *to, unsigned long size)
         "4: slgr %0,%0\n"
         "5:\n"
         EX_TABLE(0b,2b) EX_TABLE(6b,2b) EX_TABLE(3b,5b) EX_TABLE(7b,5b)
-        : "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
+        : "+&a" (size), "+&a" (to), "+a" (tmp1), "=&a" (tmp2)
         : "a" (empty_zero_page), [spec] "d" (spec.val)
         : "cc", "memory", "0");
     return size;


@@ -125,6 +125,8 @@
 #define INTEL_FAM6_LUNARLAKE_M      0xBD

+#define INTEL_FAM6_ARROWLAKE        0xC6
+
 /* "Small Core" Processors (Atom/E-Core) */

 #define INTEL_FAM6_ATOM_BONNELL     0x1C /* Diamondville, Pineview */


@@ -146,7 +146,11 @@ static int __init acpi_parse_madt(struct acpi_table_header *table)
         pr_debug("Local APIC address 0x%08x\n", madt->address);
     }

-    if (madt->header.revision >= 5)
+    /* ACPI 6.3 and newer support the online capable bit. */
+    if (acpi_gbl_FADT.header.revision > 6 ||
+        (acpi_gbl_FADT.header.revision == 6 &&
+         acpi_gbl_FADT.minor_revision >= 3))
         acpi_support_online_capable = true;

     default_acpi_madt_oem_check(madt->header.oem_id,
@@ -193,7 +197,8 @@ static bool __init acpi_is_processor_usable(u32 lapic_flags)
     if (lapic_flags & ACPI_MADT_ENABLED)
         return true;

-    if (acpi_support_online_capable && (lapic_flags & ACPI_MADT_ONLINE_CAPABLE))
+    if (!acpi_support_online_capable ||
+        (lapic_flags & ACPI_MADT_ONLINE_CAPABLE))
         return true;

     return false;


@@ -358,12 +358,16 @@ static void __init ms_hyperv_init_platform(void)
      * To mirror what Windows does we should extract CPU management
      * features and use the ReservedIdentityBit to detect if Linux is the
      * root partition. But that requires negotiating CPU management
-     * interface (a process to be finalized).
+     * interface (a process to be finalized). For now, use the privilege
+     * flag as the indicator for running as root.
      *
-     * For now, use the privilege flag as the indicator for running as
-     * root.
+     * Hyper-V should never specify running as root and as a Confidential
+     * VM. But to protect against a compromised/malicious Hyper-V trying
+     * to exploit root behavior to expose Confidential VM memory, ignore
+     * the root partition setting if also a Confidential VM.
      */
-    if (cpuid_ebx(HYPERV_CPUID_FEATURES) & HV_CPU_MANAGEMENT) {
+    if ((ms_hyperv.priv_high & HV_CPU_MANAGEMENT) &&
+        !(ms_hyperv.priv_high & HV_ISOLATION)) {
         hv_root_partition = true;
         pr_info("Hyper-V: running as root partition\n");
     }


@@ -368,9 +368,39 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
         mask_after = e->fields.mask;
         if (mask_before != mask_after)
             kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after);
-        if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG
-            && ioapic->irr & (1 << index))
-            ioapic_service(ioapic, index, false);
+        if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG &&
+            ioapic->irr & (1 << index) && !e->fields.mask && !e->fields.remote_irr) {
+            /*
+             * Pending status in irr may be outdated: the IRQ line may have
+             * already been deasserted by a device while the IRQ was masked.
+             * This occurs, for instance, if the interrupt is handled in a
+             * Linux guest as a oneshot interrupt (IRQF_ONESHOT). In this
+             * case the guest acknowledges the interrupt to the device in
+             * its threaded irq handler, i.e. after the EOI but before
+             * unmasking, so at the time of unmasking the IRQ line is
+             * already down but our pending irr bit is still set. In such
+             * cases, injecting this pending interrupt to the guest is
+             * buggy: the guest will receive an extra unwanted interrupt.
+             *
+             * So we need to check here if the IRQ is actually still pending.
+             * As we are generally not able to probe the IRQ line status
+             * directly, we do it through irqfd resampler. Namely, we clear
+             * the pending status and notify the resampler that this interrupt
+             * is done, without actually injecting it into the guest. If the
+             * IRQ line is actually already deasserted, we are done. If it is
+             * still asserted, a new interrupt will be shortly triggered
+             * through irqfd and injected into the guest.
+             *
+             * If, however, it's not possible to resample (no irqfd resampler
+             * registered for this irq), then unconditionally inject this
+             * pending interrupt into the guest, so the guest will not miss
+             * an interrupt, although may get an extra unwanted interrupt.
+             */
+            if (kvm_notify_irqfd_resampler(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index))
+                ioapic->irr &= ~(1 << index);
+            else
+                ioapic_service(ioapic, index, false);
+        }
+
         if (e->fields.delivery_mode == APIC_DM_FIXED) {
             struct kvm_lapic_irq irq;


@ -12,6 +12,11 @@ int hv_remote_flush_tlb_with_range(struct kvm *kvm,
int hv_remote_flush_tlb(struct kvm *kvm); int hv_remote_flush_tlb(struct kvm *kvm);
void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp); void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp);
#else /* !CONFIG_HYPERV */ #else /* !CONFIG_HYPERV */
static inline int hv_remote_flush_tlb(struct kvm *kvm)
{
return -EOPNOTSUPP;
}
static inline void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp) static inline void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp)
{ {
} }


@ -3729,7 +3729,7 @@ static void svm_enable_nmi_window(struct kvm_vcpu *vcpu)
svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF); svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
} }
static void svm_flush_tlb_current(struct kvm_vcpu *vcpu) static void svm_flush_tlb_asid(struct kvm_vcpu *vcpu)
{ {
struct vcpu_svm *svm = to_svm(vcpu); struct vcpu_svm *svm = to_svm(vcpu);
@ -3753,6 +3753,37 @@ static void svm_flush_tlb_current(struct kvm_vcpu *vcpu)
svm->current_vmcb->asid_generation--; svm->current_vmcb->asid_generation--;
} }
static void svm_flush_tlb_current(struct kvm_vcpu *vcpu)
{
hpa_t root_tdp = vcpu->arch.mmu->root.hpa;
/*
* When running on Hyper-V with EnlightenedNptTlb enabled, explicitly
* flush the NPT mappings via hypercall as flushing the ASID only
* affects virtual to physical mappings, it does not invalidate guest
* physical to host physical mappings.
*/
if (svm_hv_is_enlightened_tlb_enabled(vcpu) && VALID_PAGE(root_tdp))
hyperv_flush_guest_mapping(root_tdp);
svm_flush_tlb_asid(vcpu);
}
static void svm_flush_tlb_all(struct kvm_vcpu *vcpu)
{
/*
* When running on Hyper-V with EnlightenedNptTlb enabled, remote TLB
* flushes should be routed to hv_remote_flush_tlb() without requesting
* a "regular" remote flush. Reaching this point means either there's
* a KVM bug or a prior hv_remote_flush_tlb() call failed, both of
* which might be fatal to the guest. Yell, but try to recover.
*/
if (WARN_ON_ONCE(svm_hv_is_enlightened_tlb_enabled(vcpu)))
hv_remote_flush_tlb(vcpu->kvm);
svm_flush_tlb_asid(vcpu);
}
static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva) static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
{ {
struct vcpu_svm *svm = to_svm(vcpu); struct vcpu_svm *svm = to_svm(vcpu);
@ -4745,10 +4776,10 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.set_rflags = svm_set_rflags, .set_rflags = svm_set_rflags,
.get_if_flag = svm_get_if_flag, .get_if_flag = svm_get_if_flag,
.flush_tlb_all = svm_flush_tlb_current, .flush_tlb_all = svm_flush_tlb_all,
.flush_tlb_current = svm_flush_tlb_current, .flush_tlb_current = svm_flush_tlb_current,
.flush_tlb_gva = svm_flush_tlb_gva, .flush_tlb_gva = svm_flush_tlb_gva,
.flush_tlb_guest = svm_flush_tlb_current, .flush_tlb_guest = svm_flush_tlb_asid,
.vcpu_pre_run = svm_vcpu_pre_run, .vcpu_pre_run = svm_vcpu_pre_run,
.vcpu_run = svm_vcpu_run, .vcpu_run = svm_vcpu_run,


@ -6,6 +6,8 @@
#ifndef __ARCH_X86_KVM_SVM_ONHYPERV_H__ #ifndef __ARCH_X86_KVM_SVM_ONHYPERV_H__
#define __ARCH_X86_KVM_SVM_ONHYPERV_H__ #define __ARCH_X86_KVM_SVM_ONHYPERV_H__
#include <asm/mshyperv.h>
#if IS_ENABLED(CONFIG_HYPERV) #if IS_ENABLED(CONFIG_HYPERV)
#include "kvm_onhyperv.h" #include "kvm_onhyperv.h"
@ -15,6 +17,14 @@ static struct kvm_x86_ops svm_x86_ops;
int svm_hv_enable_l2_tlb_flush(struct kvm_vcpu *vcpu); int svm_hv_enable_l2_tlb_flush(struct kvm_vcpu *vcpu);
static inline bool svm_hv_is_enlightened_tlb_enabled(struct kvm_vcpu *vcpu)
{
struct hv_vmcb_enlightenments *hve = &to_svm(vcpu)->vmcb->control.hv_enlightenments;
return ms_hyperv.nested_features & HV_X64_NESTED_ENLIGHTENED_TLB &&
!!hve->hv_enlightenments_control.enlightened_npt_tlb;
}
static inline void svm_hv_init_vmcb(struct vmcb *vmcb) static inline void svm_hv_init_vmcb(struct vmcb *vmcb)
{ {
struct hv_vmcb_enlightenments *hve = &vmcb->control.hv_enlightenments; struct hv_vmcb_enlightenments *hve = &vmcb->control.hv_enlightenments;
@ -80,6 +90,11 @@ static inline void svm_hv_update_vp_id(struct vmcb *vmcb, struct kvm_vcpu *vcpu)
} }
#else #else
static inline bool svm_hv_is_enlightened_tlb_enabled(struct kvm_vcpu *vcpu)
{
return false;
}
static inline void svm_hv_init_vmcb(struct vmcb *vmcb) static inline void svm_hv_init_vmcb(struct vmcb *vmcb)
{ {
} }


@ -3868,7 +3868,12 @@ static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu)
exit_qual = 0; exit_qual = 0;
} }
if (ex->has_error_code) { /*
* Unlike AMD's Paged Real Mode, which reports an error code on #PF
* VM-Exits even if the CPU is in Real Mode, Intel VMX never sets the
* "has error code" flags on VM-Exit if the CPU is in Real Mode.
*/
if (ex->has_error_code && is_protmode(vcpu)) {
/* /*
* Intel CPUs do not generate error codes with bits 31:16 set, * Intel CPUs do not generate error codes with bits 31:16 set,
* and more importantly VMX disallows setting bits 31:16 in the * and more importantly VMX disallows setting bits 31:16 in the


@ -4432,6 +4432,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_VAPIC: case KVM_CAP_VAPIC:
case KVM_CAP_ENABLE_CAP: case KVM_CAP_ENABLE_CAP:
case KVM_CAP_VM_DISABLE_NX_HUGE_PAGES: case KVM_CAP_VM_DISABLE_NX_HUGE_PAGES:
case KVM_CAP_IRQFD_RESAMPLE:
r = 1; r = 1;
break; break;
case KVM_CAP_EXIT_HYPERCALL: case KVM_CAP_EXIT_HYPERCALL:
@ -8903,6 +8904,8 @@ restart:
} }
if (ctxt->have_exception) { if (ctxt->have_exception) {
WARN_ON_ONCE(vcpu->mmio_needed && !vcpu->mmio_is_write);
vcpu->mmio_needed = false;
r = 1; r = 1;
inject_emulated_exception(vcpu); inject_emulated_exception(vcpu);
} else if (vcpu->arch.pio.count) { } else if (vcpu->arch.pio.count) {
@ -9906,13 +9909,20 @@ int kvm_check_nested_events(struct kvm_vcpu *vcpu)
static void kvm_inject_exception(struct kvm_vcpu *vcpu) static void kvm_inject_exception(struct kvm_vcpu *vcpu)
{ {
/*
* Suppress the error code if the vCPU is in Real Mode, as Real Mode
* exceptions don't report error codes. The presence of an error code
* is carried with the exception and only stripped when the exception
* is injected as intercepted #PF VM-Exits for AMD's Paged Real Mode do
* report an error code despite the CPU being in Real Mode.
*/
vcpu->arch.exception.has_error_code &= is_protmode(vcpu);
trace_kvm_inj_exception(vcpu->arch.exception.vector, trace_kvm_inj_exception(vcpu->arch.exception.vector,
vcpu->arch.exception.has_error_code, vcpu->arch.exception.has_error_code,
vcpu->arch.exception.error_code, vcpu->arch.exception.error_code,
vcpu->arch.exception.injected); vcpu->arch.exception.injected);
if (vcpu->arch.exception.error_code && !is_protmode(vcpu))
vcpu->arch.exception.error_code = false;
static_call(kvm_x86_inject_exception)(vcpu); static_call(kvm_x86_inject_exception)(vcpu);
} }


@ -539,7 +539,7 @@ static size_t kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl) void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{ {
size_t len; size_t len, off = 0;
if (!sp) if (!sp)
sp = stack_pointer(task); sp = stack_pointer(task);
@ -548,9 +548,17 @@ void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
kstack_depth_to_print * STACK_DUMP_ENTRY_SIZE); kstack_depth_to_print * STACK_DUMP_ENTRY_SIZE);
printk("%sStack:\n", loglvl); printk("%sStack:\n", loglvl);
print_hex_dump(loglvl, " ", DUMP_PREFIX_NONE, while (off < len) {
STACK_DUMP_LINE_SIZE, STACK_DUMP_ENTRY_SIZE, u8 line[STACK_DUMP_LINE_SIZE];
sp, len, false); size_t line_len = len - off > STACK_DUMP_LINE_SIZE ?
STACK_DUMP_LINE_SIZE : len - off;
__memcpy(line, (u8 *)sp + off, line_len);
print_hex_dump(loglvl, " ", DUMP_PREFIX_NONE,
STACK_DUMP_LINE_SIZE, STACK_DUMP_ENTRY_SIZE,
line, line_len, false);
off += STACK_DUMP_LINE_SIZE;
}
show_trace(task, sp, loglvl); show_trace(task, sp, loglvl);
} }
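
The new loop copies each STACK_DUMP_LINE_SIZE chunk into a local buffer with __memcpy() and hex-dumps the copy, instead of pointing print_hex_dump() at the stack memory directly. A minimal userspace sketch of the same bounce-buffer chunking, where a hypothetical LINE_SIZE and plain memcpy() stand in for STACK_DUMP_LINE_SIZE and the kernel helpers:

#include <stdio.h>
#include <string.h>

#define LINE_SIZE 16	/* stands in for STACK_DUMP_LINE_SIZE */

/* Dump 'len' bytes from 'src' in LINE_SIZE chunks via a bounce buffer. */
static void hex_dump_chunked(const unsigned char *src, size_t len)
{
	unsigned char line[LINE_SIZE];
	size_t off = 0;

	while (off < len) {
		size_t line_len = len - off > LINE_SIZE ? LINE_SIZE : len - off;

		memcpy(line, src + off, line_len);	/* copy first, then print the copy */
		for (size_t i = 0; i < line_len; i++)
			printf("%02x%s", line[i], i + 1 == line_len ? "\n" : " ");
		off += LINE_SIZE;
	}
}

int main(void)
{
	unsigned char stack[40];

	for (size_t i = 0; i < sizeof(stack); i++)
		stack[i] = (unsigned char)i;
	hex_dump_chunked(stack, sizeof(stack));
	return 0;
}
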


@ -1359,8 +1359,6 @@ bool blk_rq_is_poll(struct request *rq)
return false; return false;
if (rq->mq_hctx->type != HCTX_TYPE_POLL) if (rq->mq_hctx->type != HCTX_TYPE_POLL)
return false; return false;
if (WARN_ON_ONCE(!rq->bio))
return false;
return true; return true;
} }
EXPORT_SYMBOL_GPL(blk_rq_is_poll); EXPORT_SYMBOL_GPL(blk_rq_is_poll);
@ -1368,7 +1366,7 @@ EXPORT_SYMBOL_GPL(blk_rq_is_poll);
static void blk_rq_poll_completion(struct request *rq, struct completion *wait) static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
{ {
do { do {
bio_poll(rq->bio, NULL, 0); blk_mq_poll(rq->q, blk_rq_to_qc(rq), NULL, 0);
cond_resched(); cond_resched();
} while (!completion_done(wait)); } while (!completion_done(wait));
} }


@ -368,7 +368,6 @@ int disk_scan_partitions(struct gendisk *disk, fmode_t mode)
if (disk->open_partitions) if (disk->open_partitions)
return -EBUSY; return -EBUSY;
set_bit(GD_NEED_PART_SCAN, &disk->state);
/* /*
* If the device is opened exclusively by current thread already, it's * If the device is opened exclusively by current thread already, it's
* safe to scan partitions, otherwise, use bd_prepare_to_claim() to * safe to scan partitions, otherwise, use bd_prepare_to_claim() to
@ -381,12 +380,19 @@ int disk_scan_partitions(struct gendisk *disk, fmode_t mode)
return ret; return ret;
} }
set_bit(GD_NEED_PART_SCAN, &disk->state);
bdev = blkdev_get_by_dev(disk_devt(disk), mode & ~FMODE_EXCL, NULL); bdev = blkdev_get_by_dev(disk_devt(disk), mode & ~FMODE_EXCL, NULL);
if (IS_ERR(bdev)) if (IS_ERR(bdev))
ret = PTR_ERR(bdev); ret = PTR_ERR(bdev);
else else
blkdev_put(bdev, mode & ~FMODE_EXCL); blkdev_put(bdev, mode & ~FMODE_EXCL);
/*
* If blkdev_get_by_dev() failed early, GD_NEED_PART_SCAN is still set,
* and this will cause a re-assembled partitioned raid device to
* create partitions for the underlying disk.
*/
clear_bit(GD_NEED_PART_SCAN, &disk->state);
if (!(mode & FMODE_EXCL)) if (!(mode & FMODE_EXCL))
bd_abort_claiming(disk->part0, disk_scan_partitions); bd_abort_claiming(disk->part0, disk_scan_partitions);
return ret; return ret;


@ -8,7 +8,6 @@
#include <linux/pci.h> #include <linux/pci.h>
#include <drm/drm_accel.h> #include <drm/drm_accel.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h> #include <drm/drm_file.h>
#include <drm/drm_gem.h> #include <drm/drm_gem.h>
#include <drm/drm_ioctl.h> #include <drm/drm_ioctl.h>
@ -118,6 +117,10 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
struct pci_dev *pdev = to_pci_dev(vdev->drm.dev); struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
struct drm_ivpu_param *args = data; struct drm_ivpu_param *args = data;
int ret = 0; int ret = 0;
int idx;
if (!drm_dev_enter(dev, &idx))
return -ENODEV;
switch (args->param) { switch (args->param) {
case DRM_IVPU_PARAM_DEVICE_ID: case DRM_IVPU_PARAM_DEVICE_ID:
@ -171,6 +174,7 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
break; break;
} }
drm_dev_exit(idx);
return ret; return ret;
} }
@ -470,8 +474,8 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
vdev->hw->ops = &ivpu_hw_mtl_ops; vdev->hw->ops = &ivpu_hw_mtl_ops;
vdev->platform = IVPU_PLATFORM_INVALID; vdev->platform = IVPU_PLATFORM_INVALID;
vdev->context_xa_limit.min = IVPU_GLOBAL_CONTEXT_MMU_SSID + 1; vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
vdev->context_xa_limit.max = IVPU_CONTEXT_LIMIT; vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID;
atomic64_set(&vdev->unique_id_counter, 0); atomic64_set(&vdev->unique_id_counter, 0);
xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC); xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC);
xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1); xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
@ -565,6 +569,8 @@ err_mmu_gctx_fini:
ivpu_mmu_global_context_fini(vdev); ivpu_mmu_global_context_fini(vdev);
err_power_down: err_power_down:
ivpu_hw_power_down(vdev); ivpu_hw_power_down(vdev);
if (IVPU_WA(d3hot_after_power_off))
pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
err_xa_destroy: err_xa_destroy:
xa_destroy(&vdev->submitted_jobs_xa); xa_destroy(&vdev->submitted_jobs_xa);
xa_destroy(&vdev->context_xa); xa_destroy(&vdev->context_xa);
@ -575,7 +581,11 @@ static void ivpu_dev_fini(struct ivpu_device *vdev)
{ {
ivpu_pm_disable(vdev); ivpu_pm_disable(vdev);
ivpu_shutdown(vdev); ivpu_shutdown(vdev);
if (IVPU_WA(d3hot_after_power_off))
pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
ivpu_job_done_thread_fini(vdev); ivpu_job_done_thread_fini(vdev);
ivpu_pm_cancel_recovery(vdev);
ivpu_ipc_fini(vdev); ivpu_ipc_fini(vdev);
ivpu_fw_fini(vdev); ivpu_fw_fini(vdev);
ivpu_mmu_global_context_fini(vdev); ivpu_mmu_global_context_fini(vdev);
@ -622,7 +632,7 @@ static void ivpu_remove(struct pci_dev *pdev)
{ {
struct ivpu_device *vdev = pci_get_drvdata(pdev); struct ivpu_device *vdev = pci_get_drvdata(pdev);
drm_dev_unregister(&vdev->drm); drm_dev_unplug(&vdev->drm);
ivpu_dev_fini(vdev); ivpu_dev_fini(vdev);
} }


@ -7,6 +7,7 @@
#define __IVPU_DRV_H__ #define __IVPU_DRV_H__
#include <drm/drm_device.h> #include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_managed.h> #include <drm/drm_managed.h>
#include <drm/drm_mm.h> #include <drm/drm_mm.h>
#include <drm/drm_print.h> #include <drm/drm_print.h>
@ -24,7 +25,10 @@
#define PCI_DEVICE_ID_MTL 0x7d1d #define PCI_DEVICE_ID_MTL 0x7d1d
#define IVPU_GLOBAL_CONTEXT_MMU_SSID 0 #define IVPU_GLOBAL_CONTEXT_MMU_SSID 0
#define IVPU_CONTEXT_LIMIT 64 /* SSID 1 is used by the VPU to represent invalid context */
#define IVPU_USER_CONTEXT_MIN_SSID 2
#define IVPU_USER_CONTEXT_MAX_SSID (IVPU_USER_CONTEXT_MIN_SSID + 63)
#define IVPU_NUM_ENGINES 2 #define IVPU_NUM_ENGINES 2
#define IVPU_PLATFORM_SILICON 0 #define IVPU_PLATFORM_SILICON 0
@ -70,6 +74,7 @@
struct ivpu_wa_table { struct ivpu_wa_table {
bool punit_disabled; bool punit_disabled;
bool clear_runtime_mem; bool clear_runtime_mem;
bool d3hot_after_power_off;
}; };
struct ivpu_hw_info; struct ivpu_hw_info;


@ -12,24 +12,23 @@
#include "ivpu_mmu.h" #include "ivpu_mmu.h"
#include "ivpu_pm.h" #include "ivpu_pm.h"
#define TILE_FUSE_ENABLE_BOTH 0x0 #define TILE_FUSE_ENABLE_BOTH 0x0
#define TILE_FUSE_ENABLE_UPPER 0x1 #define TILE_SKU_BOTH_MTL 0x3630
#define TILE_FUSE_ENABLE_LOWER 0x2
#define TILE_SKU_BOTH_MTL 0x3630
#define TILE_SKU_LOWER_MTL 0x3631
#define TILE_SKU_UPPER_MTL 0x3632
/* Work point configuration values */ /* Work point configuration values */
#define WP_CONFIG_1_TILE_5_3_RATIO 0x0101 #define CONFIG_1_TILE 0x01
#define WP_CONFIG_1_TILE_4_3_RATIO 0x0102 #define CONFIG_2_TILE 0x02
#define WP_CONFIG_2_TILE_5_3_RATIO 0x0201 #define PLL_RATIO_5_3 0x01
#define WP_CONFIG_2_TILE_4_3_RATIO 0x0202 #define PLL_RATIO_4_3 0x02
#define WP_CONFIG_0_TILE_PLL_OFF 0x0000 #define WP_CONFIG(tile, ratio) (((tile) << 8) | (ratio))
#define WP_CONFIG_1_TILE_5_3_RATIO WP_CONFIG(CONFIG_1_TILE, PLL_RATIO_5_3)
#define WP_CONFIG_1_TILE_4_3_RATIO WP_CONFIG(CONFIG_1_TILE, PLL_RATIO_4_3)
#define WP_CONFIG_2_TILE_5_3_RATIO WP_CONFIG(CONFIG_2_TILE, PLL_RATIO_5_3)
#define WP_CONFIG_2_TILE_4_3_RATIO WP_CONFIG(CONFIG_2_TILE, PLL_RATIO_4_3)
#define WP_CONFIG_0_TILE_PLL_OFF WP_CONFIG(0, 0)
#define PLL_REF_CLK_FREQ (50 * 1000000) #define PLL_REF_CLK_FREQ (50 * 1000000)
#define PLL_SIMULATION_FREQ (10 * 1000000) #define PLL_SIMULATION_FREQ (10 * 1000000)
#define PLL_RATIO_TO_FREQ(x) ((x) * PLL_REF_CLK_FREQ)
#define PLL_DEFAULT_EPP_VALUE 0x80 #define PLL_DEFAULT_EPP_VALUE 0x80
#define TIM_SAFE_ENABLE 0xf1d0dead #define TIM_SAFE_ENABLE 0xf1d0dead
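
The new WP_CONFIG(tile, ratio) macro just packs the tile count into bits 15:8 and the PLL ratio selector into bits 7:0, so the named configurations still expand to the old literal values (for example WP_CONFIG_2_TILE_4_3_RATIO is 0x0202). A small userspace sketch that only reproduces the definitions above:

#include <stdio.h>

#define CONFIG_1_TILE  0x01
#define CONFIG_2_TILE  0x02
#define PLL_RATIO_5_3  0x01
#define PLL_RATIO_4_3  0x02
#define WP_CONFIG(tile, ratio) (((tile) << 8) | (ratio))

int main(void)
{
	/* Each line should match the literal the driver used before. */
	printf("1 tile, 5/3 ratio: 0x%04x\n", WP_CONFIG(CONFIG_1_TILE, PLL_RATIO_5_3)); /* 0x0101 */
	printf("1 tile, 4/3 ratio: 0x%04x\n", WP_CONFIG(CONFIG_1_TILE, PLL_RATIO_4_3)); /* 0x0102 */
	printf("2 tile, 5/3 ratio: 0x%04x\n", WP_CONFIG(CONFIG_2_TILE, PLL_RATIO_5_3)); /* 0x0201 */
	printf("2 tile, 4/3 ratio: 0x%04x\n", WP_CONFIG(CONFIG_2_TILE, PLL_RATIO_4_3)); /* 0x0202 */
	printf("PLL off:           0x%04x\n", WP_CONFIG(0, 0));                          /* 0x0000 */
	return 0;
}
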
@ -101,6 +100,7 @@ static void ivpu_hw_wa_init(struct ivpu_device *vdev)
{ {
vdev->wa.punit_disabled = ivpu_is_fpga(vdev); vdev->wa.punit_disabled = ivpu_is_fpga(vdev);
vdev->wa.clear_runtime_mem = false; vdev->wa.clear_runtime_mem = false;
vdev->wa.d3hot_after_power_off = true;
} }
static void ivpu_hw_timeouts_init(struct ivpu_device *vdev) static void ivpu_hw_timeouts_init(struct ivpu_device *vdev)
@ -218,7 +218,8 @@ static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable)
config = 0; config = 0;
} }
ivpu_dbg(vdev, PM, "PLL workpoint request: %d Hz\n", PLL_RATIO_TO_FREQ(target_ratio)); ivpu_dbg(vdev, PM, "PLL workpoint request: config 0x%04x pll ratio 0x%x\n",
config, target_ratio);
ret = ivpu_pll_cmd_send(vdev, hw->pll.min_ratio, hw->pll.max_ratio, target_ratio, config); ret = ivpu_pll_cmd_send(vdev, hw->pll.min_ratio, hw->pll.max_ratio, target_ratio, config);
if (ret) { if (ret) {
@ -403,11 +404,6 @@ static int ivpu_boot_host_ss_axi_enable(struct ivpu_device *vdev)
return ivpu_boot_host_ss_axi_drive(vdev, true); return ivpu_boot_host_ss_axi_drive(vdev, true);
} }
static int ivpu_boot_host_ss_axi_disable(struct ivpu_device *vdev)
{
return ivpu_boot_host_ss_axi_drive(vdev, false);
}
static int ivpu_boot_host_ss_top_noc_drive(struct ivpu_device *vdev, bool enable) static int ivpu_boot_host_ss_top_noc_drive(struct ivpu_device *vdev, bool enable)
{ {
int ret; int ret;
@ -441,11 +437,6 @@ static int ivpu_boot_host_ss_top_noc_enable(struct ivpu_device *vdev)
return ivpu_boot_host_ss_top_noc_drive(vdev, true); return ivpu_boot_host_ss_top_noc_drive(vdev, true);
} }
static int ivpu_boot_host_ss_top_noc_disable(struct ivpu_device *vdev)
{
return ivpu_boot_host_ss_top_noc_drive(vdev, false);
}
static void ivpu_boot_pwr_island_trickle_drive(struct ivpu_device *vdev, bool enable) static void ivpu_boot_pwr_island_trickle_drive(struct ivpu_device *vdev, bool enable)
{ {
u32 val = REGV_RD32(MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0); u32 val = REGV_RD32(MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0);
@ -504,16 +495,6 @@ static void ivpu_boot_dpu_active_drive(struct ivpu_device *vdev, bool enable)
REGV_WR32(MTL_VPU_HOST_SS_AON_DPU_ACTIVE, val); REGV_WR32(MTL_VPU_HOST_SS_AON_DPU_ACTIVE, val);
} }
static int ivpu_boot_pwr_domain_disable(struct ivpu_device *vdev)
{
ivpu_boot_dpu_active_drive(vdev, false);
ivpu_boot_pwr_island_isolation_drive(vdev, true);
ivpu_boot_pwr_island_trickle_drive(vdev, false);
ivpu_boot_pwr_island_drive(vdev, false);
return ivpu_boot_wait_for_pwr_island_status(vdev, 0x0);
}
static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev) static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev)
{ {
int ret; int ret;
@ -629,34 +610,10 @@ static int ivpu_boot_d0i3_drive(struct ivpu_device *vdev, bool enable)
static int ivpu_hw_mtl_info_init(struct ivpu_device *vdev) static int ivpu_hw_mtl_info_init(struct ivpu_device *vdev)
{ {
struct ivpu_hw_info *hw = vdev->hw; struct ivpu_hw_info *hw = vdev->hw;
u32 tile_fuse;
tile_fuse = REGB_RD32(MTL_BUTTRESS_TILE_FUSE); hw->tile_fuse = TILE_FUSE_ENABLE_BOTH;
if (!REG_TEST_FLD(MTL_BUTTRESS_TILE_FUSE, VALID, tile_fuse)) hw->sku = TILE_SKU_BOTH_MTL;
ivpu_warn(vdev, "Tile Fuse: Invalid (0x%x)\n", tile_fuse); hw->config = WP_CONFIG_2_TILE_4_3_RATIO;
hw->tile_fuse = REG_GET_FLD(MTL_BUTTRESS_TILE_FUSE, SKU, tile_fuse);
switch (hw->tile_fuse) {
case TILE_FUSE_ENABLE_LOWER:
hw->sku = TILE_SKU_LOWER_MTL;
hw->config = WP_CONFIG_1_TILE_5_3_RATIO;
ivpu_dbg(vdev, MISC, "Tile Fuse: Enable Lower\n");
break;
case TILE_FUSE_ENABLE_UPPER:
hw->sku = TILE_SKU_UPPER_MTL;
hw->config = WP_CONFIG_1_TILE_4_3_RATIO;
ivpu_dbg(vdev, MISC, "Tile Fuse: Enable Upper\n");
break;
case TILE_FUSE_ENABLE_BOTH:
hw->sku = TILE_SKU_BOTH_MTL;
hw->config = WP_CONFIG_2_TILE_5_3_RATIO;
ivpu_dbg(vdev, MISC, "Tile Fuse: Enable Both\n");
break;
default:
hw->config = WP_CONFIG_0_TILE_PLL_OFF;
ivpu_dbg(vdev, MISC, "Tile Fuse: Disable\n");
break;
}
ivpu_pll_init_frequency_ratios(vdev); ivpu_pll_init_frequency_ratios(vdev);
@ -797,21 +754,8 @@ static int ivpu_hw_mtl_power_down(struct ivpu_device *vdev)
{ {
int ret = 0; int ret = 0;
/* FPGA requires manual clearing of IP_Reset bit by enabling quiescent state */ if (ivpu_hw_mtl_reset(vdev)) {
if (ivpu_is_fpga(vdev)) { ivpu_err(vdev, "Failed to reset the VPU\n");
if (ivpu_boot_host_ss_top_noc_disable(vdev)) {
ivpu_err(vdev, "Failed to disable TOP NOC\n");
ret = -EIO;
}
if (ivpu_boot_host_ss_axi_disable(vdev)) {
ivpu_err(vdev, "Failed to disable AXI\n");
ret = -EIO;
}
}
if (ivpu_boot_pwr_domain_disable(vdev)) {
ivpu_err(vdev, "Failed to disable power domain\n");
ret = -EIO; ret = -EIO;
} }
@ -844,6 +788,19 @@ static void ivpu_hw_mtl_wdt_disable(struct ivpu_device *vdev)
REGV_WR32(MTL_VPU_CPU_SS_TIM_GEN_CONFIG, val); REGV_WR32(MTL_VPU_CPU_SS_TIM_GEN_CONFIG, val);
} }
static u32 ivpu_hw_mtl_pll_to_freq(u32 ratio, u32 config)
{
u32 pll_clock = PLL_REF_CLK_FREQ * ratio;
u32 cpu_clock;
if ((config & 0xff) == PLL_RATIO_4_3)
cpu_clock = pll_clock * 2 / 4;
else
cpu_clock = pll_clock * 2 / 5;
return cpu_clock;
}
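
With PLL_REF_CLK_FREQ at 50 MHz, the helper above derives the CPU clock from the current PLL ratio and the low byte of the workpoint config: a ratio of 16, for instance, gives an 800 MHz PLL clock, which becomes 400 MHz under a 4/3 config and 320 MHz under a 5/3 config. A standalone sketch of the same arithmetic (the ratio value 16 is only an illustration, not a value read from hardware):

#include <stdio.h>

#define PLL_REF_CLK_FREQ (50 * 1000000)
#define PLL_RATIO_4_3    0x02

/* Mirrors ivpu_hw_mtl_pll_to_freq(): the config's low byte selects the divider. */
static unsigned int pll_to_freq(unsigned int ratio, unsigned int config)
{
	unsigned int pll_clock = PLL_REF_CLK_FREQ * ratio;

	if ((config & 0xff) == PLL_RATIO_4_3)
		return pll_clock * 2 / 4;
	return pll_clock * 2 / 5;
}

int main(void)
{
	unsigned int ratio = 16;	/* illustrative ratio only */

	printf("4/3 config: %u Hz\n", pll_to_freq(ratio, 0x0202));	/* 400000000 */
	printf("5/3 config: %u Hz\n", pll_to_freq(ratio, 0x0201));	/* 320000000 */
	return 0;
}
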
/* Register indirect accesses */ /* Register indirect accesses */
static u32 ivpu_hw_mtl_reg_pll_freq_get(struct ivpu_device *vdev) static u32 ivpu_hw_mtl_reg_pll_freq_get(struct ivpu_device *vdev)
{ {
@ -855,7 +812,7 @@ static u32 ivpu_hw_mtl_reg_pll_freq_get(struct ivpu_device *vdev)
if (!ivpu_is_silicon(vdev)) if (!ivpu_is_silicon(vdev))
return PLL_SIMULATION_FREQ; return PLL_SIMULATION_FREQ;
return PLL_RATIO_TO_FREQ(pll_curr_ratio); return ivpu_hw_mtl_pll_to_freq(pll_curr_ratio, vdev->hw->config);
} }
static u32 ivpu_hw_mtl_reg_telemetry_offset_get(struct ivpu_device *vdev) static u32 ivpu_hw_mtl_reg_telemetry_offset_get(struct ivpu_device *vdev)


@ -21,7 +21,7 @@ struct ivpu_bo;
#define IVPU_IPC_ALIGNMENT 64 #define IVPU_IPC_ALIGNMENT 64
#define IVPU_IPC_HDR_FREE 0 #define IVPU_IPC_HDR_FREE 0
#define IVPU_IPC_HDR_ALLOCATED 0 #define IVPU_IPC_HDR_ALLOCATED 1
/** /**
* struct ivpu_ipc_hdr - The IPC message header structure, exchanged * struct ivpu_ipc_hdr - The IPC message header structure, exchanged


@ -461,26 +461,22 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32
job->cmd_buf_vpu_addr = bo->vpu_addr + commands_offset; job->cmd_buf_vpu_addr = bo->vpu_addr + commands_offset;
ret = drm_gem_lock_reservations((struct drm_gem_object **)job->bos, buf_count, ret = drm_gem_lock_reservations((struct drm_gem_object **)job->bos, 1, &acquire_ctx);
&acquire_ctx);
if (ret) { if (ret) {
ivpu_warn(vdev, "Failed to lock reservations: %d\n", ret); ivpu_warn(vdev, "Failed to lock reservations: %d\n", ret);
return ret; return ret;
} }
for (i = 0; i < buf_count; i++) { ret = dma_resv_reserve_fences(bo->base.resv, 1);
ret = dma_resv_reserve_fences(job->bos[i]->base.resv, 1); if (ret) {
if (ret) { ivpu_warn(vdev, "Failed to reserve fences: %d\n", ret);
ivpu_warn(vdev, "Failed to reserve fences: %d\n", ret); goto unlock_reservations;
goto unlock_reservations;
}
} }
for (i = 0; i < buf_count; i++) dma_resv_add_fence(bo->base.resv, job->done_fence, DMA_RESV_USAGE_WRITE);
dma_resv_add_fence(job->bos[i]->base.resv, job->done_fence, DMA_RESV_USAGE_WRITE);
unlock_reservations: unlock_reservations:
drm_gem_unlock_reservations((struct drm_gem_object **)job->bos, buf_count, &acquire_ctx); drm_gem_unlock_reservations((struct drm_gem_object **)job->bos, 1, &acquire_ctx);
wmb(); /* Flush write combining buffers */ wmb(); /* Flush write combining buffers */
@ -489,12 +485,12 @@ unlock_reservations:
int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file) int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{ {
int ret = 0;
struct ivpu_file_priv *file_priv = file->driver_priv; struct ivpu_file_priv *file_priv = file->driver_priv;
struct ivpu_device *vdev = file_priv->vdev; struct ivpu_device *vdev = file_priv->vdev;
struct drm_ivpu_submit *params = data; struct drm_ivpu_submit *params = data;
struct ivpu_job *job; struct ivpu_job *job;
u32 *buf_handles; u32 *buf_handles;
int idx, ret;
if (params->engine > DRM_IVPU_ENGINE_COPY) if (params->engine > DRM_IVPU_ENGINE_COPY)
return -EINVAL; return -EINVAL;
@ -523,6 +519,11 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
goto free_handles; goto free_handles;
} }
if (!drm_dev_enter(&vdev->drm, &idx)) {
ret = -ENODEV;
goto free_handles;
}
ivpu_dbg(vdev, JOB, "Submit ioctl: ctx %u buf_count %u\n", ivpu_dbg(vdev, JOB, "Submit ioctl: ctx %u buf_count %u\n",
file_priv->ctx.id, params->buffer_count); file_priv->ctx.id, params->buffer_count);
@ -530,7 +531,7 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
if (!job) { if (!job) {
ivpu_err(vdev, "Failed to create job\n"); ivpu_err(vdev, "Failed to create job\n");
ret = -ENOMEM; ret = -ENOMEM;
goto free_handles; goto dev_exit;
} }
ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, params->buffer_count, ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, params->buffer_count,
@ -548,6 +549,8 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
job_put: job_put:
job_put(job); job_put(job);
dev_exit:
drm_dev_exit(idx);
free_handles: free_handles:
kfree(buf_handles); kfree(buf_handles);


@ -98,12 +98,18 @@ retry:
static void ivpu_pm_recovery_work(struct work_struct *work) static void ivpu_pm_recovery_work(struct work_struct *work)
{ {
struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, recovery_work); struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, recovery_work);
struct ivpu_device *vdev = pm->vdev; struct ivpu_device *vdev = pm->vdev;
char *evt[2] = {"IVPU_PM_EVENT=IVPU_RECOVER", NULL}; char *evt[2] = {"IVPU_PM_EVENT=IVPU_RECOVER", NULL};
int ret; int ret;
ret = pci_reset_function(to_pci_dev(vdev->drm.dev)); retry:
if (ret) ret = pci_try_reset_function(to_pci_dev(vdev->drm.dev));
if (ret == -EAGAIN && !drm_dev_is_unplugged(&vdev->drm)) {
cond_resched();
goto retry;
}
if (ret && ret != -EAGAIN)
ivpu_err(vdev, "Failed to reset VPU: %d\n", ret); ivpu_err(vdev, "Failed to reset VPU: %d\n", ret);
kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt); kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt);
@ -134,32 +140,28 @@ int ivpu_pm_suspend_cb(struct device *dev)
{ {
struct drm_device *drm = dev_get_drvdata(dev); struct drm_device *drm = dev_get_drvdata(dev);
struct ivpu_device *vdev = to_ivpu_device(drm); struct ivpu_device *vdev = to_ivpu_device(drm);
int ret; unsigned long timeout;
ivpu_dbg(vdev, PM, "Suspend..\n"); ivpu_dbg(vdev, PM, "Suspend..\n");
ret = ivpu_suspend(vdev); timeout = jiffies + msecs_to_jiffies(vdev->timeout.tdr);
if (ret && vdev->pm->suspend_reschedule_counter) { while (!ivpu_hw_is_idle(vdev)) {
ivpu_dbg(vdev, PM, "Failed to enter idle, rescheduling suspend, retries left %d\n", cond_resched();
vdev->pm->suspend_reschedule_counter); if (time_after_eq(jiffies, timeout)) {
pm_schedule_suspend(dev, vdev->timeout.reschedule_suspend); ivpu_err(vdev, "Failed to enter idle on system suspend\n");
vdev->pm->suspend_reschedule_counter--; return -EBUSY;
return -EBUSY; }
} else if (!vdev->pm->suspend_reschedule_counter) {
ivpu_warn(vdev, "Failed to enter idle, force suspend\n");
ivpu_pm_prepare_cold_boot(vdev);
} else {
ivpu_pm_prepare_warm_boot(vdev);
} }
vdev->pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT; ivpu_suspend(vdev);
ivpu_pm_prepare_warm_boot(vdev);
pci_save_state(to_pci_dev(dev)); pci_save_state(to_pci_dev(dev));
pci_set_power_state(to_pci_dev(dev), PCI_D3hot); pci_set_power_state(to_pci_dev(dev), PCI_D3hot);
ivpu_dbg(vdev, PM, "Suspend done.\n"); ivpu_dbg(vdev, PM, "Suspend done.\n");
return ret; return 0;
} }
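
The reworked suspend callback replaces the reschedule-and-retry dance with a plain bounded poll: keep checking ivpu_hw_is_idle() until the TDR timeout expires, then fail the suspend with -EBUSY. The same bounded-poll shape in portable C, with a hypothetical is_idle() predicate standing in for the hardware check:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool is_idle(void)
{
	static int calls;
	return ++calls > 3;	/* pretend the device goes idle after a few polls */
}

/* Poll until idle or until timeout_ms elapses; -EBUSY mirrors the driver. */
static int wait_for_idle(unsigned int timeout_ms)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	while (!is_idle()) {
		clock_gettime(CLOCK_MONOTONIC, &now);
		unsigned long elapsed_ms =
			(now.tv_sec - start.tv_sec) * 1000UL +
			(now.tv_nsec - start.tv_nsec) / 1000000L;
		if (elapsed_ms >= timeout_ms)
			return -EBUSY;
	}
	return 0;
}

int main(void)
{
	printf("wait_for_idle: %d\n", wait_for_idle(100));
	return 0;
}
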
int ivpu_pm_resume_cb(struct device *dev) int ivpu_pm_resume_cb(struct device *dev)
@ -306,6 +308,11 @@ int ivpu_pm_init(struct ivpu_device *vdev)
return 0; return 0;
} }
void ivpu_pm_cancel_recovery(struct ivpu_device *vdev)
{
cancel_work_sync(&vdev->pm->recovery_work);
}
void ivpu_pm_enable(struct ivpu_device *vdev) void ivpu_pm_enable(struct ivpu_device *vdev)
{ {
struct device *dev = vdev->drm.dev; struct device *dev = vdev->drm.dev;


@ -21,6 +21,7 @@ struct ivpu_pm_info {
int ivpu_pm_init(struct ivpu_device *vdev); int ivpu_pm_init(struct ivpu_device *vdev);
void ivpu_pm_enable(struct ivpu_device *vdev); void ivpu_pm_enable(struct ivpu_device *vdev);
void ivpu_pm_disable(struct ivpu_device *vdev); void ivpu_pm_disable(struct ivpu_device *vdev);
void ivpu_pm_cancel_recovery(struct ivpu_device *vdev);
int ivpu_pm_suspend_cb(struct device *dev); int ivpu_pm_suspend_cb(struct device *dev);
int ivpu_pm_resume_cb(struct device *dev); int ivpu_pm_resume_cb(struct device *dev);


@ -1984,6 +1984,7 @@ static int instance;
static int acpi_video_bus_add(struct acpi_device *device) static int acpi_video_bus_add(struct acpi_device *device)
{ {
struct acpi_video_bus *video; struct acpi_video_bus *video;
bool auto_detect;
int error; int error;
acpi_status status; acpi_status status;
@ -2045,10 +2046,20 @@ static int acpi_video_bus_add(struct acpi_device *device)
mutex_unlock(&video_list_lock); mutex_unlock(&video_list_lock);
/* /*
* The userspace visible backlight_device gets registered separately * If backlight-type auto-detection is used then a native backlight may
* from acpi_video_register_backlight(). * show up later and this may change the result from video to native.
* Therefore normally the userspace visible /sys/class/backlight device
* gets registered separately by the GPU driver calling
* acpi_video_register_backlight() when an internal panel is detected.
* Register the backlight now when not using auto-detection, so that
* when the kernel cmdline or DMI-quirks are used the backlight will
* get registered even if acpi_video_register_backlight() is not called.
*/ */
acpi_video_run_bcl_for_osi(video); acpi_video_run_bcl_for_osi(video);
if (__acpi_video_get_backlight_type(false, &auto_detect) == acpi_backlight_video &&
!auto_detect)
acpi_video_bus_register_backlight(video);
acpi_video_bus_add_notify_handler(video); acpi_video_bus_add_notify_handler(video);
return 0; return 0;


@ -459,85 +459,67 @@ out_free:
Notification Handling Notification Handling
-------------------------------------------------------------------------- */ -------------------------------------------------------------------------- */
/* /**
* acpi_bus_notify * acpi_bus_notify - Global system-level (0x00-0x7F) notifications handler
* --------------- * @handle: Target ACPI object.
* Callback for all 'system-level' device notifications (values 0x00-0x7F). * @type: Notification type.
* @data: Ignored.
*
* This only handles notifications related to device hotplug.
*/ */
static void acpi_bus_notify(acpi_handle handle, u32 type, void *data) static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
{ {
struct acpi_device *adev; struct acpi_device *adev;
u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
bool hotplug_event = false;
switch (type) { switch (type) {
case ACPI_NOTIFY_BUS_CHECK: case ACPI_NOTIFY_BUS_CHECK:
acpi_handle_debug(handle, "ACPI_NOTIFY_BUS_CHECK event\n"); acpi_handle_debug(handle, "ACPI_NOTIFY_BUS_CHECK event\n");
hotplug_event = true;
break; break;
case ACPI_NOTIFY_DEVICE_CHECK: case ACPI_NOTIFY_DEVICE_CHECK:
acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_CHECK event\n"); acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_CHECK event\n");
hotplug_event = true;
break; break;
case ACPI_NOTIFY_DEVICE_WAKE: case ACPI_NOTIFY_DEVICE_WAKE:
acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_WAKE event\n"); acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_WAKE event\n");
break; return;
case ACPI_NOTIFY_EJECT_REQUEST: case ACPI_NOTIFY_EJECT_REQUEST:
acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event\n"); acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event\n");
hotplug_event = true;
break; break;
case ACPI_NOTIFY_DEVICE_CHECK_LIGHT: case ACPI_NOTIFY_DEVICE_CHECK_LIGHT:
acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_CHECK_LIGHT event\n"); acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_CHECK_LIGHT event\n");
/* TBD: Exactly what does 'light' mean? */ /* TBD: Exactly what does 'light' mean? */
break; return;
case ACPI_NOTIFY_FREQUENCY_MISMATCH: case ACPI_NOTIFY_FREQUENCY_MISMATCH:
acpi_handle_err(handle, "Device cannot be configured due " acpi_handle_err(handle, "Device cannot be configured due "
"to a frequency mismatch\n"); "to a frequency mismatch\n");
break; return;
case ACPI_NOTIFY_BUS_MODE_MISMATCH: case ACPI_NOTIFY_BUS_MODE_MISMATCH:
acpi_handle_err(handle, "Device cannot be configured due " acpi_handle_err(handle, "Device cannot be configured due "
"to a bus mode mismatch\n"); "to a bus mode mismatch\n");
break; return;
case ACPI_NOTIFY_POWER_FAULT: case ACPI_NOTIFY_POWER_FAULT:
acpi_handle_err(handle, "Device has suffered a power fault\n"); acpi_handle_err(handle, "Device has suffered a power fault\n");
break; return;
default: default:
acpi_handle_debug(handle, "Unknown event type 0x%x\n", type); acpi_handle_debug(handle, "Unknown event type 0x%x\n", type);
break;
}
adev = acpi_get_acpi_dev(handle);
if (!adev)
goto err;
if (adev->dev.driver) {
struct acpi_driver *driver = to_acpi_driver(adev->dev.driver);
if (driver && driver->ops.notify &&
(driver->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS))
driver->ops.notify(adev, type);
}
if (!hotplug_event) {
acpi_put_acpi_dev(adev);
return; return;
} }
if (ACPI_SUCCESS(acpi_hotplug_schedule(adev, type))) adev = acpi_get_acpi_dev(handle);
if (adev && ACPI_SUCCESS(acpi_hotplug_schedule(adev, type)))
return; return;
acpi_put_acpi_dev(adev); acpi_put_acpi_dev(adev);
err: acpi_evaluate_ost(handle, type, ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL);
acpi_evaluate_ost(handle, type, ost_code, NULL);
} }
static void acpi_notify_device(acpi_handle handle, u32 event, void *data) static void acpi_notify_device(acpi_handle handle, u32 event, void *data)
@ -562,42 +544,51 @@ static u32 acpi_device_fixed_event(void *data)
return ACPI_INTERRUPT_HANDLED; return ACPI_INTERRUPT_HANDLED;
} }
static int acpi_device_install_notify_handler(struct acpi_device *device) static int acpi_device_install_notify_handler(struct acpi_device *device,
struct acpi_driver *acpi_drv)
{ {
acpi_status status; acpi_status status;
if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON) if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON) {
status = status =
acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON, acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
acpi_device_fixed_event, acpi_device_fixed_event,
device); device);
else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON) } else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON) {
status = status =
acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON, acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
acpi_device_fixed_event, acpi_device_fixed_event,
device); device);
else } else {
status = acpi_install_notify_handler(device->handle, u32 type = acpi_drv->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS ?
ACPI_DEVICE_NOTIFY, ACPI_ALL_NOTIFY : ACPI_DEVICE_NOTIFY;
status = acpi_install_notify_handler(device->handle, type,
acpi_notify_device, acpi_notify_device,
device); device);
}
if (ACPI_FAILURE(status)) if (ACPI_FAILURE(status))
return -EINVAL; return -EINVAL;
return 0; return 0;
} }
static void acpi_device_remove_notify_handler(struct acpi_device *device) static void acpi_device_remove_notify_handler(struct acpi_device *device,
struct acpi_driver *acpi_drv)
{ {
if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON) if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON) {
acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON, acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
acpi_device_fixed_event); acpi_device_fixed_event);
else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON) } else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON) {
acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON, acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
acpi_device_fixed_event); acpi_device_fixed_event);
else } else {
acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY, u32 type = acpi_drv->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS ?
ACPI_ALL_NOTIFY : ACPI_DEVICE_NOTIFY;
acpi_remove_notify_handler(device->handle, type,
acpi_notify_device); acpi_notify_device);
}
} }
/* Handle events targeting \_SB device (at present only graceful shutdown) */ /* Handle events targeting \_SB device (at present only graceful shutdown) */
@ -1039,7 +1030,7 @@ static int acpi_device_probe(struct device *dev)
acpi_drv->name, acpi_dev->pnp.bus_id); acpi_drv->name, acpi_dev->pnp.bus_id);
if (acpi_drv->ops.notify) { if (acpi_drv->ops.notify) {
ret = acpi_device_install_notify_handler(acpi_dev); ret = acpi_device_install_notify_handler(acpi_dev, acpi_drv);
if (ret) { if (ret) {
if (acpi_drv->ops.remove) if (acpi_drv->ops.remove)
acpi_drv->ops.remove(acpi_dev); acpi_drv->ops.remove(acpi_dev);
@ -1062,7 +1053,7 @@ static void acpi_device_remove(struct device *dev)
struct acpi_driver *acpi_drv = to_acpi_driver(dev->driver); struct acpi_driver *acpi_drv = to_acpi_driver(dev->driver);
if (acpi_drv->ops.notify) if (acpi_drv->ops.notify)
acpi_device_remove_notify_handler(acpi_dev); acpi_device_remove_notify_handler(acpi_dev, acpi_drv);
if (acpi_drv->ops.remove) if (acpi_drv->ops.remove)
acpi_drv->ops.remove(acpi_dev); acpi_drv->ops.remove(acpi_dev);


@ -276,6 +276,43 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
}, },
}, },
/*
* Models which need acpi_video backlight control where the GPU drivers
* do not call acpi_video_register_backlight() because no internal panel
* is detected. Typically these are all-in-ones (monitors with builtin
* PC) where the panel connection shows up as regular DP instead of eDP.
*/
{
.callback = video_detect_force_video,
/* Apple iMac14,1 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "iMac14,1"),
},
},
{
.callback = video_detect_force_video,
/* Apple iMac14,2 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "iMac14,2"),
},
},
/*
* Older models with nvidia GPU which need acpi_video backlight
* control and where the old nvidia binary driver series does not
* call acpi_video_register_backlight().
*/
{
.callback = video_detect_force_video,
/* ThinkPad W530 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W530"),
},
},
/* /*
* These models have a working acpi_video backlight control, and using * These models have a working acpi_video backlight control, and using
* native backlight causes a regression where backlight does not work * native backlight causes a regression where backlight does not work
@ -782,7 +819,7 @@ static bool prefer_native_over_acpi_video(void)
* Determine which type of backlight interface to use on this system, * Determine which type of backlight interface to use on this system,
* First check cmdline, then dmi quirks, then do autodetect. * First check cmdline, then dmi quirks, then do autodetect.
*/ */
static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native) enum acpi_backlight_type __acpi_video_get_backlight_type(bool native, bool *auto_detect)
{ {
static DEFINE_MUTEX(init_mutex); static DEFINE_MUTEX(init_mutex);
static bool nvidia_wmi_ec_present; static bool nvidia_wmi_ec_present;
@ -807,6 +844,9 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
native_available = true; native_available = true;
mutex_unlock(&init_mutex); mutex_unlock(&init_mutex);
if (auto_detect)
*auto_detect = false;
/* /*
* The below heuristics / detection steps are in order of descending * The below heuristics / detection steps are in order of descending
* precedence. The commandline takes precedence over anything else. * precedence. The commandline takes precedence over anything else.
@ -818,6 +858,9 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
if (acpi_backlight_dmi != acpi_backlight_undef) if (acpi_backlight_dmi != acpi_backlight_undef)
return acpi_backlight_dmi; return acpi_backlight_dmi;
if (auto_detect)
*auto_detect = true;
/* Special cases such as nvidia_wmi_ec and apple gmux. */ /* Special cases such as nvidia_wmi_ec and apple gmux. */
if (nvidia_wmi_ec_present) if (nvidia_wmi_ec_present)
return acpi_backlight_nvidia_wmi_ec; return acpi_backlight_nvidia_wmi_ec;
@ -837,15 +880,4 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
/* No ACPI video/native (old hw), use vendor specific fw methods. */ /* No ACPI video/native (old hw), use vendor specific fw methods. */
return acpi_backlight_vendor; return acpi_backlight_vendor;
} }
EXPORT_SYMBOL(__acpi_video_get_backlight_type);
enum acpi_backlight_type acpi_video_get_backlight_type(void)
{
return __acpi_video_get_backlight_type(false);
}
EXPORT_SYMBOL(acpi_video_get_backlight_type);
bool acpi_video_backlight_use_native(void)
{
return __acpi_video_get_backlight_type(true) == acpi_backlight_native;
}
EXPORT_SYMBOL(acpi_video_backlight_use_native);


@ -474,12 +474,18 @@ int detect_cache_attributes(unsigned int cpu)
populate_leaves: populate_leaves:
/* /*
* populate_cache_leaves() may completely setup the cache leaves and * If LLC is valid the cache leaves were already populated so just go to
* shared_cpu_map or it may leave it partially setup. * update the cpu map.
*/ */
ret = populate_cache_leaves(cpu); if (!last_level_cache_is_valid(cpu)) {
if (ret) /*
goto free_ci; * populate_cache_leaves() may completely setup the cache leaves and
* shared_cpu_map or it may leave it partially setup.
*/
ret = populate_cache_leaves(cpu);
if (ret)
goto free_ci;
}
/* /*
* For systems using DT for cache hierarchy, fw_token * For systems using DT for cache hierarchy, fw_token


@ -1010,9 +1010,6 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
/* This is safe, since we have a reference from open(). */ /* This is safe, since we have a reference from open(). */
__module_get(THIS_MODULE); __module_get(THIS_MODULE);
/* suppress uevents while reconfiguring the device */
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
/* /*
* If we don't hold exclusive handle for the device, upgrade to it * If we don't hold exclusive handle for the device, upgrade to it
* here to avoid changing device under exclusive owner. * here to avoid changing device under exclusive owner.
@ -1067,6 +1064,9 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
} }
} }
/* suppress uevents while reconfiguring the device */
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE); disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE);
set_disk_ro(lo->lo_disk, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0); set_disk_ro(lo->lo_disk, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0);
@ -1109,17 +1109,17 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
if (partscan) if (partscan)
clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state); clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
/* enable and uncork uevent now that we are done */
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
loop_global_unlock(lo, is_loop); loop_global_unlock(lo, is_loop);
if (partscan) if (partscan)
loop_reread_partitions(lo); loop_reread_partitions(lo);
if (!(mode & FMODE_EXCL)) if (!(mode & FMODE_EXCL))
bd_abort_claiming(bdev, loop_configure); bd_abort_claiming(bdev, loop_configure);
error = 0; return 0;
done:
/* enable and uncork uevent now that we are done */
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
return error;
out_unlock: out_unlock:
loop_global_unlock(lo, is_loop); loop_global_unlock(lo, is_loop);
@ -1130,7 +1130,7 @@ out_putf:
fput(file); fput(file);
/* This is safe: open() is still holding a reference. */ /* This is safe: open() is still holding a reference. */
module_put(THIS_MODULE); module_put(THIS_MODULE);
goto done; return error;
} }
static void __loop_clr_fd(struct loop_device *lo, bool release) static void __loop_clr_fd(struct loop_device *lo, bool release)


@ -246,7 +246,7 @@ static int ublk_validate_params(const struct ublk_device *ub)
if (ub->params.types & UBLK_PARAM_TYPE_BASIC) { if (ub->params.types & UBLK_PARAM_TYPE_BASIC) {
const struct ublk_param_basic *p = &ub->params.basic; const struct ublk_param_basic *p = &ub->params.basic;
if (p->logical_bs_shift > PAGE_SHIFT) if (p->logical_bs_shift > PAGE_SHIFT || p->logical_bs_shift < 9)
return -EINVAL; return -EINVAL;
if (p->logical_bs_shift > p->physical_bs_shift) if (p->logical_bs_shift > p->physical_bs_shift)
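
The added lower bound rejects logical block sizes below 512 bytes: a logical_bs_shift of 9 means a 512-byte sector (1 << 9), while the existing upper bound keeps the size at or below one page (1 << PAGE_SHIFT). A quick illustration of the accepted range, with PAGE_SHIFT hard-coded to the common 4 KiB case rather than taken from any particular architecture:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages; the real value is per-arch */

/* Same bounds as ublk_validate_params(): 512 bytes up to one page. */
static int valid_logical_bs_shift(unsigned int shift)
{
	return shift >= 9 && shift <= PAGE_SHIFT;
}

int main(void)
{
	for (unsigned int shift = 8; shift <= 13; shift++)
		printf("shift %u (%u bytes): %s\n", shift, 1u << shift,
		       valid_logical_bs_shift(shift) ? "ok" : "rejected");
	return 0;
}
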
@ -1261,9 +1261,10 @@ static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
ublk_queue_cmd(ubq, req); ublk_queue_cmd(ubq, req);
} }
static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags) static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
unsigned int issue_flags,
struct ublksrv_io_cmd *ub_cmd)
{ {
struct ublksrv_io_cmd *ub_cmd = (struct ublksrv_io_cmd *)cmd->cmd;
struct ublk_device *ub = cmd->file->private_data; struct ublk_device *ub = cmd->file->private_data;
struct ublk_queue *ubq; struct ublk_queue *ubq;
struct ublk_io *io; struct ublk_io *io;
@ -1362,6 +1363,23 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
return -EIOCBQUEUED; return -EIOCBQUEUED;
} }
static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
struct ublksrv_io_cmd *ub_src = (struct ublksrv_io_cmd *) cmd->cmd;
struct ublksrv_io_cmd ub_cmd;
/*
* Not necessary for async retry, but let's keep it simple and always
* copy the values to avoid any potential reuse.
*/
ub_cmd.q_id = READ_ONCE(ub_src->q_id);
ub_cmd.tag = READ_ONCE(ub_src->tag);
ub_cmd.result = READ_ONCE(ub_src->result);
ub_cmd.addr = READ_ONCE(ub_src->addr);
return __ublk_ch_uring_cmd(cmd, issue_flags, &ub_cmd);
}
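
Copying the four fields out of the command area with READ_ONCE() before doing anything with them means the handler, and any async retry of it, works on one consistent snapshot rather than re-reading memory that could be reused or change underneath it. A userspace sketch of the same snapshot-then-use pattern, with C11 atomics standing in for READ_ONCE() and a made-up command layout:

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical command block shared with an untrusted writer. */
struct shared_cmd {
	_Atomic unsigned int q_id;
	_Atomic unsigned int tag;
};

struct cmd_snapshot {
	unsigned int q_id;
	unsigned int tag;
};

/* Read each field exactly once; later code only ever sees the snapshot. */
static struct cmd_snapshot snapshot_cmd(struct shared_cmd *src)
{
	struct cmd_snapshot s = {
		.q_id = atomic_load_explicit(&src->q_id, memory_order_relaxed),
		.tag  = atomic_load_explicit(&src->tag, memory_order_relaxed),
	};
	return s;
}

int main(void)
{
	struct shared_cmd cmd = { 1, 42 };
	struct cmd_snapshot snap = snapshot_cmd(&cmd);

	/* Even if 'cmd' changes now, validation and dispatch use 'snap'. */
	printf("q_id=%u tag=%u\n", snap.q_id, snap.tag);
	return 0;
}
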
static const struct file_operations ublk_ch_fops = { static const struct file_operations ublk_ch_fops = {
.owner = THIS_MODULE, .owner = THIS_MODULE,
.open = ublk_ch_open, .open = ublk_ch_open,
@ -1952,6 +1970,8 @@ static int ublk_ctrl_set_params(struct ublk_device *ub,
/* clear all we don't support yet */ /* clear all we don't support yet */
ub->params.types &= UBLK_PARAM_TYPE_ALL; ub->params.types &= UBLK_PARAM_TYPE_ALL;
ret = ublk_validate_params(ub); ret = ublk_validate_params(ub);
if (ret)
ub->params.types = 0;
} }
mutex_unlock(&ub->mutex); mutex_unlock(&ub->mutex);


@ -97,10 +97,6 @@ struct quad8 {
struct quad8_reg __iomem *reg; struct quad8_reg __iomem *reg;
}; };
/* Borrow Toggle flip-flop */
#define QUAD8_FLAG_BT BIT(0)
/* Carry Toggle flip-flop */
#define QUAD8_FLAG_CT BIT(1)
/* Error flag */ /* Error flag */
#define QUAD8_FLAG_E BIT(4) #define QUAD8_FLAG_E BIT(4)
/* Up/Down flag */ /* Up/Down flag */
@ -133,6 +129,9 @@ struct quad8 {
#define QUAD8_CMR_QUADRATURE_X2 0x10 #define QUAD8_CMR_QUADRATURE_X2 0x10
#define QUAD8_CMR_QUADRATURE_X4 0x18 #define QUAD8_CMR_QUADRATURE_X4 0x18
/* Each Counter is 24 bits wide */
#define LS7267_CNTR_MAX GENMASK(23, 0)
static int quad8_signal_read(struct counter_device *counter, static int quad8_signal_read(struct counter_device *counter,
struct counter_signal *signal, struct counter_signal *signal,
enum counter_signal_level *level) enum counter_signal_level *level)
@ -156,18 +155,10 @@ static int quad8_count_read(struct counter_device *counter,
{ {
struct quad8 *const priv = counter_priv(counter); struct quad8 *const priv = counter_priv(counter);
struct channel_reg __iomem *const chan = priv->reg->channel + count->id; struct channel_reg __iomem *const chan = priv->reg->channel + count->id;
unsigned int flags;
unsigned int borrow;
unsigned int carry;
unsigned long irqflags; unsigned long irqflags;
int i; int i;
flags = ioread8(&chan->control); *val = 0;
borrow = flags & QUAD8_FLAG_BT;
carry = !!(flags & QUAD8_FLAG_CT);
/* Borrow XOR Carry effectively doubles count range */
*val = (unsigned long)(borrow ^ carry) << 24;
spin_lock_irqsave(&priv->lock, irqflags); spin_lock_irqsave(&priv->lock, irqflags);
@ -191,8 +182,7 @@ static int quad8_count_write(struct counter_device *counter,
unsigned long irqflags; unsigned long irqflags;
int i; int i;
/* Only 24-bit values are supported */ if (val > LS7267_CNTR_MAX)
if (val > 0xFFFFFF)
return -ERANGE; return -ERANGE;
spin_lock_irqsave(&priv->lock, irqflags); spin_lock_irqsave(&priv->lock, irqflags);
@ -378,7 +368,7 @@ static int quad8_action_read(struct counter_device *counter,
/* Handle Index signals */ /* Handle Index signals */
if (synapse->signal->id >= 16) { if (synapse->signal->id >= 16) {
if (priv->preset_enable[count->id]) if (!priv->preset_enable[count->id])
*action = COUNTER_SYNAPSE_ACTION_RISING_EDGE; *action = COUNTER_SYNAPSE_ACTION_RISING_EDGE;
else else
*action = COUNTER_SYNAPSE_ACTION_NONE; *action = COUNTER_SYNAPSE_ACTION_NONE;
@ -806,8 +796,7 @@ static int quad8_count_preset_write(struct counter_device *counter,
struct quad8 *const priv = counter_priv(counter); struct quad8 *const priv = counter_priv(counter);
unsigned long irqflags; unsigned long irqflags;
/* Only 24-bit values are supported */ if (preset > LS7267_CNTR_MAX)
if (preset > 0xFFFFFF)
return -ERANGE; return -ERANGE;
spin_lock_irqsave(&priv->lock, irqflags); spin_lock_irqsave(&priv->lock, irqflags);
@ -834,8 +823,7 @@ static int quad8_count_ceiling_read(struct counter_device *counter,
*ceiling = priv->preset[count->id]; *ceiling = priv->preset[count->id];
break; break;
default: default:
/* By default 0x1FFFFFF (25 bits unsigned) is maximum count */ *ceiling = LS7267_CNTR_MAX;
*ceiling = 0x1FFFFFF;
break; break;
} }
@ -850,8 +838,7 @@ static int quad8_count_ceiling_write(struct counter_device *counter,
struct quad8 *const priv = counter_priv(counter); struct quad8 *const priv = counter_priv(counter);
unsigned long irqflags; unsigned long irqflags;
/* Only 24-bit values are supported */ if (ceiling > LS7267_CNTR_MAX)
if (ceiling > 0xFFFFFF)
return -ERANGE; return -ERANGE;
spin_lock_irqsave(&priv->lock, irqflags); spin_lock_irqsave(&priv->lock, irqflags);


@ -101,25 +101,40 @@ static int map_hdm_decoder_regs(struct cxl_port *port, void __iomem *crb,
BIT(CXL_CM_CAP_CAP_ID_HDM)); BIT(CXL_CM_CAP_CAP_ID_HDM));
} }
static struct cxl_hdm *devm_cxl_setup_emulated_hdm(struct cxl_port *port, static bool should_emulate_decoders(struct cxl_endpoint_dvsec_info *info)
struct cxl_endpoint_dvsec_info *info)
{ {
struct device *dev = &port->dev;
struct cxl_hdm *cxlhdm; struct cxl_hdm *cxlhdm;
void __iomem *hdm;
u32 ctrl;
int i;
if (!info)
return false;
cxlhdm = dev_get_drvdata(&info->port->dev);
hdm = cxlhdm->regs.hdm_decoder;
if (!hdm)
return true;
/*
* If HDM decoders are present and the driver is in control of
* Mem_Enable skip DVSEC based emulation
*/
if (!info->mem_enabled) if (!info->mem_enabled)
return ERR_PTR(-ENODEV); return false;
cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL); /*
if (!cxlhdm) * If any decoders are committed already, there should not be any
return ERR_PTR(-ENOMEM); * emulated DVSEC decoders.
*/
for (i = 0; i < cxlhdm->decoder_count; i++) {
ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
return false;
}
cxlhdm->port = port; return true;
cxlhdm->decoder_count = info->ranges;
cxlhdm->target_count = info->ranges;
dev_set_drvdata(&port->dev, cxlhdm);
return cxlhdm;
} }
/** /**
@ -138,13 +153,14 @@ struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL); cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL);
if (!cxlhdm) if (!cxlhdm)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
cxlhdm->port = port; cxlhdm->port = port;
crb = ioremap(port->component_reg_phys, CXL_COMPONENT_REG_BLOCK_SIZE); dev_set_drvdata(dev, cxlhdm);
if (!crb) {
if (info && info->mem_enabled)
return devm_cxl_setup_emulated_hdm(port, info);
crb = ioremap(port->component_reg_phys, CXL_COMPONENT_REG_BLOCK_SIZE);
if (!crb && info && info->mem_enabled) {
cxlhdm->decoder_count = info->ranges;
return cxlhdm;
} else if (!crb) {
dev_err(dev, "No component registers mapped\n"); dev_err(dev, "No component registers mapped\n");
return ERR_PTR(-ENXIO); return ERR_PTR(-ENXIO);
} }
@ -160,7 +176,15 @@ struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
return ERR_PTR(-ENXIO); return ERR_PTR(-ENXIO);
} }
dev_set_drvdata(dev, cxlhdm); /*
* Now that the hdm capability is parsed, decide if range
* register emulation is needed and fixup cxlhdm accordingly.
*/
if (should_emulate_decoders(info)) {
dev_dbg(dev, "Fallback map %d range register%s\n", info->ranges,
info->ranges > 1 ? "s" : "");
cxlhdm->decoder_count = info->ranges;
}
return cxlhdm; return cxlhdm;
} }
@ -714,14 +738,20 @@ static int cxl_decoder_reset(struct cxl_decoder *cxld)
return 0; return 0;
} }
static int cxl_setup_hdm_decoder_from_dvsec(struct cxl_port *port, static int cxl_setup_hdm_decoder_from_dvsec(
struct cxl_decoder *cxld, int which, struct cxl_port *port, struct cxl_decoder *cxld, u64 *dpa_base,
struct cxl_endpoint_dvsec_info *info) int which, struct cxl_endpoint_dvsec_info *info)
{ {
struct cxl_endpoint_decoder *cxled;
u64 len;
int rc;
if (!is_cxl_endpoint(port)) if (!is_cxl_endpoint(port))
return -EOPNOTSUPP; return -EOPNOTSUPP;
if (!range_len(&info->dvsec_range[which])) cxled = to_cxl_endpoint_decoder(&cxld->dev);
len = range_len(&info->dvsec_range[which]);
if (!len)
return -ENOENT; return -ENOENT;
cxld->target_type = CXL_DECODER_EXPANDER; cxld->target_type = CXL_DECODER_EXPANDER;
@ -736,40 +766,24 @@ static int cxl_setup_hdm_decoder_from_dvsec(struct cxl_port *port,
cxld->flags |= CXL_DECODER_F_ENABLE | CXL_DECODER_F_LOCK; cxld->flags |= CXL_DECODER_F_ENABLE | CXL_DECODER_F_LOCK;
port->commit_end = cxld->id; port->commit_end = cxld->id;
return 0; rc = devm_cxl_dpa_reserve(cxled, *dpa_base, len, 0);
} if (rc) {
dev_err(&port->dev,
static bool should_emulate_decoders(struct cxl_port *port) "decoder%d.%d: Failed to reserve DPA range %#llx - %#llx\n (%d)",
{ port->id, cxld->id, *dpa_base, *dpa_base + len - 1, rc);
struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev); return rc;
void __iomem *hdm = cxlhdm->regs.hdm_decoder;
u32 ctrl;
int i;
if (!is_cxl_endpoint(cxlhdm->port))
return false;
if (!hdm)
return true;
/*
* If any decoders are committed already, there should not be any
* emulated DVSEC decoders.
*/
for (i = 0; i < cxlhdm->decoder_count; i++) {
ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
return false;
} }
*dpa_base += len;
cxled->state = CXL_DECODER_STATE_AUTO;
return true; return 0;
} }
static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld, static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
int *target_map, void __iomem *hdm, int which, int *target_map, void __iomem *hdm, int which,
u64 *dpa_base, struct cxl_endpoint_dvsec_info *info) u64 *dpa_base, struct cxl_endpoint_dvsec_info *info)
{ {
struct cxl_endpoint_decoder *cxled = NULL; struct cxl_endpoint_decoder *cxled;
u64 size, base, skip, dpa_size; u64 size, base, skip, dpa_size;
bool committed; bool committed;
u32 remainder; u32 remainder;
@ -780,11 +794,9 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
unsigned char target_id[8]; unsigned char target_id[8];
} target_list; } target_list;
if (should_emulate_decoders(port)) if (should_emulate_decoders(info))
return cxl_setup_hdm_decoder_from_dvsec(port, cxld, which, info); return cxl_setup_hdm_decoder_from_dvsec(port, cxld, dpa_base,
which, info);
if (is_endpoint_decoder(&cxld->dev))
cxled = to_cxl_endpoint_decoder(&cxld->dev);
ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which)); ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
base = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which)); base = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which));
@ -806,9 +818,6 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
.end = base + size - 1, .end = base + size - 1,
}; };
if (cxled && !committed && range_len(&info->dvsec_range[which]))
return cxl_setup_hdm_decoder_from_dvsec(port, cxld, which, info);
/* decoders are enabled if committed */ /* decoders are enabled if committed */
if (committed) { if (committed) {
cxld->flags |= CXL_DECODER_F_ENABLE; cxld->flags |= CXL_DECODER_F_ENABLE;
@ -846,7 +855,7 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
if (rc) if (rc)
return rc; return rc;
if (!cxled) { if (!info) {
target_list.value = target_list.value =
ioread64_hi_lo(hdm + CXL_HDM_DECODER0_TL_LOW(which)); ioread64_hi_lo(hdm + CXL_HDM_DECODER0_TL_LOW(which));
for (i = 0; i < cxld->interleave_ways; i++) for (i = 0; i < cxld->interleave_ways; i++)
@ -866,6 +875,7 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
return -ENXIO; return -ENXIO;
} }
skip = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_SKIP_LOW(which)); skip = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_SKIP_LOW(which));
cxled = to_cxl_endpoint_decoder(&cxld->dev);
rc = devm_cxl_dpa_reserve(cxled, *dpa_base + skip, dpa_size, skip); rc = devm_cxl_dpa_reserve(cxled, *dpa_base + skip, dpa_size, skip);
if (rc) { if (rc) {
dev_err(&port->dev, dev_err(&port->dev,


@ -462,7 +462,7 @@ static struct pci_doe_mb *find_cdat_doe(struct device *uport)
return NULL; return NULL;
} }
#define CDAT_DOE_REQ(entry_handle) \ #define CDAT_DOE_REQ(entry_handle) cpu_to_le32 \
(FIELD_PREP(CXL_DOE_TABLE_ACCESS_REQ_CODE, \ (FIELD_PREP(CXL_DOE_TABLE_ACCESS_REQ_CODE, \
CXL_DOE_TABLE_ACCESS_REQ_CODE_READ) | \ CXL_DOE_TABLE_ACCESS_REQ_CODE_READ) | \
FIELD_PREP(CXL_DOE_TABLE_ACCESS_TABLE_TYPE, \ FIELD_PREP(CXL_DOE_TABLE_ACCESS_TABLE_TYPE, \
@ -475,8 +475,8 @@ static void cxl_doe_task_complete(struct pci_doe_task *task)
} }
struct cdat_doe_task { struct cdat_doe_task {
u32 request_pl; __le32 request_pl;
u32 response_pl[32]; __le32 response_pl[32];
struct completion c; struct completion c;
struct pci_doe_task task; struct pci_doe_task task;
}; };
@ -510,10 +510,10 @@ static int cxl_cdat_get_length(struct device *dev,
return rc; return rc;
} }
wait_for_completion(&t.c); wait_for_completion(&t.c);
if (t.task.rv < sizeof(u32)) if (t.task.rv < 2 * sizeof(__le32))
return -EIO; return -EIO;
*length = t.response_pl[1]; *length = le32_to_cpu(t.response_pl[1]);
dev_dbg(dev, "CDAT length %zu\n", *length); dev_dbg(dev, "CDAT length %zu\n", *length);
return 0; return 0;
@ -524,13 +524,13 @@ static int cxl_cdat_read_table(struct device *dev,
struct cxl_cdat *cdat) struct cxl_cdat *cdat)
{ {
size_t length = cdat->length; size_t length = cdat->length;
u32 *data = cdat->table; __le32 *data = cdat->table;
int entry_handle = 0; int entry_handle = 0;
do { do {
DECLARE_CDAT_DOE_TASK(CDAT_DOE_REQ(entry_handle), t); DECLARE_CDAT_DOE_TASK(CDAT_DOE_REQ(entry_handle), t);
struct cdat_entry_header *entry;
size_t entry_dw; size_t entry_dw;
u32 *entry;
int rc; int rc;
rc = pci_doe_submit_task(cdat_doe, &t.task); rc = pci_doe_submit_task(cdat_doe, &t.task);
@ -539,26 +539,34 @@ static int cxl_cdat_read_table(struct device *dev,
return rc; return rc;
} }
wait_for_completion(&t.c); wait_for_completion(&t.c);
/* 1 DW header + 1 DW data min */
if (t.task.rv < (2 * sizeof(u32))) /* 1 DW Table Access Response Header + CDAT entry */
entry = (struct cdat_entry_header *)(t.response_pl + 1);
if ((entry_handle == 0 &&
t.task.rv != sizeof(__le32) + sizeof(struct cdat_header)) ||
(entry_handle > 0 &&
(t.task.rv < sizeof(__le32) + sizeof(*entry) ||
t.task.rv != sizeof(__le32) + le16_to_cpu(entry->length))))
return -EIO; return -EIO;
/* Get the CXL table access header entry handle */ /* Get the CXL table access header entry handle */
entry_handle = FIELD_GET(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE, entry_handle = FIELD_GET(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE,
t.response_pl[0]); le32_to_cpu(t.response_pl[0]));
entry = t.response_pl + 1; entry_dw = t.task.rv / sizeof(__le32);
entry_dw = t.task.rv / sizeof(u32);
/* Skip Header */ /* Skip Header */
entry_dw -= 1; entry_dw -= 1;
entry_dw = min(length / sizeof(u32), entry_dw); entry_dw = min(length / sizeof(__le32), entry_dw);
/* Prevent length < 1 DW from causing a buffer overflow */ /* Prevent length < 1 DW from causing a buffer overflow */
if (entry_dw) { if (entry_dw) {
memcpy(data, entry, entry_dw * sizeof(u32)); memcpy(data, entry, entry_dw * sizeof(__le32));
length -= entry_dw * sizeof(u32); length -= entry_dw * sizeof(__le32);
data += entry_dw; data += entry_dw;
} }
} while (entry_handle != CXL_DOE_TABLE_ACCESS_LAST_ENTRY); } while (entry_handle != CXL_DOE_TABLE_ACCESS_LAST_ENTRY);
/* Length in CDAT header may exceed concatenation of CDAT entries */
cdat->length -= length;
return 0; return 0;
} }
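
The hunk above tightens how each DOE mailbox response is validated before CDAT data is copied: the first dword is the Table Access response header, followed either by the 8-byte CDAT table header (entry handle 0) or by a cdat_entry_header whose length field must cover the remaining payload. The following is a minimal user-space sketch of that validation, not part of the commit; it assumes a little-endian host, mirrors the packed layouts added to cxlpci.h later in this diff, and the helper name cdat_response_ok is illustrative only.

#include <stdint.h>
#include <stddef.h>

/* Mirrors the packed layouts this commit adds in cxlpci.h. */
struct cdat_header {
	uint32_t length;
	uint8_t revision;
	uint8_t checksum;
	uint8_t reserved[6];
	uint32_t sequence;
} __attribute__((packed));

struct cdat_entry_header {
	uint8_t type;
	uint8_t reserved;
	uint16_t length;
} __attribute__((packed));

/*
 * Sanity-check one DOE response: a 1-dword Table Access response header,
 * then either the CDAT table header (first entry) or a CDAT structure
 * whose length must match the rest of the payload.
 * Returns 0 when the response is plausible, -1 otherwise.
 */
static int cdat_response_ok(const uint8_t *resp, size_t resp_bytes, int entry_handle)
{
	const struct cdat_entry_header *entry;

	if (entry_handle == 0)
		return resp_bytes == sizeof(uint32_t) + sizeof(struct cdat_header) ? 0 : -1;

	if (resp_bytes < sizeof(uint32_t) + sizeof(struct cdat_entry_header))
		return -1;

	entry = (const struct cdat_entry_header *)(resp + sizeof(uint32_t));
	return resp_bytes == sizeof(uint32_t) + entry->length ? 0 : -1;
}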


@ -62,9 +62,9 @@ static int match_nvdimm_bridge(struct device *dev, void *data)
return is_cxl_nvdimm_bridge(dev); return is_cxl_nvdimm_bridge(dev);
} }
struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct device *start) struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_memdev *cxlmd)
{ {
struct cxl_port *port = find_cxl_root(start); struct cxl_port *port = find_cxl_root(dev_get_drvdata(&cxlmd->dev));
struct device *dev; struct device *dev;
if (!port) if (!port)
@ -253,7 +253,7 @@ int devm_cxl_add_nvdimm(struct cxl_memdev *cxlmd)
struct device *dev; struct device *dev;
int rc; int rc;
cxl_nvb = cxl_find_nvdimm_bridge(&cxlmd->dev); cxl_nvb = cxl_find_nvdimm_bridge(cxlmd);
if (!cxl_nvb) if (!cxl_nvb)
return -ENODEV; return -ENODEV;


@ -823,41 +823,17 @@ static bool dev_is_cxl_root_child(struct device *dev)
return false; return false;
} }
/* Find a 2nd level CXL port that has a dport that is an ancestor of @match */ struct cxl_port *find_cxl_root(struct cxl_port *port)
static int match_root_child(struct device *dev, const void *match)
{ {
const struct device *iter = NULL; struct cxl_port *iter = port;
struct cxl_dport *dport;
struct cxl_port *port;
if (!dev_is_cxl_root_child(dev)) while (iter && !is_cxl_root(iter))
return 0; iter = to_cxl_port(iter->dev.parent);
port = to_cxl_port(dev); if (!iter)
iter = match;
while (iter) {
dport = cxl_find_dport_by_dev(port, iter);
if (dport)
break;
iter = iter->parent;
}
return !!iter;
}
struct cxl_port *find_cxl_root(struct device *dev)
{
struct device *port_dev;
struct cxl_port *root;
port_dev = bus_find_device(&cxl_bus_type, NULL, dev, match_root_child);
if (!port_dev)
return NULL; return NULL;
get_device(&iter->dev);
root = to_cxl_port(port_dev->parent); return iter;
get_device(&root->dev);
put_device(port_dev);
return root;
} }
EXPORT_SYMBOL_NS_GPL(find_cxl_root, CXL); EXPORT_SYMBOL_NS_GPL(find_cxl_root, CXL);


@ -134,9 +134,13 @@ static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
struct cxl_endpoint_decoder *cxled = p->targets[i]; struct cxl_endpoint_decoder *cxled = p->targets[i];
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
struct cxl_port *iter = cxled_to_port(cxled); struct cxl_port *iter = cxled_to_port(cxled);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_ep *ep; struct cxl_ep *ep;
int rc = 0; int rc = 0;
if (cxlds->rcd)
goto endpoint_reset;
while (!is_cxl_root(to_cxl_port(iter->dev.parent))) while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
iter = to_cxl_port(iter->dev.parent); iter = to_cxl_port(iter->dev.parent);
@ -153,6 +157,7 @@ static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
return rc; return rc;
} }
endpoint_reset:
rc = cxled->cxld.reset(&cxled->cxld); rc = cxled->cxld.reset(&cxled->cxld);
if (rc) if (rc)
return rc; return rc;
@ -1199,6 +1204,7 @@ static void cxl_region_teardown_targets(struct cxl_region *cxlr)
{ {
struct cxl_region_params *p = &cxlr->params; struct cxl_region_params *p = &cxlr->params;
struct cxl_endpoint_decoder *cxled; struct cxl_endpoint_decoder *cxled;
struct cxl_dev_state *cxlds;
struct cxl_memdev *cxlmd; struct cxl_memdev *cxlmd;
struct cxl_port *iter; struct cxl_port *iter;
struct cxl_ep *ep; struct cxl_ep *ep;
@ -1214,6 +1220,10 @@ static void cxl_region_teardown_targets(struct cxl_region *cxlr)
for (i = 0; i < p->nr_targets; i++) { for (i = 0; i < p->nr_targets; i++) {
cxled = p->targets[i]; cxled = p->targets[i];
cxlmd = cxled_to_memdev(cxled); cxlmd = cxled_to_memdev(cxled);
cxlds = cxlmd->cxlds;
if (cxlds->rcd)
continue;
iter = cxled_to_port(cxled); iter = cxled_to_port(cxled);
while (!is_cxl_root(to_cxl_port(iter->dev.parent))) while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
@ -1229,14 +1239,24 @@ static int cxl_region_setup_targets(struct cxl_region *cxlr)
{ {
struct cxl_region_params *p = &cxlr->params; struct cxl_region_params *p = &cxlr->params;
struct cxl_endpoint_decoder *cxled; struct cxl_endpoint_decoder *cxled;
struct cxl_dev_state *cxlds;
int i, rc, rch = 0, vh = 0;
struct cxl_memdev *cxlmd; struct cxl_memdev *cxlmd;
struct cxl_port *iter; struct cxl_port *iter;
struct cxl_ep *ep; struct cxl_ep *ep;
int i, rc;
for (i = 0; i < p->nr_targets; i++) { for (i = 0; i < p->nr_targets; i++) {
cxled = p->targets[i]; cxled = p->targets[i];
cxlmd = cxled_to_memdev(cxled); cxlmd = cxled_to_memdev(cxled);
cxlds = cxlmd->cxlds;
/* validate that all targets agree on topology */
if (!cxlds->rcd) {
vh++;
} else {
rch++;
continue;
}
iter = cxled_to_port(cxled); iter = cxled_to_port(cxled);
while (!is_cxl_root(to_cxl_port(iter->dev.parent))) while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
@ -1256,6 +1276,12 @@ static int cxl_region_setup_targets(struct cxl_region *cxlr)
} }
} }
if (rch && vh) {
dev_err(&cxlr->dev, "mismatched CXL topologies detected\n");
cxl_region_teardown_targets(cxlr);
return -ENXIO;
}
return 0; return 0;
} }
@ -1648,6 +1674,7 @@ static int cxl_region_attach(struct cxl_region *cxlr,
if (rc) if (rc)
goto err_decrement; goto err_decrement;
p->state = CXL_CONFIG_ACTIVE; p->state = CXL_CONFIG_ACTIVE;
set_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags);
} }
cxled->cxld.interleave_ways = p->interleave_ways; cxled->cxld.interleave_ways = p->interleave_ways;
@ -1749,8 +1776,6 @@ static int attach_target(struct cxl_region *cxlr,
down_read(&cxl_dpa_rwsem); down_read(&cxl_dpa_rwsem);
rc = cxl_region_attach(cxlr, cxled, pos); rc = cxl_region_attach(cxlr, cxled, pos);
if (rc == 0)
set_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags);
up_read(&cxl_dpa_rwsem); up_read(&cxl_dpa_rwsem);
up_write(&cxl_region_rwsem); up_write(&cxl_region_rwsem);
return rc; return rc;
@ -2251,7 +2276,7 @@ static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr)
* bridge for one device is the same for all. * bridge for one device is the same for all.
*/ */
if (i == 0) { if (i == 0) {
cxl_nvb = cxl_find_nvdimm_bridge(&cxlmd->dev); cxl_nvb = cxl_find_nvdimm_bridge(cxlmd);
if (!cxl_nvb) { if (!cxl_nvb) {
cxlr_pmem = ERR_PTR(-ENODEV); cxlr_pmem = ERR_PTR(-ENODEV);
goto out; goto out;


@ -658,7 +658,7 @@ struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port);
struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport, struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
resource_size_t component_reg_phys, resource_size_t component_reg_phys,
struct cxl_dport *parent_dport); struct cxl_dport *parent_dport);
struct cxl_port *find_cxl_root(struct device *dev); struct cxl_port *find_cxl_root(struct cxl_port *port);
int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd); int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd);
void cxl_bus_rescan(void); void cxl_bus_rescan(void);
void cxl_bus_drain(void); void cxl_bus_drain(void);
@ -695,13 +695,15 @@ int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint)
/** /**
* struct cxl_endpoint_dvsec_info - Cached DVSEC info * struct cxl_endpoint_dvsec_info - Cached DVSEC info
* @mem_enabled: cached value of mem_enabled in the DVSEC, PCIE_DEVICE * @mem_enabled: cached value of mem_enabled in the DVSEC at init time
* @ranges: Number of active HDM ranges this device uses. * @ranges: Number of active HDM ranges this device uses.
* @port: endpoint port associated with this info instance
* @dvsec_range: cached attributes of the ranges in the DVSEC, PCIE_DEVICE * @dvsec_range: cached attributes of the ranges in the DVSEC, PCIE_DEVICE
*/ */
struct cxl_endpoint_dvsec_info { struct cxl_endpoint_dvsec_info {
bool mem_enabled; bool mem_enabled;
int ranges; int ranges;
struct cxl_port *port;
struct range dvsec_range[2]; struct range dvsec_range[2];
}; };
@ -758,7 +760,7 @@ struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev);
bool is_cxl_nvdimm(struct device *dev); bool is_cxl_nvdimm(struct device *dev);
bool is_cxl_nvdimm_bridge(struct device *dev); bool is_cxl_nvdimm_bridge(struct device *dev);
int devm_cxl_add_nvdimm(struct cxl_memdev *cxlmd); int devm_cxl_add_nvdimm(struct cxl_memdev *cxlmd);
struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct device *dev); struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_memdev *cxlmd);
#ifdef CONFIG_CXL_REGION #ifdef CONFIG_CXL_REGION
bool is_cxl_pmem_region(struct device *dev); bool is_cxl_pmem_region(struct device *dev);


@ -68,6 +68,20 @@ enum cxl_regloc_type {
CXL_REGLOC_RBI_TYPES CXL_REGLOC_RBI_TYPES
}; };
struct cdat_header {
__le32 length;
u8 revision;
u8 checksum;
u8 reserved[6];
__le32 sequence;
} __packed;
struct cdat_entry_header {
u8 type;
u8 reserved;
__le16 length;
} __packed;
int devm_cxl_port_enumerate_dports(struct cxl_port *port); int devm_cxl_port_enumerate_dports(struct cxl_port *port);
struct cxl_dev_state; struct cxl_dev_state;
int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm, int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,


@ -78,8 +78,8 @@ static int cxl_switch_port_probe(struct cxl_port *port)
static int cxl_endpoint_port_probe(struct cxl_port *port) static int cxl_endpoint_port_probe(struct cxl_port *port)
{ {
struct cxl_endpoint_dvsec_info info = { .port = port };
struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport); struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport);
struct cxl_endpoint_dvsec_info info = { 0 };
struct cxl_dev_state *cxlds = cxlmd->cxlds; struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_hdm *cxlhdm; struct cxl_hdm *cxlhdm;
struct cxl_port *root; struct cxl_port *root;
@ -119,7 +119,7 @@ static int cxl_endpoint_port_probe(struct cxl_port *port)
* This can't fail in practice as CXL root exit unregisters all * This can't fail in practice as CXL root exit unregisters all
* descendant ports and that in turn synchronizes with cxl_port_probe() * descendant ports and that in turn synchronizes with cxl_port_probe()
*/ */
root = find_cxl_root(&cxlmd->dev); root = find_cxl_root(port);
/* /*
* Now that all endpoint decoders are successfully enumerated, try to * Now that all endpoint decoders are successfully enumerated, try to


@ -100,7 +100,7 @@ config GPIO_GENERIC
tristate tristate
config GPIO_REGMAP config GPIO_REGMAP
depends on REGMAP select REGMAP
tristate tristate
# put drivers in the right section, in alphabetical order # put drivers in the right section, in alphabetical order


@ -324,7 +324,7 @@ static struct irq_chip gpio_irqchip = {
.irq_enable = gpio_irq_enable, .irq_enable = gpio_irq_enable,
.irq_disable = gpio_irq_disable, .irq_disable = gpio_irq_disable,
.irq_set_type = gpio_irq_type, .irq_set_type = gpio_irq_type,
.flags = IRQCHIP_SET_TYPE_MASKED, .flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_SKIP_SET_WAKE,
}; };
static void gpio_irq_handler(struct irq_desc *desc) static void gpio_irq_handler(struct irq_desc *desc)
@ -641,9 +641,6 @@ static void davinci_gpio_save_context(struct davinci_gpio_controller *chips,
context->set_falling = readl_relaxed(&g->set_falling); context->set_falling = readl_relaxed(&g->set_falling);
} }
/* Clear Bank interrupt enable bit */
writel_relaxed(0, base + BINTEN);
/* Clear all interrupt status registers */ /* Clear all interrupt status registers */
writel_relaxed(GENMASK(31, 0), &g->intstat); writel_relaxed(GENMASK(31, 0), &g->intstat);
} }


@ -981,7 +981,12 @@ static bool amdgpu_atcs_pci_probe_handle(struct pci_dev *pdev)
*/ */
bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev) bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev)
{ {
if (adev->flags & AMD_IS_APU) if ((adev->flags & AMD_IS_APU) &&
adev->gfx.imu.funcs) /* Not need to do mode2 reset for IMU enabled APUs */
return false;
if ((adev->flags & AMD_IS_APU) &&
amdgpu_acpi_is_s3_active(adev))
return false; return false;
if (amdgpu_sriov_vf(adev)) if (amdgpu_sriov_vf(adev))


@ -212,6 +212,21 @@ bool needs_dsc_aux_workaround(struct dc_link *link)
return false; return false;
} }
bool is_synaptics_cascaded_panamera(struct dc_link *link, struct drm_dp_mst_port *port)
{
u8 branch_vendor_data[4] = { 0 }; // Vendor data 0x50C ~ 0x50F
if (drm_dp_dpcd_read(port->mgr->aux, DP_BRANCH_VENDOR_SPECIFIC_START, &branch_vendor_data, 4) == 4) {
if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
IS_SYNAPTICS_CASCADED_PANAMERA(link->dpcd_caps.branch_dev_name, branch_vendor_data)) {
DRM_INFO("Synaptics Cascaded MST hub\n");
return true;
}
}
return false;
}
static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector) static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
{ {
struct dc_sink *dc_sink = aconnector->dc_sink; struct dc_sink *dc_sink = aconnector->dc_sink;
@ -235,6 +250,10 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto
needs_dsc_aux_workaround(aconnector->dc_link)) needs_dsc_aux_workaround(aconnector->dc_link))
aconnector->dsc_aux = &aconnector->mst_root->dm_dp_aux.aux; aconnector->dsc_aux = &aconnector->mst_root->dm_dp_aux.aux;
/* synaptics cascaded MST hub case */
if (!aconnector->dsc_aux && is_synaptics_cascaded_panamera(aconnector->dc_link, port))
aconnector->dsc_aux = port->mgr->aux;
if (!aconnector->dsc_aux) if (!aconnector->dsc_aux)
return false; return false;
@ -662,12 +681,25 @@ struct dsc_mst_fairness_params {
struct amdgpu_dm_connector *aconnector; struct amdgpu_dm_connector *aconnector;
}; };
static int kbps_to_peak_pbn(int kbps) static uint16_t get_fec_overhead_multiplier(struct dc_link *dc_link)
{
u8 link_coding_cap;
uint16_t fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B;
link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(dc_link);
if (link_coding_cap == DP_128b_132b_ENCODING)
fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B;
return fec_overhead_multiplier_x1000;
}
static int kbps_to_peak_pbn(int kbps, uint16_t fec_overhead_multiplier_x1000)
{ {
u64 peak_kbps = kbps; u64 peak_kbps = kbps;
peak_kbps *= 1006; peak_kbps *= 1006;
peak_kbps = div_u64(peak_kbps, 1000); peak_kbps *= fec_overhead_multiplier_x1000;
peak_kbps = div_u64(peak_kbps, 1000 * 1000);
return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000)); return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
} }
@ -761,11 +793,12 @@ static int increase_dsc_bpp(struct drm_atomic_state *state,
int link_timeslots_used; int link_timeslots_used;
int fair_pbn_alloc; int fair_pbn_alloc;
int ret = 0; int ret = 0;
uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
for (i = 0; i < count; i++) { for (i = 0; i < count; i++) {
if (vars[i + k].dsc_enabled) { if (vars[i + k].dsc_enabled) {
initial_slack[i] = initial_slack[i] =
kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i + k].pbn; kbps_to_peak_pbn(params[i].bw_range.max_kbps, fec_overhead_multiplier_x1000) - vars[i + k].pbn;
bpp_increased[i] = false; bpp_increased[i] = false;
remaining_to_increase += 1; remaining_to_increase += 1;
} else { } else {
@ -861,6 +894,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
int next_index; int next_index;
int remaining_to_try = 0; int remaining_to_try = 0;
int ret; int ret;
uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
for (i = 0; i < count; i++) { for (i = 0; i < count; i++) {
if (vars[i + k].dsc_enabled if (vars[i + k].dsc_enabled
@ -890,7 +924,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
if (next_index == -1) if (next_index == -1)
break; break;
vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps); vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
ret = drm_dp_atomic_find_time_slots(state, ret = drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr, params[next_index].port->mgr,
params[next_index].port, params[next_index].port,
@ -903,7 +937,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
vars[next_index].dsc_enabled = false; vars[next_index].dsc_enabled = false;
vars[next_index].bpp_x16 = 0; vars[next_index].bpp_x16 = 0;
} else { } else {
vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps); vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps, fec_overhead_multiplier_x1000);
ret = drm_dp_atomic_find_time_slots(state, ret = drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr, params[next_index].port->mgr,
params[next_index].port, params[next_index].port,
@ -932,6 +966,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
int count = 0; int count = 0;
int i, k, ret; int i, k, ret;
bool debugfs_overwrite = false; bool debugfs_overwrite = false;
uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
memset(params, 0, sizeof(params)); memset(params, 0, sizeof(params));
@ -993,7 +1028,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
/* Try no compression */ /* Try no compression */
for (i = 0; i < count; i++) { for (i = 0; i < count; i++) {
vars[i + k].aconnector = params[i].aconnector; vars[i + k].aconnector = params[i].aconnector;
vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps); vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
vars[i + k].dsc_enabled = false; vars[i + k].dsc_enabled = false;
vars[i + k].bpp_x16 = 0; vars[i + k].bpp_x16 = 0;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port, ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port,
@ -1012,7 +1047,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
/* Try max compression */ /* Try max compression */
for (i = 0; i < count; i++) { for (i = 0; i < count; i++) {
if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) { if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) {
vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps); vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps, fec_overhead_multiplier_x1000);
vars[i + k].dsc_enabled = true; vars[i + k].dsc_enabled = true;
vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16; vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
@ -1020,7 +1055,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
if (ret < 0) if (ret < 0)
return ret; return ret;
} else { } else {
vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps); vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
vars[i + k].dsc_enabled = false; vars[i + k].dsc_enabled = false;
vars[i + k].bpp_x16 = 0; vars[i + k].bpp_x16 = 0;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
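
The reworked kbps_to_peak_pbn() above folds the link-encoding FEC overhead multiplier (1031/1000 for 8b/10b links, 1000/1000 for 128b/132b) into the existing 1006/1000 margin before converting kilobits per second into PBN units. Below is a standalone sketch of the same arithmetic, not part of the commit; div_round_up_u64 stands in for DIV64_U64_ROUND_UP(), the constants mirror the header hunk that follows, and the 533250 kbps stream rate is purely illustrative.

#include <stdint.h>
#include <stdio.h>

#define PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B    1031
#define PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B 1000

/* Ceiling division on 64-bit values, standing in for DIV64_U64_ROUND_UP(). */
static uint64_t div_round_up_u64(uint64_t n, uint64_t d)
{
	return (n + d - 1) / d;
}

/* Mirrors the reworked kbps_to_peak_pbn(): 1006/1000 margin plus FEC overhead. */
static int kbps_to_peak_pbn(int kbps, uint16_t fec_overhead_multiplier_x1000)
{
	uint64_t peak_kbps = (uint64_t)kbps;

	peak_kbps *= 1006;                            /* existing 1006/1000 margin */
	peak_kbps *= fec_overhead_multiplier_x1000;   /* +3.1% for 8b/10b FEC */
	peak_kbps /= 1000 * 1000;

	return (int)div_round_up_u64(peak_kbps * 64, 54 * 8 * 1000);
}

int main(void)
{
	int kbps = 533250;    /* illustrative stream rate only */

	printf("8b/10b:    %d PBN\n",
	       kbps_to_peak_pbn(kbps, PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B));
	printf("128b/132b: %d PBN\n",
	       kbps_to_peak_pbn(kbps, PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B));
	return 0;
}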


@ -34,6 +34,21 @@
#define SYNAPTICS_RC_OFFSET 0x4BC #define SYNAPTICS_RC_OFFSET 0x4BC
#define SYNAPTICS_RC_DATA 0x4C0 #define SYNAPTICS_RC_DATA 0x4C0
#define DP_BRANCH_VENDOR_SPECIFIC_START 0x50C
/**
* Panamera MST Hub detection
* Offset DPCD 050Eh == 0x5A indicates cascaded MST hub case
* Check from beginning of branch device vendor specific field (050Ch)
*/
#define IS_SYNAPTICS_PANAMERA(branchDevName) (((int)branchDevName[4] & 0xF0) == 0x50 ? 1 : 0)
#define BRANCH_HW_REVISION_PANAMERA_A2 0x10
#define SYNAPTICS_CASCADED_HUB_ID 0x5A
#define IS_SYNAPTICS_CASCADED_PANAMERA(devName, data) ((IS_SYNAPTICS_PANAMERA(devName) && ((int)data[2] == SYNAPTICS_CASCADED_HUB_ID)) ? 1 : 0)
#define PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B 1031
#define PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B 1000
struct amdgpu_display_manager; struct amdgpu_display_manager;
struct amdgpu_dm_connector; struct amdgpu_dm_connector;


@ -146,8 +146,8 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
unsigned int order; unsigned int order;
u64 root_size; u64 root_size;
root_size = rounddown_pow_of_two(size); order = ilog2(size) - ilog2(chunk_size);
order = ilog2(root_size) - ilog2(chunk_size); root_size = chunk_size << order;
root = drm_block_alloc(mm, NULL, order, offset); root = drm_block_alloc(mm, NULL, order, offset);
if (!root) if (!root)


@ -22,7 +22,6 @@
#include "etnaviv_gem.h" #include "etnaviv_gem.h"
#include "etnaviv_mmu.h" #include "etnaviv_mmu.h"
#include "etnaviv_perfmon.h" #include "etnaviv_perfmon.h"
#include "common.xml.h"
/* /*
* DRM operations: * DRM operations:
@ -476,47 +475,7 @@ static const struct drm_ioctl_desc etnaviv_ioctls[] = {
ETNA_IOCTL(PM_QUERY_SIG, pm_query_sig, DRM_RENDER_ALLOW), ETNA_IOCTL(PM_QUERY_SIG, pm_query_sig, DRM_RENDER_ALLOW),
}; };
static void etnaviv_fop_show_fdinfo(struct seq_file *m, struct file *f) DEFINE_DRM_GEM_FOPS(fops);
{
struct drm_file *file = f->private_data;
struct drm_device *dev = file->minor->dev;
struct etnaviv_drm_private *priv = dev->dev_private;
struct etnaviv_file_private *ctx = file->driver_priv;
/*
* For a description of the text output format used here, see
* Documentation/gpu/drm-usage-stats.rst.
*/
seq_printf(m, "drm-driver:\t%s\n", dev->driver->name);
seq_printf(m, "drm-client-id:\t%u\n", ctx->id);
for (int i = 0; i < ETNA_MAX_PIPES; i++) {
struct etnaviv_gpu *gpu = priv->gpu[i];
char engine[10] = "UNK";
int cur = 0;
if (!gpu)
continue;
if (gpu->identity.features & chipFeatures_PIPE_2D)
cur = snprintf(engine, sizeof(engine), "2D");
if (gpu->identity.features & chipFeatures_PIPE_3D)
cur = snprintf(engine + cur, sizeof(engine) - cur,
"%s3D", cur ? "/" : "");
if (gpu->identity.nn_core_count > 0)
cur = snprintf(engine + cur, sizeof(engine) - cur,
"%sNN", cur ? "/" : "");
seq_printf(m, "drm-engine-%s:\t%llu ns\n", engine,
ctx->sched_entity[i].elapsed_ns);
}
}
static const struct file_operations fops = {
.owner = THIS_MODULE,
DRM_GEM_FOPS,
.show_fdinfo = etnaviv_fop_show_fdinfo,
};
static const struct drm_driver etnaviv_drm_driver = { static const struct drm_driver etnaviv_drm_driver = {
.driver_features = DRIVER_GEM | DRIVER_RENDER, .driver_features = DRIVER_GEM | DRIVER_RENDER,


@ -91,7 +91,15 @@ static void *etnaviv_gem_prime_vmap_impl(struct etnaviv_gem_object *etnaviv_obj)
static int etnaviv_gem_prime_mmap_obj(struct etnaviv_gem_object *etnaviv_obj, static int etnaviv_gem_prime_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
struct vm_area_struct *vma) struct vm_area_struct *vma)
{ {
return dma_buf_mmap(etnaviv_obj->base.dma_buf, vma, 0); int ret;
ret = dma_buf_mmap(etnaviv_obj->base.dma_buf, vma, 0);
if (!ret) {
/* Drop the reference acquired by drm_gem_mmap_obj(). */
drm_gem_object_put(&etnaviv_obj->base);
}
return ret;
} }
static const struct etnaviv_gem_ops etnaviv_gem_prime_ops = { static const struct etnaviv_gem_ops etnaviv_gem_prime_ops = {


@ -46,6 +46,11 @@ struct intel_color_funcs {
* registers involved with the same commit. * registers involved with the same commit.
*/ */
void (*color_commit_arm)(const struct intel_crtc_state *crtc_state); void (*color_commit_arm)(const struct intel_crtc_state *crtc_state);
/*
* Perform any extra tasks needed after all the
* double buffered registers have been latched.
*/
void (*color_post_update)(const struct intel_crtc_state *crtc_state);
/* /*
* Load LUTs (and other single buffered color management * Load LUTs (and other single buffered color management
* registers). Will (hopefully) be called during the vblank * registers). Will (hopefully) be called during the vblank
@ -614,9 +619,33 @@ static void ilk_lut_12p4_pack(struct drm_color_lut *entry, u32 ldw, u32 udw)
static void icl_color_commit_noarm(const struct intel_crtc_state *crtc_state) static void icl_color_commit_noarm(const struct intel_crtc_state *crtc_state)
{ {
/*
* Despite Wa_1406463849, ICL no longer suffers from the SKL
* DC5/PSR CSC black screen issue (see skl_color_commit_noarm()).
* Possibly due to the extra sticky CSC arming
* (see icl_color_post_update()).
*
* On TGL+ all CSC arming issues have been properly fixed.
*/
icl_load_csc_matrix(crtc_state); icl_load_csc_matrix(crtc_state);
} }
static void skl_color_commit_noarm(const struct intel_crtc_state *crtc_state)
{
/*
* Possibly related to display WA #1184, SKL CSC loses the latched
* CSC coeff/offset register values if the CSC registers are disarmed
* between DC5 exit and PSR exit. This will cause the plane(s) to
* output all black (until CSC_MODE is rearmed and properly latched).
* Once PSR exit (and proper register latching) has occurred the
* danger is over. Thus when PSR is enabled the CSC coeff/offset
* register programming will be performed from skl_color_commit_arm()
* which is called after PSR exit.
*/
if (!crtc_state->has_psr)
ilk_load_csc_matrix(crtc_state);
}
static void ilk_color_commit_noarm(const struct intel_crtc_state *crtc_state) static void ilk_color_commit_noarm(const struct intel_crtc_state *crtc_state)
{ {
ilk_load_csc_matrix(crtc_state); ilk_load_csc_matrix(crtc_state);
@ -659,6 +688,9 @@ static void skl_color_commit_arm(const struct intel_crtc_state *crtc_state)
enum pipe pipe = crtc->pipe; enum pipe pipe = crtc->pipe;
u32 val = 0; u32 val = 0;
if (crtc_state->has_psr)
ilk_load_csc_matrix(crtc_state);
/* /*
* We don't (yet) allow userspace to control the pipe background color, * We don't (yet) allow userspace to control the pipe background color,
* so force it to black, but apply pipe gamma and CSC appropriately * so force it to black, but apply pipe gamma and CSC appropriately
@ -677,6 +709,47 @@ static void skl_color_commit_arm(const struct intel_crtc_state *crtc_state)
crtc_state->csc_mode); crtc_state->csc_mode);
} }
static void icl_color_commit_arm(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
/*
* We don't (yet) allow userspace to control the pipe background color,
* so force it to black.
*/
intel_de_write(i915, SKL_BOTTOM_COLOR(pipe), 0);
intel_de_write(i915, GAMMA_MODE(crtc->pipe),
crtc_state->gamma_mode);
intel_de_write_fw(i915, PIPE_CSC_MODE(crtc->pipe),
crtc_state->csc_mode);
}
static void icl_color_post_update(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
/*
* Despite Wa_1406463849, ICL CSC is no longer disarmed by
* coeff/offset register *writes*. Instead, once CSC_MODE
* is armed it stays armed, even after it has been latched.
* Afterwards the coeff/offset registers become effectively
* self-arming. That self-arming must be disabled before the
* next icl_color_commit_noarm() tries to write the next set
* of coeff/offset registers. Fortunately register *reads*
* do still disarm the CSC. Naturally this must not be done
* until the previously written CSC registers have actually
* been latched.
*
* TGL+ no longer need this workaround.
*/
intel_de_read_fw(i915, PIPE_CSC_PREOFF_HI(crtc->pipe));
}
static struct drm_property_blob * static struct drm_property_blob *
create_linear_lut(struct drm_i915_private *i915, int lut_size) create_linear_lut(struct drm_i915_private *i915, int lut_size)
{ {
@ -1373,6 +1446,14 @@ void intel_color_commit_arm(const struct intel_crtc_state *crtc_state)
i915->display.funcs.color->color_commit_arm(crtc_state); i915->display.funcs.color->color_commit_arm(crtc_state);
} }
void intel_color_post_update(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
if (i915->display.funcs.color->color_post_update)
i915->display.funcs.color->color_post_update(crtc_state);
}
void intel_color_prepare_commit(struct intel_crtc_state *crtc_state) void intel_color_prepare_commit(struct intel_crtc_state *crtc_state)
{ {
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@ -3064,10 +3145,20 @@ static const struct intel_color_funcs i9xx_color_funcs = {
.lut_equal = i9xx_lut_equal, .lut_equal = i9xx_lut_equal,
}; };
static const struct intel_color_funcs tgl_color_funcs = {
.color_check = icl_color_check,
.color_commit_noarm = icl_color_commit_noarm,
.color_commit_arm = icl_color_commit_arm,
.load_luts = icl_load_luts,
.read_luts = icl_read_luts,
.lut_equal = icl_lut_equal,
};
static const struct intel_color_funcs icl_color_funcs = { static const struct intel_color_funcs icl_color_funcs = {
.color_check = icl_color_check, .color_check = icl_color_check,
.color_commit_noarm = icl_color_commit_noarm, .color_commit_noarm = icl_color_commit_noarm,
.color_commit_arm = skl_color_commit_arm, .color_commit_arm = icl_color_commit_arm,
.color_post_update = icl_color_post_update,
.load_luts = icl_load_luts, .load_luts = icl_load_luts,
.read_luts = icl_read_luts, .read_luts = icl_read_luts,
.lut_equal = icl_lut_equal, .lut_equal = icl_lut_equal,
@ -3075,7 +3166,7 @@ static const struct intel_color_funcs icl_color_funcs = {
static const struct intel_color_funcs glk_color_funcs = { static const struct intel_color_funcs glk_color_funcs = {
.color_check = glk_color_check, .color_check = glk_color_check,
.color_commit_noarm = ilk_color_commit_noarm, .color_commit_noarm = skl_color_commit_noarm,
.color_commit_arm = skl_color_commit_arm, .color_commit_arm = skl_color_commit_arm,
.load_luts = glk_load_luts, .load_luts = glk_load_luts,
.read_luts = glk_read_luts, .read_luts = glk_read_luts,
@ -3084,7 +3175,7 @@ static const struct intel_color_funcs glk_color_funcs = {
static const struct intel_color_funcs skl_color_funcs = { static const struct intel_color_funcs skl_color_funcs = {
.color_check = ivb_color_check, .color_check = ivb_color_check,
.color_commit_noarm = ilk_color_commit_noarm, .color_commit_noarm = skl_color_commit_noarm,
.color_commit_arm = skl_color_commit_arm, .color_commit_arm = skl_color_commit_arm,
.load_luts = bdw_load_luts, .load_luts = bdw_load_luts,
.read_luts = bdw_read_luts, .read_luts = bdw_read_luts,
@ -3180,7 +3271,9 @@ void intel_color_init_hooks(struct drm_i915_private *i915)
else else
i915->display.funcs.color = &i9xx_color_funcs; i915->display.funcs.color = &i9xx_color_funcs;
} else { } else {
if (DISPLAY_VER(i915) >= 11) if (DISPLAY_VER(i915) >= 12)
i915->display.funcs.color = &tgl_color_funcs;
else if (DISPLAY_VER(i915) == 11)
i915->display.funcs.color = &icl_color_funcs; i915->display.funcs.color = &icl_color_funcs;
else if (DISPLAY_VER(i915) == 10) else if (DISPLAY_VER(i915) == 10)
i915->display.funcs.color = &glk_color_funcs; i915->display.funcs.color = &glk_color_funcs;


@ -21,6 +21,7 @@ void intel_color_prepare_commit(struct intel_crtc_state *crtc_state);
void intel_color_cleanup_commit(struct intel_crtc_state *crtc_state); void intel_color_cleanup_commit(struct intel_crtc_state *crtc_state);
void intel_color_commit_noarm(const struct intel_crtc_state *crtc_state); void intel_color_commit_noarm(const struct intel_crtc_state *crtc_state);
void intel_color_commit_arm(const struct intel_crtc_state *crtc_state); void intel_color_commit_arm(const struct intel_crtc_state *crtc_state);
void intel_color_post_update(const struct intel_crtc_state *crtc_state);
void intel_color_load_luts(const struct intel_crtc_state *crtc_state); void intel_color_load_luts(const struct intel_crtc_state *crtc_state);
void intel_color_get_config(struct intel_crtc_state *crtc_state); void intel_color_get_config(struct intel_crtc_state *crtc_state);
bool intel_color_lut_equal(const struct intel_crtc_state *crtc_state, bool intel_color_lut_equal(const struct intel_crtc_state *crtc_state,


@ -1209,6 +1209,9 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
if (needs_cursorclk_wa(old_crtc_state) && if (needs_cursorclk_wa(old_crtc_state) &&
!needs_cursorclk_wa(new_crtc_state)) !needs_cursorclk_wa(new_crtc_state))
icl_wa_cursorclkgating(dev_priv, pipe, false); icl_wa_cursorclkgating(dev_priv, pipe, false);
if (intel_crtc_needs_color_update(new_crtc_state))
intel_color_post_update(new_crtc_state);
} }
static void intel_crtc_enable_flip_done(struct intel_atomic_state *state, static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
@ -7091,6 +7094,8 @@ static void intel_update_crtc(struct intel_atomic_state *state,
intel_fbc_update(state, crtc); intel_fbc_update(state, crtc);
drm_WARN_ON(&i915->drm, !intel_display_power_is_enabled(i915, POWER_DOMAIN_DC_OFF));
if (!modeset && if (!modeset &&
intel_crtc_needs_color_update(new_crtc_state)) intel_crtc_needs_color_update(new_crtc_state))
intel_color_commit_noarm(new_crtc_state); intel_color_commit_noarm(new_crtc_state);
@ -7458,8 +7463,28 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
drm_atomic_helper_wait_for_dependencies(&state->base); drm_atomic_helper_wait_for_dependencies(&state->base);
drm_dp_mst_atomic_wait_for_dependencies(&state->base); drm_dp_mst_atomic_wait_for_dependencies(&state->base);
if (state->modeset) /*
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET); * During full modesets we write a lot of registers, wait
* for PLLs, etc. Doing that while DC states are enabled
* is not a good idea.
*
* During fastsets and other updates we also need to
* disable DC states due to the following scenario:
* 1. DC5 exit and PSR exit happen
* 2. Some or all _noarm() registers are written
* 3. Due to some long delay PSR is re-entered
* 4. DC5 entry -> DMC saves the already written new
* _noarm() registers and the old not yet written
* _arm() registers
* 5. DC5 exit -> DMC restores a mixture of old and
* new register values and arms the update
* 6. PSR exit -> hardware latches a mixture of old and
* new register values -> corrupted frame, or worse
* 7. New _arm() registers are finally written
* 8. Hardware finally latches a complete set of new
* register values, and subsequent frames will be OK again
*/
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DC_OFF);
intel_atomic_prepare_plane_clear_colors(state); intel_atomic_prepare_plane_clear_colors(state);
@ -7608,8 +7633,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
* the culprit. * the culprit.
*/ */
intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore); intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
} }
intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF, wakeref);
intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
/* /*


@ -232,7 +232,7 @@ static int intel_dp_dsc_mst_compute_link_config(struct intel_encoder *encoder,
return slots; return slots;
} }
intel_link_compute_m_n(crtc_state->pipe_bpp, intel_link_compute_m_n(crtc_state->dsc.compressed_bpp,
crtc_state->lane_count, crtc_state->lane_count,
adjusted_mode->crtc_clock, adjusted_mode->crtc_clock,
crtc_state->port_clock, crtc_state->port_clock,


@ -301,6 +301,7 @@ intel_dpt_create(struct intel_framebuffer *fb)
vm->pte_encode = gen8_ggtt_pte_encode; vm->pte_encode = gen8_ggtt_pte_encode;
dpt->obj = dpt_obj; dpt->obj = dpt_obj;
dpt->obj->is_dpt = true;
return &dpt->vm; return &dpt->vm;
} }
@ -309,5 +310,6 @@ void intel_dpt_destroy(struct i915_address_space *vm)
{ {
struct i915_dpt *dpt = i915_vm_to_dpt(vm); struct i915_dpt *dpt = i915_vm_to_dpt(vm);
dpt->obj->is_dpt = false;
i915_vm_put(&dpt->vm); i915_vm_put(&dpt->vm);
} }


@ -418,9 +418,9 @@ static bool icl_tc_phy_is_owned(struct intel_digital_port *dig_port)
val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia)); val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
if (val == 0xffffffff) { if (val == 0xffffffff) {
drm_dbg_kms(&i915->drm, drm_dbg_kms(&i915->drm,
"Port %s: PHY in TCCOLD, assume safe mode\n", "Port %s: PHY in TCCOLD, assume not owned\n",
dig_port->tc_port_name); dig_port->tc_port_name);
return true; return false;
} }
return val & DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx); return val & DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);


@ -127,7 +127,8 @@ i915_gem_object_create_lmem_from_data(struct drm_i915_private *i915,
memcpy(map, data, size); memcpy(map, data, size);
i915_gem_object_unpin_map(obj); i915_gem_object_flush_map(obj);
__i915_gem_object_release_map(obj);
return obj; return obj;
} }


@ -303,7 +303,7 @@ i915_gem_object_never_mmap(const struct drm_i915_gem_object *obj)
static inline bool static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj) i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{ {
return READ_ONCE(obj->frontbuffer); return READ_ONCE(obj->frontbuffer) || obj->is_dpt;
} }
static inline unsigned int static inline unsigned int


@ -491,6 +491,9 @@ struct drm_i915_gem_object {
*/ */
unsigned int cache_dirty:1; unsigned int cache_dirty:1;
/* @is_dpt: Object houses a display page table (DPT) */
unsigned int is_dpt:1;
/** /**
* @read_domains: Read memory domains. * @read_domains: Read memory domains.
* *


@ -1067,11 +1067,12 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
.interruptible = true, .interruptible = true,
.no_wait_gpu = true, /* should be idle already */ .no_wait_gpu = true, /* should be idle already */
}; };
int err;
GEM_BUG_ON(!bo->ttm || !(bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED)); GEM_BUG_ON(!bo->ttm || !(bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED));
ret = ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx); err = ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx);
if (ret) { if (err) {
dma_resv_unlock(bo->base.resv); dma_resv_unlock(bo->base.resv);
return VM_FAULT_SIGBUS; return VM_FAULT_SIGBUS;
} }


@ -2018,6 +2018,8 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
* inspecting the queue to see if we need to resubmit. * inspecting the queue to see if we need to resubmit.
*/ */
if (*prev != *execlists->active) { /* elide lite-restores */ if (*prev != *execlists->active) { /* elide lite-restores */
struct intel_context *prev_ce = NULL, *active_ce = NULL;
/* /*
* Note the inherent discrepancy between the HW runtime, * Note the inherent discrepancy between the HW runtime,
* recorded as part of the context switch, and the CPU * recorded as part of the context switch, and the CPU
@ -2029,9 +2031,15 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
* and correct ourselves later when updating from HW. * and correct ourselves later when updating from HW.
*/ */
if (*prev) if (*prev)
lrc_runtime_stop((*prev)->context); prev_ce = (*prev)->context;
if (*execlists->active) if (*execlists->active)
lrc_runtime_start((*execlists->active)->context); active_ce = (*execlists->active)->context;
if (prev_ce != active_ce) {
if (prev_ce)
lrc_runtime_stop(prev_ce);
if (active_ce)
lrc_runtime_start(active_ce);
}
new_timeslice(execlists); new_timeslice(execlists);
} }


@ -2075,16 +2075,6 @@ void intel_rps_sanitize(struct intel_rps *rps)
rps_disable_interrupts(rps); rps_disable_interrupts(rps);
} }
u32 intel_rps_read_rpstat_fw(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
i915_reg_t rpstat;
rpstat = (GRAPHICS_VER(i915) >= 12) ? GEN12_RPSTAT1 : GEN6_RPSTAT1;
return intel_uncore_read_fw(rps_to_gt(rps)->uncore, rpstat);
}
u32 intel_rps_read_rpstat(struct intel_rps *rps) u32 intel_rps_read_rpstat(struct intel_rps *rps)
{ {
struct drm_i915_private *i915 = rps_to_i915(rps); struct drm_i915_private *i915 = rps_to_i915(rps);
@ -2095,7 +2085,7 @@ u32 intel_rps_read_rpstat(struct intel_rps *rps)
return intel_uncore_read(rps_to_gt(rps)->uncore, rpstat); return intel_uncore_read(rps_to_gt(rps)->uncore, rpstat);
} }
u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat) static u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat)
{ {
struct drm_i915_private *i915 = rps_to_i915(rps); struct drm_i915_private *i915 = rps_to_i915(rps);
u32 cagf; u32 cagf;
@ -2118,10 +2108,11 @@ u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat)
return cagf; return cagf;
} }
static u32 read_cagf(struct intel_rps *rps) static u32 __read_cagf(struct intel_rps *rps, bool take_fw)
{ {
struct drm_i915_private *i915 = rps_to_i915(rps); struct drm_i915_private *i915 = rps_to_i915(rps);
struct intel_uncore *uncore = rps_to_uncore(rps); struct intel_uncore *uncore = rps_to_uncore(rps);
i915_reg_t r = INVALID_MMIO_REG;
u32 freq; u32 freq;
/* /*
@ -2129,22 +2120,30 @@ static u32 read_cagf(struct intel_rps *rps)
* registers will return 0 freq when GT is in RC6 * registers will return 0 freq when GT is in RC6
*/ */
if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70)) { if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70)) {
freq = intel_uncore_read(uncore, MTL_MIRROR_TARGET_WP1); r = MTL_MIRROR_TARGET_WP1;
} else if (GRAPHICS_VER(i915) >= 12) { } else if (GRAPHICS_VER(i915) >= 12) {
freq = intel_uncore_read(uncore, GEN12_RPSTAT1); r = GEN12_RPSTAT1;
} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) { } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
vlv_punit_get(i915); vlv_punit_get(i915);
freq = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS); freq = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
vlv_punit_put(i915); vlv_punit_put(i915);
} else if (GRAPHICS_VER(i915) >= 6) { } else if (GRAPHICS_VER(i915) >= 6) {
freq = intel_uncore_read(uncore, GEN6_RPSTAT1); r = GEN6_RPSTAT1;
} else { } else {
freq = intel_uncore_read(uncore, MEMSTAT_ILK); r = MEMSTAT_ILK;
} }
if (i915_mmio_reg_valid(r))
freq = take_fw ? intel_uncore_read(uncore, r) : intel_uncore_read_fw(uncore, r);
return intel_rps_get_cagf(rps, freq); return intel_rps_get_cagf(rps, freq);
} }
static u32 read_cagf(struct intel_rps *rps)
{
return __read_cagf(rps, true);
}
u32 intel_rps_read_actual_frequency(struct intel_rps *rps) u32 intel_rps_read_actual_frequency(struct intel_rps *rps)
{ {
struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm; struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm;
@ -2157,7 +2156,12 @@ u32 intel_rps_read_actual_frequency(struct intel_rps *rps)
return freq; return freq;
} }
u32 intel_rps_read_punit_req(struct intel_rps *rps) u32 intel_rps_read_actual_frequency_fw(struct intel_rps *rps)
{
return intel_gpu_freq(rps, __read_cagf(rps, false));
}
static u32 intel_rps_read_punit_req(struct intel_rps *rps)
{ {
struct intel_uncore *uncore = rps_to_uncore(rps); struct intel_uncore *uncore = rps_to_uncore(rps);
struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm; struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm;

Some files were not shown because too many files have changed in this diff Show More