Merge 6.2-rc3 into usb-next
We need the USB fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in commit c2fb9a214d.
@@ -39,6 +39,7 @@
*.o.*
*.patch
*.rmeta
*.rpm
*.rsi
*.s
*.so
@@ -0,0 +1,18 @@
What:		/sys/kernel/debug/pktcdvd/pktcdvd[0-7]
Date:		Oct. 2006
KernelVersion:	2.6.20
Contact:	Thomas Maier <balagi@justmail.de>
Description:

		The pktcdvd module (packet writing driver) creates
		these files in debugfs:

		/sys/kernel/debug/pktcdvd/pktcdvd[0-7]/

		==== ====== ====================================
		info 0444   Lots of driver statistics and info.
		==== ====== ====================================

		Example::

			cat /sys/kernel/debug/pktcdvd/pktcdvd0/info
@@ -0,0 +1,97 @@
sysfs interface
---------------

The pktcdvd module (packet writing driver) creates the following files in the
sysfs: (<devid> is in the format major:minor)

What:		/sys/class/pktcdvd/add
What:		/sys/class/pktcdvd/remove
What:		/sys/class/pktcdvd/device_map
Date:		Oct. 2006
KernelVersion:	2.6.20
Contact:	Thomas Maier <balagi@justmail.de>
Description:

		========== ==============================================
		add        (WO) Write a block device id (major:minor) to
			   create a new pktcdvd device and map it to the
			   block device.

		remove     (WO) Write the pktcdvd device id (major:minor)
			   to remove the pktcdvd device.

		device_map (RO) Shows the device mapping in format:
			   pktcdvd[0-7] <pktdevid> <blkdevid>
		========== ==============================================


What:		/sys/class/pktcdvd/pktcdvd[0-7]/dev
What:		/sys/class/pktcdvd/pktcdvd[0-7]/uevent
Date:		Oct. 2006
KernelVersion:	2.6.20
Contact:	Thomas Maier <balagi@justmail.de>
Description:
		dev:    (RO) Device id

		uevent: (WO) To send a uevent


What:		/sys/class/pktcdvd/pktcdvd[0-7]/stat/packets_started
What:		/sys/class/pktcdvd/pktcdvd[0-7]/stat/packets_finished
What:		/sys/class/pktcdvd/pktcdvd[0-7]/stat/kb_written
What:		/sys/class/pktcdvd/pktcdvd[0-7]/stat/kb_read
What:		/sys/class/pktcdvd/pktcdvd[0-7]/stat/kb_read_gather
What:		/sys/class/pktcdvd/pktcdvd[0-7]/stat/reset
Date:		Oct. 2006
KernelVersion:	2.6.20
Contact:	Thomas Maier <balagi@justmail.de>
Description:
		packets_started:  (RO) Number of started packets.

		packets_finished: (RO) Number of finished packets.

		kb_written:       (RO) kBytes written.

		kb_read:          (RO) kBytes read.

		kb_read_gather:   (RO) kBytes read to fill write packets.

		reset:            (WO) Write any value to it to reset
				  pktcdvd device statistic values, like
				  bytes read/written.


What:		/sys/class/pktcdvd/pktcdvd[0-7]/write_queue/size
What:		/sys/class/pktcdvd/pktcdvd[0-7]/write_queue/congestion_off
What:		/sys/class/pktcdvd/pktcdvd[0-7]/write_queue/congestion_on
Date:		Oct. 2006
KernelVersion:	2.6.20
Contact:	Thomas Maier <balagi@justmail.de>
Description:
		============== ================================================
		size           (RO) Contains the size of the bio write queue.

		congestion_off (RW) If the bio write queue size is below this
			       mark, accept new bio requests from the block
			       layer.

		congestion_on  (RW) If the bio write queue size is higher than
			       this mark, no longer accept bio write requests
			       from the block layer and wait until the pktcdvd
			       device has processed enough bios so that the
			       bio write queue size is below the congestion_off
			       mark. A value of <= 0 disables congestion
			       control.
		============== ================================================


Example:
--------
To use the pktcdvd sysfs interface directly, you can do::

	# create a new pktcdvd device mapped to /dev/hdc
	echo "22:0" >/sys/class/pktcdvd/add

	cat /sys/class/pktcdvd/device_map
	# assuming device pktcdvd0 was created, look at stats
	cat /sys/class/pktcdvd/pktcdvd0/stat/kb_written

	# print the device id of the mapped block device
	fgrep pktcdvd0 /sys/class/pktcdvd/device_map

	# remove device, using pktcdvd0 device id 253:0
	echo "253:0" >/sys/class/pktcdvd/remove
@@ -40,6 +40,9 @@ properties:
  clock-names:
    const: stmmaceth

  phy-supply:
    description: PHY regulator

  syscon:
    $ref: /schemas/types.yaml#/definitions/phandle
    description:
@@ -16,9 +16,6 @@ description: |
  8k has a second unit which provides an interface with the xMDIO bus. This
  driver handles these interfaces.

allOf:
  - $ref: "mdio.yaml#"

properties:
  compatible:
    enum:
@@ -39,13 +36,38 @@ required:
  - compatible
  - reg

allOf:
  - $ref: mdio.yaml#

  - if:
      required:
        - interrupts

    then:
      properties:
        reg:
          items:
            - items:
                - $ref: /schemas/types.yaml#/definitions/cell
                - const: 0x84

    else:
      properties:
        reg:
          items:
            - items:
                - $ref: /schemas/types.yaml#/definitions/cell
                - enum:
                    - 0x4
                    - 0x10

unevaluatedProperties: false

examples:
  - |
    mdio@d0072004 {
      compatible = "marvell,orion-mdio";
      reg = <0xd0072004 0x4>;
      reg = <0xd0072004 0x84>;
      #address-cells = <1>;
      #size-cells = <0>;
      interrupts = <30>;
@@ -80,7 +80,7 @@ properties:
      or applicable for the respective data port.
      More info in MIPI Alliance SoundWire 1.0 Specifications.
    minItems: 3
    maxItems: 5
    maxItems: 8

  qcom,ports-sinterval-low:
    $ref: /schemas/types.yaml#/definitions/uint8-array
@@ -124,7 +124,7 @@ properties:
      or applicable for the respective data port.
      More info in MIPI Alliance SoundWire 1.0 Specifications.
    minItems: 3
    maxItems: 5
    maxItems: 8

  qcom,ports-block-pack-mode:
    $ref: /schemas/types.yaml#/definitions/uint8-array
@@ -154,7 +154,7 @@ properties:
      or applicable for the respective data port.
      More info in MIPI Alliance SoundWire 1.0 Specifications.
    minItems: 3
    maxItems: 5
    maxItems: 8
    items:
      oneOf:
        - minimum: 0
@@ -171,7 +171,7 @@ properties:
      or applicable for the respective data port.
      More info in MIPI Alliance SoundWire 1.0 Specifications.
    minItems: 3
    maxItems: 5
    maxItems: 8
    items:
      oneOf:
        - minimum: 0
@@ -187,7 +187,7 @@ properties:
      or applicable for the respective data port.
      More info in MIPI Alliance SoundWire 1.0 Specifications.
    minItems: 3
    maxItems: 5
    maxItems: 8
    items:
      oneOf:
        - minimum: 0
@@ -104,3 +104,4 @@ to do something different in the near future.
   ../riscv/patch-acceptance
   ../driver-api/media/maintainer-entry-profile
   ../driver-api/vfio-pci-device-specific-driver-acceptance
   ../nvme/feature-and-quirk-policy
@@ -0,0 +1,77 @@
.. SPDX-License-Identifier: GPL-2.0

=======================================
Linux NVMe feature and quirk policy
=======================================

This file explains the policy used to decide what is supported by the
Linux NVMe driver and what is not.


Introduction
============

NVM Express is an open collection of standards and information.

The Linux NVMe host driver in drivers/nvme/host/ supports devices
implementing the NVM Express (NVMe) family of specifications, which
currently consists of a number of documents:

- the NVMe Base specification
- various Command Set specifications (e.g. NVM Command Set)
- various Transport specifications (e.g. PCIe, Fibre Channel, RDMA, TCP)
- the NVMe Management Interface specification

See https://nvmexpress.org/developers/ for the NVMe specifications.


Supported features
==================

NVMe is a large suite of specifications, and contains features that are only
useful or suitable for specific use-cases. It is important to note that Linux
does not aim to implement every feature in the specification. Every additional
feature implemented introduces more code, more maintenance and potentially more
bugs. Hence there is an inherent tradeoff between functionality and
maintainability of the NVMe host driver.

Any feature implemented in the Linux NVMe host driver must support the
following requirements:

1. The feature is specified in a release version of an official NVMe
   specification, or in a ratified Technical Proposal (TP) that is
   available on the NVMe website. Or if it is not directly related to the
   on-wire protocol, does not contradict any of the NVMe specifications.
2. Does not conflict with the Linux architecture, nor the design of the
   NVMe host driver.
3. Has a clear, indisputable value-proposition and a wide consensus across
   the community.

Vendor specific extensions are generally not supported in the NVMe host
driver.

It is strongly recommended to work with the Linux NVMe and block layer
maintainers and get feedback on specification changes that are intended
to be used by the Linux NVMe host driver in order to avoid conflict at a
later stage.


Quirks
======

Sometimes implementations of open standards fail to correctly implement parts
of the standards. Linux uses identifier-based quirks to work around such
implementation bugs. The intent of quirks is to deal with widely available
hardware, usually consumer, which Linux users can't use without these quirks.
Typically these implementations are not or only superficially tested with Linux
by the hardware manufacturer.

The Linux NVMe maintainers decide ad hoc whether to quirk implementations
based on the impact of the problem on Linux users and how it impacts
maintainability of the driver. In general quirks are a last resort, if no
firmware updates or other workarounds are available from the vendor.

Quirks will not be added to the Linux kernel for hardware that isn't available
on the mass market. Hardware that fails qualification for enterprise Linux
distributions, ChromeOS, Android or other consumers of the Linux kernel
should be fixed before it is shipped instead of relying on Linux quirks.
@@ -2,9 +2,9 @@

.. _netdev-FAQ:

==========
netdev FAQ
==========
=============================
Networking subsystem (netdev)
=============================

tl;dr
-----
@@ -15,14 +15,15 @@ tl;dr
- don't repost your patches within one 24h period
- reverse xmas tree

What is netdev?
---------------
It is a mailing list for all network-related Linux stuff. This
netdev
------

netdev is a mailing list for all network-related Linux stuff. This
includes anything found under net/ (i.e. core code like IPv6) and
drivers/net (i.e. hardware specific drivers) in the Linux source tree.

Note that some subsystems (e.g. wireless drivers) which have a high
volume of traffic have their own specific mailing lists.
volume of traffic have their own specific mailing lists and trees.

The netdev list is managed (like many other Linux mailing lists) through
VGER (http://vger.kernel.org/) with archives available at
@@ -32,32 +33,10 @@ Aside from subsystems like those mentioned above, all network-related
Linux development (i.e. RFC, review, comments, etc.) takes place on
netdev.

How do the changes posted to netdev make their way into Linux?
--------------------------------------------------------------
There are always two trees (git repositories) in play. Both are
driven by David Miller, the main network maintainer. There is the
``net`` tree, and the ``net-next`` tree. As you can probably guess from
the names, the ``net`` tree is for fixes to existing code already in the
mainline tree from Linus, and ``net-next`` is where the new code goes
for the future release. You can find the trees here:
Development cycle
-----------------

- https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
- https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git

How do I indicate which tree (net vs. net-next) my patch should be in?
----------------------------------------------------------------------
To help maintainers and CI bots you should explicitly mark which tree
your patch is targeting. Assuming that you use git, use the prefix
flag::

  git format-patch --subject-prefix='PATCH net-next' start..finish

Use ``net`` instead of ``net-next`` (always lower case) in the above for
bug-fix ``net`` content.

How often do changes from these trees make it to the mainline Linus tree?
-------------------------------------------------------------------------
To understand this, you need to know a bit of background information on
Here is a bit of background information on
the cadence of Linux development. Each new release starts off with a
two week "merge window" where the main maintainers feed their new stuff
to Linus for merging into the mainline tree. After the two weeks, the
@@ -69,9 +48,33 @@ rc2 is released. This repeats on a roughly weekly basis until rc7
state of churn), and a week after the last vX.Y-rcN was done, the
official vX.Y is released.

Relating that to netdev: At the beginning of the 2-week merge window,
the ``net-next`` tree will be closed - no new changes/features. The
accumulated new content of the past ~10 weeks will be passed onto
To find out where we are now in the cycle - load the mainline (Linus)
page here:

  https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

and note the top of the "tags" section. If it is rc1, it is early in
the dev cycle. If it was tagged rc7 a week ago, then a release is
probably imminent. If the most recent tag is a final release tag
(without an ``-rcN`` suffix) - we are most likely in a merge window
and ``net-next`` is closed.

git trees and patch flow
------------------------

There are two networking trees (git repositories) in play. Both are
driven by David Miller, the main network maintainer. There is the
``net`` tree, and the ``net-next`` tree. As you can probably guess from
the names, the ``net`` tree is for fixes to existing code already in the
mainline tree from Linus, and ``net-next`` is where the new code goes
for the future release. You can find the trees here:

- https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
- https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git

Relating that to kernel development: At the beginning of the 2-week
merge window, the ``net-next`` tree will be closed - no new changes/features.
The accumulated new content of the past ~10 weeks will be passed onto
mainline/Linus via a pull request for vX.Y -- at the same time, the
``net`` tree will start accumulating fixes for this pulled content
relating to vX.Y.
@@ -103,22 +106,14 @@ focus for ``net`` is on stabilization and bug fixes.

Finally, the vX.Y gets released, and the whole cycle starts over.

So where are we now in this cycle?
----------------------------------
netdev patch review
-------------------

Load the mainline (Linus) page here:
Patch status
~~~~~~~~~~~~

  https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

and note the top of the "tags" section. If it is rc1, it is early in
the dev cycle. If it was tagged rc7 a week ago, then a release is
probably imminent. If the most recent tag is a final release tag
(without an ``-rcN`` suffix) - we are most likely in a merge window
and ``net-next`` is closed.

How can I tell the status of a patch I've sent?
-----------------------------------------------
Start by looking at the main patchworks queue for netdev:
Status of a patch can be checked by looking at the main patchwork
queue for netdev:

  https://patchwork.kernel.org/project/netdevbpf/list/
@@ -127,8 +122,20 @@ patch. Patches are indexed by the ``Message-ID`` header of the emails
which carried them so if you have trouble finding your patch append
the value of ``Message-ID`` to the URL above.

How long before my patch is accepted?
-------------------------------------
Updating patch status
~~~~~~~~~~~~~~~~~~~~~

It may be tempting to help the maintainers and update the state of your
own patches when you post a new version or spot a bug. Please **do not**
do that.
Interfering with the patch status on patchwork will only cause confusion. Leave
it to the maintainer to figure out what is the most recent and current
version that should be applied. If there is any doubt, the maintainer
will reply and ask what should be done.

Review timelines
~~~~~~~~~~~~~~~~

Generally speaking, the patches get triaged quickly (in less than
48h). But be patient: if your patch is active in patchwork (i.e. it's
listed on the project's patch list) the chances it was missed are close to zero.
@@ -136,116 +143,47 @@ Asking the maintainer for status updates on your
patch is a good way to ensure your patch is ignored or pushed to the
bottom of the priority list.

Should I directly update patchwork state of my own patches?
-----------------------------------------------------------
It may be tempting to help the maintainers and update the state of your
own patches when you post a new version or spot a bug. Please do not do that.
Interfering with the patch status on patchwork will only cause confusion. Leave
it to the maintainer to figure out what is the most recent and current
version that should be applied. If there is any doubt, the maintainer
will reply and ask what should be done.
Partial resends
~~~~~~~~~~~~~~~

How do I divide my work into patches?
-------------------------------------

Put yourself in the shoes of the reviewer. Each patch is read separately
and therefore should constitute a comprehensible step towards your stated
goal.

Avoid sending series longer than 15 patches. Larger series take longer
to review as reviewers will defer looking at it until they find a large
chunk of time. A small series can be reviewed in a short time, so maintainers
just do it. As a result, a sequence of smaller series gets merged quicker and
with better review coverage. Re-posting large series also increases the mailing
list traffic.

I made changes to only a few patches in a patch series, should I resend only those changed?
------------------------------------------------------------------------------------------
No, please resend the entire patch series and make sure you do number your
Please always resend the entire patch series and make sure you do number your
patches such that it is clear this is the latest and greatest set of patches
that can be applied.
that can be applied. Do not try to resend just the patches which changed.

I have received review feedback, when should I post a revised version of the patches?
-------------------------------------------------------------------------------------
Allow at least 24 hours to pass between postings. This will ensure reviewers
from all geographical locations have a chance to chime in. Do not wait
too long (weeks) between postings either as it will make it harder for reviewers
to recall all the context.
Handling misapplied patches
~~~~~~~~~~~~~~~~~~~~~~~~~~~

Make sure you address all the feedback in your new posting. Do not post a new
version of the code if the discussion about the previous version is still
ongoing, unless directly instructed by a reviewer.

I submitted multiple versions of a patch series and it looks like a version other than the last one has been accepted, what should I do?
----------------------------------------------------------------------------------------------------------------------------------------
Occasionally a patch series gets applied before receiving critical feedback,
or the wrong version of a series gets applied.
There is no revert possible; once it is pushed out, it stays like that.
Please send incremental versions on top of what has been merged in order to fix
the patches the way they would look like if your latest patch series was to be
merged.

Are there special rules regarding stable submissions on netdev?
---------------------------------------------------------------
Stable tree
~~~~~~~~~~~

While it used to be the case that netdev submissions were not supposed
to carry explicit ``CC: stable@vger.kernel.org`` tags that is no longer
the case today. Please follow the standard stable rules in
:ref:`Documentation/process/stable-kernel-rules.rst <stable_kernel_rules>`,
and make sure you include appropriate Fixes tags!

Is the comment style convention different for the networking content?
---------------------------------------------------------------------
Yes, in a largely trivial way. Instead of this::
Security fixes
~~~~~~~~~~~~~~

  /*
   * foobar blah blah blah
   * another line of text
   */

it is requested that you make it look like this::

  /* foobar blah blah blah
   * another line of text
   */

What is "reverse xmas tree"?
----------------------------

Netdev has a convention for ordering local variables in functions.
Order the variable declaration lines longest to shortest, e.g.::

  struct scatterlist *sg;
  struct sk_buff *skb;
  int err, i;

If there are dependencies between the variables preventing the ordering,
move the initialization out of line.

I am working in existing code which uses non-standard formatting. Which formatting should I use?
------------------------------------------------------------------------------------------------
Make your code follow the most recent guidelines, so that eventually all code
in the domain of netdev is in the preferred format.

I found a bug that might have possible security implications or similar. Should I mail the main netdev maintainer off-list?
---------------------------------------------------------------------------------------------------------------------------
No. The current netdev maintainer has consistently requested that
Do not email netdev maintainers directly if you think you discovered
a bug that might have possible security implications.
The current netdev maintainer has consistently requested that
people use the mailing lists and not reach out directly. If you aren't
OK with that, then perhaps consider mailing security@kernel.org or
reading about http://oss-security.openwall.org/wiki/mailing-lists/distros
as possible alternative mechanisms.

What level of testing is expected before I submit my change?
------------------------------------------------------------
At the very minimum your changes must survive an ``allyesconfig`` and an
``allmodconfig`` build with ``W=1`` set without new warnings or failures.

Ideally you will have done run-time testing specific to your change,
and the patch series contains a set of kernel selftests for
``tools/testing/selftests/net`` or using the KUnit framework.
Co-posting changes to user space components
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

You are expected to test your changes on top of the relevant networking
tree (``net`` or ``net-next``) and not e.g. a stable tree or ``linux-next``.

How do I post corresponding changes to user space components?
-------------------------------------------------------------
User space code exercising kernel features should be posted
alongside kernel patches. This gives reviewers a chance to see
how any new interface is used and how well it works.
@@ -270,42 +208,10 @@ to the mailing list, e.g.::
Posting as one thread is discouraged because it confuses patchwork
(as of patchwork 2.2.2).

Can I reproduce the checks from patchwork on my local machine?
--------------------------------------------------------------
Preparing changes
-----------------

Checks in patchwork are mostly simple wrappers around existing kernel
scripts; the sources are available at:

  https://github.com/kuba-moo/nipa/tree/master/tests

Running all the builds and checks locally is a pain, can I post my patches and have the patchwork bot validate them?
--------------------------------------------------------------------------------------------------------------------

No, you must ensure that your patches are ready by testing them locally
before posting to the mailing list. The patchwork build bot instance
gets overloaded very easily and netdev@vger really doesn't need more
traffic if we can help it.

netdevsim is great, can I extend it for my out-of-tree tests?
-------------------------------------------------------------

No, ``netdevsim`` is a test vehicle solely for upstream tests.
(Please add your tests under ``tools/testing/selftests/``.)

We also give no guarantees that ``netdevsim`` won't change in the future
in a way which would break what would normally be considered uAPI.

Is netdevsim considered a "user" of an API?
-------------------------------------------

The Linux kernel has a long-standing rule that no API should be added unless
it has a real, in-tree user. Mock-ups and tests based on ``netdevsim`` are
strongly encouraged when adding new APIs, but ``netdevsim`` in itself
is **not** considered a use case/user.

Any other tips to help ensure my net/net-next patch gets OK'd?
--------------------------------------------------------------
Attention to detail. Re-read your own work as if you were the
Attention to detail is important. Re-read your own work as if you were the
reviewer. You can start with using ``checkpatch.pl``, perhaps even with
the ``--strict`` flag. But do not be mindlessly robotic in doing so.
If your change is a bug fix, make sure your commit log indicates the
@@ -320,10 +226,133 @@ Finally, go back and read
:ref:`Documentation/process/submitting-patches.rst <submittingpatches>`
to be sure you are not repeating some common mistake documented there.

My company uses peer feedback in employee performance reviews. Can I ask netdev maintainers for feedback?
---------------------------------------------------------------------------------------------------------
Indicating target tree
~~~~~~~~~~~~~~~~~~~~~~

Yes, especially if you spend a significant amount of time reviewing code
To help maintainers and CI bots you should explicitly mark which tree
your patch is targeting. Assuming that you use git, use the prefix
flag::

  git format-patch --subject-prefix='PATCH net-next' start..finish

Use ``net`` instead of ``net-next`` (always lower case) in the above for
bug-fix ``net`` content.

Dividing work into patches
~~~~~~~~~~~~~~~~~~~~~~~~~~

Put yourself in the shoes of the reviewer. Each patch is read separately
and therefore should constitute a comprehensible step towards your stated
goal.

Avoid sending series longer than 15 patches. Larger series take longer
to review as reviewers will defer looking at it until they find a large
chunk of time. A small series can be reviewed in a short time, so maintainers
just do it. As a result, a sequence of smaller series gets merged quicker and
with better review coverage. Re-posting large series also increases the mailing
list traffic.

Multi-line comments
~~~~~~~~~~~~~~~~~~~

Comment style convention is slightly different for networking and most of
the tree. Instead of this::

  /*
   * foobar blah blah blah
   * another line of text
   */

it is requested that you make it look like this::

  /* foobar blah blah blah
   * another line of text
   */

Local variable ordering ("reverse xmas tree", "RCS")
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Netdev has a convention for ordering local variables in functions.
Order the variable declaration lines longest to shortest, e.g.::

  struct scatterlist *sg;
  struct sk_buff *skb;
  int err, i;

If there are dependencies between the variables preventing the ordering,
move the initialization out of line.
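
A minimal sketch of that last point (the helper names here are hypothetical,
not from any in-tree driver): when one variable's initializer depends on
another, keep the longest-to-shortest declaration order and do the dependent
assignment in the body::

  struct scatterlist *sg;
  struct sk_buff *skb;
  int err, i;

  skb = receive_one_skb();   /* hypothetical source of the skb */
  sg = first_sg_of(skb);     /* depends on skb: initialized out of line */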

Format precedence
~~~~~~~~~~~~~~~~~

When working in existing code which uses nonstandard formatting, make
your code follow the most recent guidelines, so that eventually all code
in the domain of netdev is in the preferred format.

Resending after review
~~~~~~~~~~~~~~~~~~~~~~

Allow at least 24 hours to pass between postings. This will ensure reviewers
from all geographical locations have a chance to chime in. Do not wait
too long (weeks) between postings either as it will make it harder for reviewers
to recall all the context.

Make sure you address all the feedback in your new posting. Do not post a new
version of the code if the discussion about the previous version is still
ongoing, unless directly instructed by a reviewer.

Testing
-------

Expected level of testing
~~~~~~~~~~~~~~~~~~~~~~~~~

At the very minimum your changes must survive an ``allyesconfig`` and an
``allmodconfig`` build with ``W=1`` set without new warnings or failures.

Ideally you will have done run-time testing specific to your change,
and the patch series contains a set of kernel selftests for
``tools/testing/selftests/net`` or using the KUnit framework.

You are expected to test your changes on top of the relevant networking
tree (``net`` or ``net-next``) and not e.g. a stable tree or ``linux-next``.

patchwork checks
~~~~~~~~~~~~~~~~

Checks in patchwork are mostly simple wrappers around existing kernel
scripts; the sources are available at:

  https://github.com/kuba-moo/nipa/tree/master/tests

**Do not** post your patches just to run them through the checks.
You must ensure that your patches are ready by testing them locally
before posting to the mailing list. The patchwork build bot instance
gets overloaded very easily and netdev@vger really doesn't need more
traffic if we can help it.

netdevsim
~~~~~~~~~

``netdevsim`` is a test driver which can be used to exercise driver
configuration APIs without requiring capable hardware.
Mock-ups and tests based on ``netdevsim`` are strongly encouraged when
adding new APIs, but ``netdevsim`` in itself is **not** considered
a use case/user. You must also implement the new APIs in a real driver.

We give no guarantees that ``netdevsim`` won't change in the future
in a way which would break what would normally be considered uAPI.

``netdevsim`` is reserved for use by upstream tests only, so any
new ``netdevsim`` features must be accompanied by selftests under
``tools/testing/selftests/``.

Testimonials / feedback
-----------------------

Some companies use peer feedback in employee performance reviews.
Please feel free to request feedback from netdev maintainers,
especially if you spend a significant amount of time reviewing code
and go out of your way to improve shared infrastructure.

The feedback must be requested by you, the contributor, and will always
|
@ -5343,9 +5343,9 @@ KVM_XEN_ATTR_TYPE_SHARED_INFO
|
|||
32 vCPUs in the shared_info page, KVM does not automatically do so
|
||||
and instead requires that KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO be used
|
||||
explicitly even when the vcpu_info for a given vCPU resides at the
|
||||
"default" location in the shared_info page. This is because KVM is
|
||||
not aware of the Xen CPU id which is used as the index into the
|
||||
vcpu_info[] array, so cannot know the correct default location.
|
||||
"default" location in the shared_info page. This is because KVM may
|
||||
not be aware of the Xen CPU id which is used as the index into the
|
||||
vcpu_info[] array, so may know the correct default location.
|
||||
|
||||
Note that the shared info page may be constantly written to by KVM;
|
||||
it contains the event channel bitmap used to deliver interrupts to
|
||||
|
@@ -5356,23 +5356,29 @@ KVM_XEN_ATTR_TYPE_SHARED_INFO
  any vCPU has been running or any event channel interrupts can be
  routed to the guest.

  Setting the gfn to KVM_XEN_INVALID_GFN will disable the shared info
  page.

KVM_XEN_ATTR_TYPE_UPCALL_VECTOR
  Sets the exception vector used to deliver Xen event channel upcalls.
  This is the HVM-wide vector injected directly by the hypervisor
  (not through the local APIC), typically configured by a guest via
  HVM_PARAM_CALLBACK_IRQ.
  HVM_PARAM_CALLBACK_IRQ. This can be disabled again (e.g. for guest
  SHUTDOWN_soft_reset) by setting it to zero.

KVM_XEN_ATTR_TYPE_EVTCHN
  This attribute is available when the KVM_CAP_XEN_HVM ioctl indicates
  support for KVM_XEN_HVM_CONFIG_EVTCHN_SEND features. It configures
  an outbound port number for interception of EVTCHNOP_send requests
  from the guest. A given sending port number may be directed back
  to a specified vCPU (by APIC ID) / port / priority on the guest,
  or to trigger events on an eventfd. The vCPU and priority can be
  changed by setting KVM_XEN_EVTCHN_UPDATE in a subsequent call,
  but other fields cannot change for a given sending port. A port
  mapping is removed by using KVM_XEN_EVTCHN_DEASSIGN in the flags
  field.
  from the guest. A given sending port number may be directed back to
  a specified vCPU (by APIC ID) / port / priority on the guest, or to
  trigger events on an eventfd. The vCPU and priority can be changed
  by setting KVM_XEN_EVTCHN_UPDATE in a subsequent call, but other
  fields cannot change for a given sending port. A port mapping is
  removed by using KVM_XEN_EVTCHN_DEASSIGN in the flags field. Passing
  KVM_XEN_EVTCHN_RESET in the flags field removes all interception of
  outbound event channels. The values of the flags field are mutually
  exclusive and cannot be combined as a bitmask.

KVM_XEN_ATTR_TYPE_XEN_VERSION
  This attribute is available when the KVM_CAP_XEN_HVM ioctl indicates
@@ -5388,7 +5394,7 @@ KVM_XEN_ATTR_TYPE_RUNSTATE_UPDATE_FLAG
  support for KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG. It enables the
  XEN_RUNSTATE_UPDATE flag which allows guest vCPUs to safely read
  other vCPUs' vcpu_runstate_info. Xen guests enable this feature via
  the VM_ASST_TYPE_runstate_update_flag of the HYPERVISOR_vm_assist
  the VMASST_TYPE_runstate_update_flag of the HYPERVISOR_vm_assist
  hypercall.

4.127 KVM_XEN_HVM_GET_ATTR
@@ -5446,15 +5452,18 @@ KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO
  As with the shared_info page for the VM, the corresponding page may be
  dirtied at any time if event channel interrupt delivery is enabled, so
  userspace should always assume that the page is dirty without relying
  on dirty logging.
  on dirty logging. Setting the gpa to KVM_XEN_INVALID_GPA will disable
  the vcpu_info.
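
  As a hedged illustration only (not part of the KVM documentation;
  ``vcpu_fd`` and the error handling are assumptions), userspace could
  disable the vcpu_info as described above roughly like this::

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    struct kvm_xen_vcpu_attr attr = {
            .type  = KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO,
            .u.gpa = KVM_XEN_INVALID_GPA,   /* KVM_XEN_INVALID_GPA disables it */
    };

    if (ioctl(vcpu_fd, KVM_XEN_VCPU_SET_ATTR, &attr) < 0)
            perror("KVM_XEN_VCPU_SET_ATTR");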

KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO
  Sets the guest physical address of an additional pvclock structure
  for a given vCPU. This is typically used for guest vsyscall support.
  Setting the gpa to KVM_XEN_INVALID_GPA will disable the structure.

KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR
  Sets the guest physical address of the vcpu_runstate_info for a given
  vCPU. This is how a Xen guest tracks CPU state such as steal time.
  Setting the gpa to KVM_XEN_INVALID_GPA will disable the runstate area.

KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT
  Sets the runstate (RUNSTATE_running/_runnable/_blocked/_offline) of
@@ -5487,7 +5496,8 @@ KVM_XEN_VCPU_ATTR_TYPE_TIMER
  This attribute is available when the KVM_CAP_XEN_HVM ioctl indicates
  support for KVM_XEN_HVM_CONFIG_EVTCHN_SEND features. It sets the
  event channel port/priority for the VIRQ_TIMER of the vCPU, as well
  as allowing a pending timer to be saved/restored.
  as allowing a pending timer to be saved/restored. Setting the timer
  port to zero disables kernel handling of the singleshot timer.

KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR
  This attribute is available when the KVM_CAP_XEN_HVM ioctl indicates
@@ -5495,7 +5505,8 @@ KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR
  per-vCPU local APIC upcall vector, configured by a Xen guest with
  the HVMOP_set_evtchn_upcall_vector hypercall. This is typically
  used by Windows guests, and is distinct from the HVM-wide upcall
  vector configured with HVM_PARAM_CALLBACK_IRQ.
  vector configured with HVM_PARAM_CALLBACK_IRQ. It is disabled by
  setting the vector to zero.


4.129 KVM_XEN_VCPU_GET_ATTR
@@ -6577,11 +6588,6 @@ Please note that the kernel is allowed to use the kvm_run structure as the
primary storage for certain register types. Therefore, the kernel may use the
values in kvm_run even if the corresponding bit in kvm_dirty_regs is not set.

::

  };


6. Capabilities that can be enabled on vCPUs
============================================
@@ -16,17 +16,26 @@ The acquisition orders for mutexes are as follows:
- kvm->slots_lock is taken outside kvm->irq_lock, though acquiring
  them together is quite rare.

- Unlike kvm->slots_lock, kvm->slots_arch_lock is released before
  synchronize_srcu(&kvm->srcu). Therefore kvm->slots_arch_lock
  can be taken inside a kvm->srcu read-side critical section,
  while kvm->slots_lock cannot.

- kvm->mn_active_invalidate_count ensures that pairs of
  invalidate_range_start() and invalidate_range_end() callbacks
  use the same memslots array. kvm->slots_lock and kvm->slots_arch_lock
  are taken on the waiting side in install_new_memslots, so MMU notifiers
  must not take either kvm->slots_lock or kvm->slots_arch_lock.

For SRCU:

- ``synchronize_srcu(&kvm->srcu)`` is called _inside_
  the kvm->slots_lock critical section, therefore kvm->slots_lock
  cannot be taken inside a kvm->srcu read-side critical section.
  Instead, kvm->slots_arch_lock is released before the call
  to ``synchronize_srcu()`` and _can_ be taken inside a
  kvm->srcu read-side critical section.

- kvm->lock is taken inside kvm->srcu, therefore
  ``synchronize_srcu(&kvm->srcu)`` cannot be called inside
  a kvm->lock critical section. If you cannot delay the
  call until after kvm->lock is released, use ``call_srcu``.

On x86:

- vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock
MAINTAINERS
@@ -11468,7 +11468,7 @@ F: arch/x86/kvm/hyperv.*
F: arch/x86/kvm/kvm_onhyperv.*
F: arch/x86/kvm/svm/hyperv.*
F: arch/x86/kvm/svm/svm_onhyperv.*
F: arch/x86/kvm/vmx/evmcs.*
F: arch/x86/kvm/vmx/hyperv.*

KVM X86 Xen (KVM/Xen)
M: David Woodhouse <dwmw2@infradead.org>
@@ -14916,6 +14916,7 @@ L: linux-nvme@lists.infradead.org
S: Supported
W: http://git.infradead.org/nvme.git
T: git://git.infradead.org/nvme.git
F: Documentation/nvme/
F: drivers/nvme/host/
F: drivers/nvme/common/
F: include/linux/nvme*
@@ -16609,6 +16610,13 @@ S: Supported
F: Documentation/devicetree/bindings/input/pine64,pinephone-keyboard.yaml
F: drivers/input/keyboard/pinephone-keyboard.c

PKTCDVD DRIVER
M: linux-block@vger.kernel.org
S: Orphan
F: drivers/block/pktcdvd.c
F: include/linux/pktcdvd.h
F: include/uapi/linux/pktcdvd.h

PLANTOWER PMS7003 AIR POLLUTION SENSOR DRIVER
M: Tomasz Duszynski <tduszyns@gmail.com>
S: Maintained
@@ -22245,7 +22253,9 @@ F: drivers/scsi/vmw_pvscsi.c
F: drivers/scsi/vmw_pvscsi.h

VMWARE VIRTUAL PTP CLOCK DRIVER
M: Vivek Thampi <vithampi@vmware.com>
M: Srivatsa S. Bhat (VMware) <srivatsa@csail.mit.edu>
M: Deep Shah <sdeep@vmware.com>
R: Alexey Makhalov <amakhalov@vmware.com>
R: VMware PV-Drivers Reviewers <pv-drivers@vmware.com>
L: netdev@vger.kernel.org
S: Supported
Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 2
SUBLEVEL = 0
EXTRAVERSION = -rc1
EXTRAVERSION = -rc3
NAME = Hurr durr I'ma ninja sloth

# *DOCUMENTATION*
@@ -297,7 +297,7 @@ no-compiler-targets := $(no-dot-config-targets) install dtbs_install \
			headers_install modules_install kernelrelease image_name
no-sync-config-targets := $(no-dot-config-targets) %install kernelrelease \
			  image_name
single-targets := %.a %.i %.rsi %.ko %.lds %.ll %.lst %.mod %.o %.s %.symtypes %/
single-targets := %.a %.i %.ko %.lds %.ll %.lst %.mod %.o %.rsi %.s %.symtypes %/

config-build :=
mixed-build :=
@@ -1986,7 +1986,7 @@ $(single-no-ko): $(build-dir)
# Remove MODORDER when done because it is not the real one.
PHONY += single_modules
single_modules: $(single-no-ko) modules_prepare
	$(Q){ $(foreach m, $(single-ko), echo $(extmod_prefix)$m;) } > $(MODORDER)
	$(Q){ $(foreach m, $(single-ko), echo $(extmod_prefix)$(m:%.ko=%.o);) } > $(MODORDER)
	$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
ifneq ($(KBUILD_MODPOST_NOFINAL),1)
	$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modfinal
@@ -128,15 +128,16 @@ extern int vfp_restore_user_hwstate(struct user_vfp *,
#define TIF_NEED_RESCHED	1	/* rescheduling necessary */
#define TIF_NOTIFY_RESUME	2	/* callback before returning to user */
#define TIF_UPROBE		3	/* breakpointed or singlestepping */
#define TIF_SYSCALL_TRACE	4	/* syscall trace active */
#define TIF_SYSCALL_AUDIT	5	/* syscall auditing active */
#define TIF_SYSCALL_TRACEPOINT	6	/* syscall tracepoint instrumentation */
#define TIF_SECCOMP		7	/* seccomp syscall filtering active */
#define TIF_NOTIFY_SIGNAL	8	/* signal notifications exist */
#define TIF_NOTIFY_SIGNAL	4	/* signal notifications exist */

#define TIF_USING_IWMMXT	17
#define TIF_MEMDIE		18	/* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK	20
#define TIF_RESTORE_SIGMASK	19
#define TIF_SYSCALL_TRACE	20	/* syscall trace active */
#define TIF_SYSCALL_AUDIT	21	/* syscall auditing active */
#define TIF_SYSCALL_TRACEPOINT	22	/* syscall tracepoint instrumentation */
#define TIF_SECCOMP		23	/* seccomp syscall filtering active */


#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
@@ -8,6 +8,7 @@
 */

#include <linux/linkage.h>
#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include "sm4-ce-asm.h"

@@ -104,7 +105,7 @@ SYM_FUNC_START(sm4_ce_ccm_final)
SYM_FUNC_END(sm4_ce_ccm_final)

.align 3
SYM_FUNC_START(sm4_ce_ccm_enc)
SYM_TYPED_FUNC_START(sm4_ce_ccm_enc)
	/* input:
	 *   x0: round key array, CTX
	 *   x1: dst

@@ -216,7 +217,7 @@ SYM_FUNC_START(sm4_ce_ccm_enc)
SYM_FUNC_END(sm4_ce_ccm_enc)

.align 3
SYM_FUNC_START(sm4_ce_ccm_dec)
SYM_TYPED_FUNC_START(sm4_ce_ccm_dec)
	/* input:
	 *   x0: round key array, CTX
	 *   x1: dst
@@ -9,6 +9,7 @@
 */

#include <linux/linkage.h>
#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include "sm4-ce-asm.h"

@@ -370,7 +371,7 @@ SYM_FUNC_START(pmull_ghash_update)
SYM_FUNC_END(pmull_ghash_update)

.align 3
SYM_FUNC_START(sm4_ce_pmull_gcm_enc)
SYM_TYPED_FUNC_START(sm4_ce_pmull_gcm_enc)
	/* input:
	 *   x0: round key array, CTX
	 *   x1: dst

@@ -581,7 +582,7 @@ SYM_FUNC_END(sm4_ce_pmull_gcm_enc)
#define RH3 v20

.align 3
SYM_FUNC_START(sm4_ce_pmull_gcm_dec)
SYM_TYPED_FUNC_START(sm4_ce_pmull_gcm_dec)
	/* input:
	 *   x0: round key array, CTX
	 *   x1: dst
@@ -64,7 +64,7 @@ void __init plat_mem_setup(void)
	dtb = get_fdt();
	__dt_setup_arch(dtb);

	if (!early_init_dt_scan_memory())
	if (early_init_dt_scan_memory())
		return;

	if (soc_info.mem_detect)
@@ -659,3 +659,19 @@
		interrupts = <16 2 1 9>;
	};
};

&fman0_rx_0x08 {
	/delete-property/ fsl,fman-10g-port;
};

&fman0_tx_0x28 {
	/delete-property/ fsl,fman-10g-port;
};

&fman0_rx_0x09 {
	/delete-property/ fsl,fman-10g-port;
};

&fman0_tx_0x29 {
	/delete-property/ fsl,fman-10g-port;
};
@@ -8,6 +8,7 @@
#define BSS_FIRST_SECTIONS *(.bss.prominit)
#define EMITS_PT_NOTE
#define RO_EXCEPTION_TABLE_ALIGN 0
#define RUNTIME_DISCARD_EXIT

#define SOFT_MASK_TABLE(align)		\
	. = ALIGN(align);		\

@@ -410,9 +411,12 @@ SECTIONS
	DISCARDS
	/DISCARD/ : {
		*(*.EMB.apuinfo)
		*(.glink .iplt .plt .rela* .comment)
		*(.glink .iplt .plt)
		*(.gnu.version*)
		*(.gnu.attributes)
		*(.eh_frame)
#ifndef CONFIG_RELOCATABLE
		*(.rela*)
#endif
	}
}
@@ -165,7 +165,7 @@ do { \
	might_fault();						\
	access_ok(__p, sizeof(*__p)) ?				\
		__get_user((x), __p) :				\
		((x) = 0, -EFAULT);				\
		((x) = (__force __typeof__(x))0, -EFAULT);	\
})

#define __put_user_asm(insn, x, ptr, err)	\
@@ -31,9 +31,9 @@ __RISCV_INSN_FUNCS(fence, 0x7f, 0x0f);
} while (0)

__RISCV_INSN_FUNCS(c_j, 0xe003, 0xa001);
__RISCV_INSN_FUNCS(c_jr, 0xf007, 0x8002);
__RISCV_INSN_FUNCS(c_jr, 0xf07f, 0x8002);
__RISCV_INSN_FUNCS(c_jal, 0xe003, 0x2001);
__RISCV_INSN_FUNCS(c_jalr, 0xf007, 0x9002);
__RISCV_INSN_FUNCS(c_jalr, 0xf07f, 0x9002);
__RISCV_INSN_FUNCS(c_beqz, 0xe003, 0xc001);
__RISCV_INSN_FUNCS(c_bnez, 0xe003, 0xe001);
__RISCV_INSN_FUNCS(c_ebreak, 0xffff, 0x9002);
@@ -386,8 +386,8 @@ static int handle_mmio(struct pt_regs *regs, struct ve_info *ve)
{
	unsigned long *reg, val, vaddr;
	char buffer[MAX_INSN_SIZE];
	enum insn_mmio_type mmio;
	struct insn insn = {};
	enum mmio_type mmio;
	int size, extend_size;
	u8 extend_val = 0;

@@ -402,10 +402,10 @@ static int handle_mmio(struct pt_regs *regs, struct ve_info *ve)
		return -EINVAL;

	mmio = insn_decode_mmio(&insn, &size);
	if (WARN_ON_ONCE(mmio == MMIO_DECODE_FAILED))
	if (WARN_ON_ONCE(mmio == INSN_MMIO_DECODE_FAILED))
		return -EINVAL;

	if (mmio != MMIO_WRITE_IMM && mmio != MMIO_MOVS) {
	if (mmio != INSN_MMIO_WRITE_IMM && mmio != INSN_MMIO_MOVS) {
		reg = insn_get_modrm_reg_ptr(&insn, regs);
		if (!reg)
			return -EINVAL;

@@ -426,23 +426,23 @@ static int handle_mmio(struct pt_regs *regs, struct ve_info *ve)

	/* Handle writes first */
	switch (mmio) {
	case MMIO_WRITE:
	case INSN_MMIO_WRITE:
		memcpy(&val, reg, size);
		if (!mmio_write(size, ve->gpa, val))
			return -EIO;
		return insn.length;
	case MMIO_WRITE_IMM:
	case INSN_MMIO_WRITE_IMM:
		val = insn.immediate.value;
		if (!mmio_write(size, ve->gpa, val))
			return -EIO;
		return insn.length;
	case MMIO_READ:
	case MMIO_READ_ZERO_EXTEND:
	case MMIO_READ_SIGN_EXTEND:
	case INSN_MMIO_READ:
	case INSN_MMIO_READ_ZERO_EXTEND:
	case INSN_MMIO_READ_SIGN_EXTEND:
		/* Reads are handled below */
		break;
	case MMIO_MOVS:
	case MMIO_DECODE_FAILED:
	case INSN_MMIO_MOVS:
	case INSN_MMIO_DECODE_FAILED:
		/*
		 * MMIO was accessed with an instruction that could not be
		 * decoded or handled properly. It was likely not using io.h

@@ -459,15 +459,15 @@ static int handle_mmio(struct pt_regs *regs, struct ve_info *ve)
		return -EIO;

	switch (mmio) {
	case MMIO_READ:
	case INSN_MMIO_READ:
		/* Zero-extend for 32-bit operation */
		extend_size = size == 4 ? sizeof(*reg) : 0;
		break;
	case MMIO_READ_ZERO_EXTEND:
	case INSN_MMIO_READ_ZERO_EXTEND:
		/* Zero extend based on operand size */
		extend_size = insn.opnd_bytes;
		break;
	case MMIO_READ_SIGN_EXTEND:
	case INSN_MMIO_READ_SIGN_EXTEND:
		/* Sign extend based on operand size */
		extend_size = insn.opnd_bytes;
		if (size == 1 && val & BIT(7))
@@ -1387,7 +1387,7 @@ static int __init amd_core_pmu_init(void)
	 * numbered counter following it.
	 */
	for (i = 0; i < x86_pmu.num_counters - 1; i += 2)
		even_ctr_mask |= 1 << i;
		even_ctr_mask |= BIT_ULL(i);

	pair_constraint = (struct event_constraint)
		__EVENT_CONSTRAINT(0, even_ctr_mask, 0,
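
A hedged aside on the BIT_ULL() change above (standalone C, not kernel code):
with a 64-bit mask, ``1 << i`` is a 32-bit int shift whose behavior is
undefined once i reaches 31, while BIT_ULL(i) expands to 1ULL << i and is
well-defined for any i below 64::

	#include <stdint.h>

	/* What the old expression computed: int arithmetic, unsafe for i >= 31. */
	static uint64_t mask_bit_int(unsigned int i) { return 1 << i; }

	/* What BIT_ULL(i) expands to: 64-bit arithmetic, fine for i < 64. */
	static uint64_t mask_bit_ull(unsigned int i) { return 1ULL << i; }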
@@ -800,13 +800,18 @@ static const struct x86_cpu_id rapl_model_match[] __initconst = {
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X,		&model_hsx),
	X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L,		&model_skl),
	X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE,		&model_skl),
	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L,		&model_skl),
	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE,		&model_skl),
	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE,		&model_skl),
	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L,		&model_skl),
	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N,		&model_skl),
	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X,	&model_spr),
	X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X,	&model_spr),
	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE,		&model_skl),
	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P,	&model_skl),
	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S,	&model_skl),
	X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE,		&model_skl),
	X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L,	&model_skl),
	{},
};
MODULE_DEVICE_TABLE(x86cpu, rapl_model_match);
@@ -32,16 +32,16 @@ int insn_fetch_from_user_inatomic(struct pt_regs *regs,
bool insn_decode_from_regs(struct insn *insn, struct pt_regs *regs,
			   unsigned char buf[MAX_INSN_SIZE], int buf_size);

enum mmio_type {
	MMIO_DECODE_FAILED,
	MMIO_WRITE,
	MMIO_WRITE_IMM,
	MMIO_READ,
	MMIO_READ_ZERO_EXTEND,
	MMIO_READ_SIGN_EXTEND,
	MMIO_MOVS,
enum insn_mmio_type {
	INSN_MMIO_DECODE_FAILED,
	INSN_MMIO_WRITE,
	INSN_MMIO_WRITE_IMM,
	INSN_MMIO_READ,
	INSN_MMIO_READ_ZERO_EXTEND,
	INSN_MMIO_READ_SIGN_EXTEND,
	INSN_MMIO_MOVS,
};

enum mmio_type insn_decode_mmio(struct insn *insn, int *bytes);
enum insn_mmio_type insn_decode_mmio(struct insn *insn, int *bytes);

#endif /* _ASM_X86_INSN_EVAL_H */
@@ -119,7 +119,7 @@ static bool is_coretext(const struct core_text *ct, void *addr)
	return within_module_coretext(addr);
}

static __init_or_module bool skip_addr(void *dest)
static bool skip_addr(void *dest)
{
	if (dest == error_entry)
		return true;

@@ -181,7 +181,7 @@ static const u8 nops[] = {
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
};

static __init_or_module void *patch_dest(void *dest, bool direct)
static void *patch_dest(void *dest, bool direct)
{
	unsigned int tsize = SKL_TMPL_SIZE;
	u8 *pad = dest - tsize;
@@ -1981,6 +1981,8 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
		if (ctrl == PR_SPEC_FORCE_DISABLE)
			task_set_spec_ib_force_disable(task);
		task_update_spec_tif(task);
		if (task == current)
			indirect_branch_prediction_barrier();
		break;
	default:
		return -ERANGE;
@@ -401,10 +401,8 @@ int crash_load_segments(struct kimage *image)
	kbuf.buf_align = ELF_CORE_HEADER_ALIGN;
	kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
	ret = kexec_add_buffer(&kbuf);
	if (ret) {
		vfree((void *)image->elf_headers);
	if (ret)
		return ret;
	}
	image->elf_load_addr = kbuf.mem;
	pr_debug("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
		 image->elf_load_addr, kbuf.bufsz, kbuf.memsz);
@@ -37,6 +37,7 @@
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/kgdb.h>
#include <linux/ftrace.h>
#include <linux/kasan.h>
#include <linux/moduleloader.h>

@@ -281,12 +282,15 @@ static int can_probe(unsigned long paddr)
		if (ret < 0)
			return 0;

#ifdef CONFIG_KGDB
		/*
		 * Another debugging subsystem might insert this breakpoint.
		 * In that case, we can't recover it.
		 * If there is a dynamically installed kgdb sw breakpoint,
		 * this function should not be probed.
		 */
		if (insn.opcode.bytes[0] == INT3_INSN_OPCODE)
		if (insn.opcode.bytes[0] == INT3_INSN_OPCODE &&
		    kgdb_has_hit_break(addr))
			return 0;
#endif
		addr += insn.length;
	}
@@ -15,6 +15,7 @@
 #include <linux/extable.h>
 #include <linux/kdebug.h>
 #include <linux/kallsyms.h>
+#include <linux/kgdb.h>
 #include <linux/ftrace.h>
 #include <linux/objtool.h>
 #include <linux/pgtable.h>

@@ -279,19 +280,6 @@ static int insn_is_indirect_jump(struct insn *insn)
	return ret;
 }

-static bool is_padding_int3(unsigned long addr, unsigned long eaddr)
-{
-	unsigned char ops;
-
-	for (; addr < eaddr; addr++) {
-		if (get_kernel_nofault(ops, (void *)addr) < 0 ||
-		    ops != INT3_INSN_OPCODE)
-			return false;
-	}
-
-	return true;
-}
-
 /* Decode whole function to ensure any instructions don't jump into target */
 static int can_optimize(unsigned long paddr)
 {

@@ -334,15 +322,15 @@ static int can_optimize(unsigned long paddr)
		ret = insn_decode_kernel(&insn, (void *)recovered_insn);
		if (ret < 0)
			return 0;
+#ifdef CONFIG_KGDB
		/*
-		 * In the case of detecting unknown breakpoint, this could be
-		 * a padding INT3 between functions. Let's check that all the
-		 * rest of the bytes are also INT3.
+		 * If there is a dynamically installed kgdb sw breakpoint,
+		 * this function should not be probed.
		 */
-		if (insn.opcode.bytes[0] == INT3_INSN_OPCODE)
-			return is_padding_int3(addr, paddr - offset + size) ? 1 : 0;
-
+		if (insn.opcode.bytes[0] == INT3_INSN_OPCODE &&
+		    kgdb_has_hit_break(addr))
+			return 0;
+#endif
		/* Recover address */
		insn.kaddr = (void *)addr;
		insn.next_byte = (void *)(addr + insn.length);

@@ -1536,32 +1536,32 @@ static enum es_result vc_handle_mmio_movs(struct es_em_ctxt *ctxt,
 static enum es_result vc_handle_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
 {
	struct insn *insn = &ctxt->insn;
+	enum insn_mmio_type mmio;
	unsigned int bytes = 0;
-	enum mmio_type mmio;
	enum es_result ret;
	u8 sign_byte;
	long *reg_data;

	mmio = insn_decode_mmio(insn, &bytes);
-	if (mmio == MMIO_DECODE_FAILED)
+	if (mmio == INSN_MMIO_DECODE_FAILED)
		return ES_DECODE_FAILED;

-	if (mmio != MMIO_WRITE_IMM && mmio != MMIO_MOVS) {
+	if (mmio != INSN_MMIO_WRITE_IMM && mmio != INSN_MMIO_MOVS) {
		reg_data = insn_get_modrm_reg_ptr(insn, ctxt->regs);
		if (!reg_data)
			return ES_DECODE_FAILED;
	}

	switch (mmio) {
-	case MMIO_WRITE:
+	case INSN_MMIO_WRITE:
		memcpy(ghcb->shared_buffer, reg_data, bytes);
		ret = vc_do_mmio(ghcb, ctxt, bytes, false);
		break;
-	case MMIO_WRITE_IMM:
+	case INSN_MMIO_WRITE_IMM:
		memcpy(ghcb->shared_buffer, insn->immediate1.bytes, bytes);
		ret = vc_do_mmio(ghcb, ctxt, bytes, false);
		break;
-	case MMIO_READ:
+	case INSN_MMIO_READ:
		ret = vc_do_mmio(ghcb, ctxt, bytes, true);
		if (ret)
			break;
@@ -1572,7 +1572,7 @@ static enum es_result vc_handle_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
		memcpy(reg_data, ghcb->shared_buffer, bytes);
		break;
-	case MMIO_READ_ZERO_EXTEND:
+	case INSN_MMIO_READ_ZERO_EXTEND:
		ret = vc_do_mmio(ghcb, ctxt, bytes, true);
		if (ret)
			break;
@@ -1581,7 +1581,7 @@ static enum es_result vc_handle_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
		memset(reg_data, 0, insn->opnd_bytes);
		memcpy(reg_data, ghcb->shared_buffer, bytes);
		break;
-	case MMIO_READ_SIGN_EXTEND:
+	case INSN_MMIO_READ_SIGN_EXTEND:
		ret = vc_do_mmio(ghcb, ctxt, bytes, true);
		if (ret)
			break;
@@ -1600,7 +1600,7 @@ static enum es_result vc_handle_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
		memset(reg_data, sign_byte, insn->opnd_bytes);
		memcpy(reg_data, ghcb->shared_buffer, bytes);
		break;
-	case MMIO_MOVS:
+	case INSN_MMIO_MOVS:
		ret = vc_handle_mmio_movs(ctxt, bytes);
		break;
	default:

@@ -1769,6 +1769,7 @@ static bool hv_is_vp_in_sparse_set(u32 vp_id, u64 valid_bank_mask, u64 sparse_ba
 }

 struct kvm_hv_hcall {
+	/* Hypercall input data */
	u64 param;
	u64 ingpa;
	u64 outgpa;
@@ -1779,12 +1780,21 @@ struct kvm_hv_hcall {
	bool fast;
	bool rep;
	sse128_t xmm[HV_HYPERCALL_MAX_XMM_REGISTERS];
+
+	/*
+	 * Current read offset when KVM reads hypercall input data gradually,
+	 * either offset in bytes from 'ingpa' for regular hypercalls or the
+	 * number of already consumed 'XMM halves' for 'fast' hypercalls.
+	 */
+	union {
+		gpa_t data_offset;
+		int consumed_xmm_halves;
+	};
 };

 static int kvm_hv_get_hc_data(struct kvm *kvm, struct kvm_hv_hcall *hc,
-			      u16 orig_cnt, u16 cnt_cap, u64 *data,
-			      int consumed_xmm_halves, gpa_t offset)
+			      u16 orig_cnt, u16 cnt_cap, u64 *data)
 {
	/*
	 * Preserve the original count when ignoring entries via a "cap", KVM
@@ -1799,11 +1809,11 @@ static int kvm_hv_get_hc_data(struct kvm *kvm, struct kvm_hv_hcall *hc,
		 * Each XMM holds two sparse banks, but do not count halves that
		 * have already been consumed for hypercall parameters.
		 */
-		if (orig_cnt > 2 * HV_HYPERCALL_MAX_XMM_REGISTERS - consumed_xmm_halves)
+		if (orig_cnt > 2 * HV_HYPERCALL_MAX_XMM_REGISTERS - hc->consumed_xmm_halves)
			return HV_STATUS_INVALID_HYPERCALL_INPUT;

		for (i = 0; i < cnt; i++) {
-			j = i + consumed_xmm_halves;
+			j = i + hc->consumed_xmm_halves;
			if (j % 2)
				data[i] = sse128_hi(hc->xmm[j / 2]);
			else
@@ -1812,27 +1822,24 @@ static int kvm_hv_get_hc_data(struct kvm *kvm, struct kvm_hv_hcall *hc,
		return 0;
	}

-	return kvm_read_guest(kvm, hc->ingpa + offset, data,
+	return kvm_read_guest(kvm, hc->ingpa + hc->data_offset, data,
			      cnt * sizeof(*data));
 }

 static u64 kvm_get_sparse_vp_set(struct kvm *kvm, struct kvm_hv_hcall *hc,
-				 u64 *sparse_banks, int consumed_xmm_halves,
-				 gpa_t offset)
+				 u64 *sparse_banks)
 {
	if (hc->var_cnt > HV_MAX_SPARSE_VCPU_BANKS)
		return -EINVAL;

	/* Cap var_cnt to ignore banks that cannot contain a legal VP index. */
	return kvm_hv_get_hc_data(kvm, hc, hc->var_cnt, KVM_HV_MAX_SPARSE_VCPU_SET_BITS,
-				  sparse_banks, consumed_xmm_halves, offset);
+				  sparse_banks);
 }

-static int kvm_hv_get_tlb_flush_entries(struct kvm *kvm, struct kvm_hv_hcall *hc, u64 entries[],
-					int consumed_xmm_halves, gpa_t offset)
+static int kvm_hv_get_tlb_flush_entries(struct kvm *kvm, struct kvm_hv_hcall *hc, u64 entries[])
 {
-	return kvm_hv_get_hc_data(kvm, hc, hc->rep_cnt, hc->rep_cnt,
-				  entries, consumed_xmm_halves, offset);
+	return kvm_hv_get_hc_data(kvm, hc, hc->rep_cnt, hc->rep_cnt, entries);
 }

 static void hv_tlb_flush_enqueue(struct kvm_vcpu *vcpu,
@@ -1926,8 +1933,6 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
	struct kvm_vcpu *v;
	unsigned long i;
	bool all_cpus;
-	int consumed_xmm_halves = 0;
-	gpa_t data_offset;

	/*
	 * The Hyper-V TLFS doesn't allow more than HV_MAX_SPARSE_VCPU_BANKS
@@ -1955,12 +1960,12 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
			flush.address_space = hc->ingpa;
			flush.flags = hc->outgpa;
			flush.processor_mask = sse128_lo(hc->xmm[0]);
-			consumed_xmm_halves = 1;
+			hc->consumed_xmm_halves = 1;
		} else {
			if (unlikely(kvm_read_guest(kvm, hc->ingpa,
						    &flush, sizeof(flush))))
				return HV_STATUS_INVALID_HYPERCALL_INPUT;
-			data_offset = sizeof(flush);
+			hc->data_offset = sizeof(flush);
		}

		trace_kvm_hv_flush_tlb(flush.processor_mask,
@@ -1985,12 +1990,12 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
			flush_ex.flags = hc->outgpa;
			memcpy(&flush_ex.hv_vp_set,
			       &hc->xmm[0], sizeof(hc->xmm[0]));
-			consumed_xmm_halves = 2;
+			hc->consumed_xmm_halves = 2;
		} else {
			if (unlikely(kvm_read_guest(kvm, hc->ingpa, &flush_ex,
						    sizeof(flush_ex))))
				return HV_STATUS_INVALID_HYPERCALL_INPUT;
-			data_offset = sizeof(flush_ex);
+			hc->data_offset = sizeof(flush_ex);
		}

		trace_kvm_hv_flush_tlb_ex(flush_ex.hv_vp_set.valid_bank_mask,
@@ -2009,8 +2014,7 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
		if (!hc->var_cnt)
			goto ret_success;

-		if (kvm_get_sparse_vp_set(kvm, hc, sparse_banks,
-					  consumed_xmm_halves, data_offset))
+		if (kvm_get_sparse_vp_set(kvm, hc, sparse_banks))
			return HV_STATUS_INVALID_HYPERCALL_INPUT;
	}
@@ -2021,8 +2025,10 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
		 * consumed_xmm_halves to make sure TLB flush entries are read
		 * from the correct offset.
		 */
-		data_offset += hc->var_cnt * sizeof(sparse_banks[0]);
-		consumed_xmm_halves += hc->var_cnt;
+		if (hc->fast)
+			hc->consumed_xmm_halves += hc->var_cnt;
+		else
+			hc->data_offset += hc->var_cnt * sizeof(sparse_banks[0]);
	}

	if (hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE ||
@@ -2030,8 +2036,7 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
	    hc->rep_cnt > ARRAY_SIZE(__tlb_flush_entries)) {
		tlb_flush_entries = NULL;
	} else {
-		if (kvm_hv_get_tlb_flush_entries(kvm, hc, __tlb_flush_entries,
-						 consumed_xmm_halves, data_offset))
+		if (kvm_hv_get_tlb_flush_entries(kvm, hc, __tlb_flush_entries))
			return HV_STATUS_INVALID_HYPERCALL_INPUT;
		tlb_flush_entries = __tlb_flush_entries;
	}
@@ -2180,9 +2185,13 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
		if (!hc->var_cnt)
			goto ret_success;

-		if (kvm_get_sparse_vp_set(kvm, hc, sparse_banks, 1,
-					  offsetof(struct hv_send_ipi_ex,
-						   vp_set.bank_contents)))
+		if (!hc->fast)
+			hc->data_offset = offsetof(struct hv_send_ipi_ex,
+						   vp_set.bank_contents);
+		else
+			hc->consumed_xmm_halves = 1;
+
+		if (kvm_get_sparse_vp_set(kvm, hc, sparse_banks))
			return HV_STATUS_INVALID_HYPERCALL_INPUT;
	}

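Note: a minimal sketch of the bookkeeping the new union in struct kvm_hv_hcall centralizes; field names are from the hunks above, the helper itself is illustrative.

    /* Sketch: advance the input cursor after consuming var_cnt sparse banks. */
    static void advance_input_cursor_sketch(struct kvm_hv_hcall *hc, u64 var_cnt)
    {
        if (hc->fast)
            hc->consumed_xmm_halves += var_cnt;          /* halves of XMM regs */
        else
            hc->data_offset += var_cnt * sizeof(u64);    /* bytes past ingpa */
    }
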
@@ -426,8 +426,9 @@ void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu,
		kvm_set_msi_irq(vcpu->kvm, entry, &irq);

		if (irq.trig_mode &&
-		    kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT,
-					irq.dest_id, irq.dest_mode))
+		    (kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT,
+					 irq.dest_id, irq.dest_mode) ||
+		     kvm_apic_pending_eoi(vcpu, irq.vector)))
			__set_bit(irq.vector, ioapic_handled_vectors);
	}
 }

@@ -188,11 +188,11 @@ static inline bool lapic_in_kernel(struct kvm_vcpu *vcpu)

 extern struct static_key_false_deferred apic_hw_disabled;

-static inline int kvm_apic_hw_enabled(struct kvm_lapic *apic)
+static inline bool kvm_apic_hw_enabled(struct kvm_lapic *apic)
 {
	if (static_branch_unlikely(&apic_hw_disabled.key))
		return apic->vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE;
-	return MSR_IA32_APICBASE_ENABLE;
+	return true;
 }

 extern struct static_key_false_deferred apic_sw_disabled;

@@ -363,7 +363,7 @@ static __always_inline bool is_rsvd_spte(struct rsvd_bits_validate *rsvd_check,
 * A shadow-present leaf SPTE may be non-writable for 4 possible reasons:
 *
 * 1. To intercept writes for dirty logging. KVM write-protects huge pages
- *    so that they can be split be split down into the dirty logging
+ *    so that they can be split down into the dirty logging
 *    granularity (4KiB) whenever the guest writes to them. KVM also
 *    write-protects 4KiB pages so that writes can be recorded in the dirty log
 *    (e.g. if not using PML). SPTEs are write-protected for dirty logging

@@ -1074,7 +1074,9 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
	int ret = RET_PF_FIXED;
	bool wrprot = false;

-	WARN_ON(sp->role.level != fault->goal_level);
+	if (WARN_ON_ONCE(sp->role.level != fault->goal_level))
+		return RET_PF_RETRY;
+
	if (unlikely(!fault->slot))
		new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
	else
@@ -1173,9 +1175,6 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
		if (fault->nx_huge_page_workaround_enabled)
			disallowed_hugepage_adjust(fault, iter.old_spte, iter.level);

-		if (iter.level == fault->goal_level)
-			break;
-
		/*
		 * If SPTE has been frozen by another thread, just give up and
		 * retry, avoiding unnecessary page table allocation and free.
@@ -1183,6 +1182,9 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
		if (is_removed_spte(iter.old_spte))
			goto retry;

+		if (iter.level == fault->goal_level)
+			goto map_target_level;
+
		/* Step down into the lower level page table if it exists. */
		if (is_shadow_present_pte(iter.old_spte) &&
		    !is_large_pte(iter.old_spte))
@@ -1203,8 +1205,8 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
			r = tdp_mmu_link_sp(kvm, &iter, sp, true);

		/*
-		 * Also force the guest to retry the access if the upper level SPTEs
-		 * aren't in place.
+		 * Force the guest to retry if installing an upper level SPTE
+		 * failed, e.g. because a different task modified the SPTE.
		 */
		if (r) {
			tdp_mmu_free_sp(sp);
@@ -1214,11 +1216,20 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
		if (fault->huge_page_disallowed &&
		    fault->req_level >= iter.level) {
			spin_lock(&kvm->arch.tdp_mmu_pages_lock);
-			track_possible_nx_huge_page(kvm, sp);
+			if (sp->nx_huge_page_disallowed)
+				track_possible_nx_huge_page(kvm, sp);
			spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
		}
	}

+	/*
+	 * The walk aborted before reaching the target level, e.g. because the
+	 * iterator detected an upper level SPTE was frozen during traversal.
+	 */
+	WARN_ON_ONCE(iter.level == fault->goal_level);
+	goto retry;
+
+map_target_level:
	ret = tdp_mmu_map_handle_target_level(vcpu, fault, &iter);

 retry:

@@ -238,7 +238,8 @@ static bool pmc_resume_counter(struct kvm_pmc *pmc)
		return false;

	/* recalibrate sample period and check if it's accepted by perf core */
-	if (perf_event_period(pmc->perf_event,
+	if (is_sampling_event(pmc->perf_event) &&
+	    perf_event_period(pmc->perf_event,
			      get_sample_period(pmc, pmc->counter)))
		return false;

@@ -140,7 +140,8 @@ static inline u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)

 static inline void pmc_update_sample_period(struct kvm_pmc *pmc)
 {
-	if (!pmc->perf_event || pmc->is_paused)
+	if (!pmc->perf_event || pmc->is_paused ||
+	    !is_sampling_event(pmc->perf_event))
		return;

	perf_event_period(pmc->perf_event,

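Note: both PMU hunks add the same precondition, so a small sketch of the common predicate; the helper name is illustrative, is_sampling_event() is the existing perf-core check (attr.sample_period != 0).

    /* Sketch: a period can only be reprogrammed on a live sampling event. */
    static bool pmc_can_reprogram_period_sketch(struct kvm_pmc *pmc)
    {
        return pmc->perf_event && !pmc->is_paused &&
               is_sampling_event(pmc->perf_event);
    }
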
@@ -5296,10 +5296,19 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
		if (vmptr == vmx->nested.current_vmptr)
			nested_release_vmcs12(vcpu);

-		kvm_vcpu_write_guest(vcpu,
-				     vmptr + offsetof(struct vmcs12,
-						      launch_state),
-				     &zero, sizeof(zero));
+		/*
+		 * Silently ignore memory errors on VMCLEAR, Intel's pseudocode
+		 * for VMCLEAR includes an "ensure that data for VMCS referenced
+		 * by the operand is in memory" clause that guards writes to
+		 * memory, i.e. doing nothing for I/O is architecturally valid.
+		 *
+		 * FIXME: Suppress failures if and only if no memslot is found,
+		 * i.e. exit to userspace if __copy_to_user() fails.
+		 */
+		(void)kvm_vcpu_write_guest(vcpu,
+					   vmptr + offsetof(struct vmcs12,
+							    launch_state),
+					   &zero, sizeof(zero));
	} else if (vmx->nested.hv_evmcs && vmptr == vmx->nested.hv_evmcs_vmptr) {
		nested_release_evmcs(vcpu);
	}
@@ -6873,7 +6882,8 @@ void nested_vmx_setup_ctls_msrs(struct vmcs_config *vmcs_conf, u32 ept_caps)
		SECONDARY_EXEC_ENABLE_INVPCID |
		SECONDARY_EXEC_RDSEED_EXITING |
		SECONDARY_EXEC_XSAVES |
-		SECONDARY_EXEC_TSC_SCALING;
+		SECONDARY_EXEC_TSC_SCALING |
+		SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;

	/*
	 * We can emulate "VMCS shadowing," even if the hardware

@@ -4459,6 +4459,13 @@ vmx_adjust_secondary_exec_control(struct vcpu_vmx *vmx, u32 *exec_control,
	 * controls for features that are/aren't exposed to the guest.
	 */
	if (nested) {
+		/*
+		 * All features that can be added or removed to VMX MSRs must
+		 * be supported in the first place for nested virtualization.
+		 */
+		if (WARN_ON_ONCE(!(vmcs_config.nested.secondary_ctls_high & control)))
+			enabled = false;
+
		if (enabled)
			vmx->nested.msrs.secondary_ctls_high |= control;
		else

@@ -13132,6 +13132,9 @@ int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
			      struct x86_exception *e)
 {
	if (r == X86EMUL_PROPAGATE_FAULT) {
+		if (KVM_BUG_ON(!e, vcpu->kvm))
+			return -EIO;
+
		kvm_inject_emulated_page_fault(vcpu, e);
		return 1;
	}

@@ -41,7 +41,7 @@ static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn)
	int ret = 0;
	int idx = srcu_read_lock(&kvm->srcu);

-	if (gfn == GPA_INVALID) {
+	if (gfn == KVM_XEN_INVALID_GFN) {
		kvm_gpc_deactivate(gpc);
		goto out;
	}
@@ -659,7 +659,7 @@ int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
		if (kvm->arch.xen.shinfo_cache.active)
			data->u.shared_info.gfn = gpa_to_gfn(kvm->arch.xen.shinfo_cache.gpa);
		else
-			data->u.shared_info.gfn = GPA_INVALID;
+			data->u.shared_info.gfn = KVM_XEN_INVALID_GFN;
		r = 0;
		break;
@@ -705,7 +705,7 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
		BUILD_BUG_ON(offsetof(struct vcpu_info, time) !=
			     offsetof(struct compat_vcpu_info, time));

-		if (data->u.gpa == GPA_INVALID) {
+		if (data->u.gpa == KVM_XEN_INVALID_GPA) {
			kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_info_cache);
			r = 0;
			break;
@@ -719,7 +719,7 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
-		if (data->u.gpa == GPA_INVALID) {
+		if (data->u.gpa == KVM_XEN_INVALID_GPA) {
			kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_time_info_cache);
			r = 0;
			break;
@@ -739,7 +739,7 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
			r = -EOPNOTSUPP;
			break;
		}
-		if (data->u.gpa == GPA_INVALID) {
+		if (data->u.gpa == KVM_XEN_INVALID_GPA) {
			r = 0;
 deactivate_out:
			kvm_gpc_deactivate(&vcpu->arch.xen.runstate_cache);
@@ -937,7 +937,7 @@ int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
		if (vcpu->arch.xen.vcpu_info_cache.active)
			data->u.gpa = vcpu->arch.xen.vcpu_info_cache.gpa;
		else
-			data->u.gpa = GPA_INVALID;
+			data->u.gpa = KVM_XEN_INVALID_GPA;
		r = 0;
		break;
@@ -945,7 +945,7 @@ int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
		if (vcpu->arch.xen.vcpu_time_info_cache.active)
			data->u.gpa = vcpu->arch.xen.vcpu_time_info_cache.gpa;
		else
-			data->u.gpa = GPA_INVALID;
+			data->u.gpa = KVM_XEN_INVALID_GPA;
		r = 0;
		break;
@@ -1069,6 +1069,7 @@ int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
		u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
				  : kvm->arch.xen_hvm_config.blob_size_32;
		u8 *page;
+		int ret;

		if (page_num >= blob_size)
			return 1;
@@ -1079,10 +1080,10 @@ int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
		if (IS_ERR(page))
			return PTR_ERR(page);

-		if (kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE)) {
-			kfree(page);
+		ret = kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE);
+		kfree(page);
+		if (ret)
			return 1;
-		}
	}
	return 0;
 }
@@ -1183,30 +1184,22 @@ static bool wait_pending_event(struct kvm_vcpu *vcpu, int nr_ports,
 static bool kvm_xen_schedop_poll(struct kvm_vcpu *vcpu, bool longmode,
				 u64 param, u64 *r)
 {
-	int idx, i;
	struct sched_poll sched_poll;
	evtchn_port_t port, *ports;
-	gpa_t gpa;
+	struct x86_exception e;
+	int i;

	if (!lapic_in_kernel(vcpu) ||
	    !(vcpu->kvm->arch.xen_hvm_config.flags & KVM_XEN_HVM_CONFIG_EVTCHN_SEND))
		return false;

-	idx = srcu_read_lock(&vcpu->kvm->srcu);
-	gpa = kvm_mmu_gva_to_gpa_system(vcpu, param, NULL);
-	srcu_read_unlock(&vcpu->kvm->srcu, idx);
-	if (!gpa) {
-		*r = -EFAULT;
-		return true;
-	}
-
	if (IS_ENABLED(CONFIG_64BIT) && !longmode) {
		struct compat_sched_poll sp32;

		/* Sanity check that the compat struct definition is correct */
		BUILD_BUG_ON(sizeof(sp32) != 16);

-		if (kvm_vcpu_read_guest(vcpu, gpa, &sp32, sizeof(sp32))) {
+		if (kvm_read_guest_virt(vcpu, param, &sp32, sizeof(sp32), &e)) {
			*r = -EFAULT;
			return true;
		}
@@ -1220,8 +1213,8 @@ static bool kvm_xen_schedop_poll(struct kvm_vcpu *vcpu, bool longmode,
		sched_poll.nr_ports = sp32.nr_ports;
		sched_poll.timeout = sp32.timeout;
	} else {
-		if (kvm_vcpu_read_guest(vcpu, gpa, &sched_poll,
-					sizeof(sched_poll))) {
+		if (kvm_read_guest_virt(vcpu, param, &sched_poll,
+					sizeof(sched_poll), &e)) {
			*r = -EFAULT;
			return true;
		}
@@ -1243,18 +1236,13 @@ static bool kvm_xen_schedop_poll(struct kvm_vcpu *vcpu, bool longmode,
	} else
		ports = &port;

-	for (i = 0; i < sched_poll.nr_ports; i++) {
-		idx = srcu_read_lock(&vcpu->kvm->srcu);
-		gpa = kvm_mmu_gva_to_gpa_system(vcpu,
-						(gva_t)(sched_poll.ports + i),
-						NULL);
-		srcu_read_unlock(&vcpu->kvm->srcu, idx);
-
-		if (!gpa || kvm_vcpu_read_guest(vcpu, gpa,
-						&ports[i], sizeof(port))) {
-			*r = -EFAULT;
-			goto out;
-		}
+	if (kvm_read_guest_virt(vcpu, (gva_t)sched_poll.ports, ports,
+				sched_poll.nr_ports * sizeof(*ports), &e)) {
+		*r = -EFAULT;
+		return true;
+	}

+	for (i = 0; i < sched_poll.nr_ports; i++) {
		if (ports[i] >= max_evtchn_port(vcpu->kvm)) {
			*r = -EINVAL;
			goto out;
@@ -1330,9 +1318,8 @@ static bool kvm_xen_hcall_vcpu_op(struct kvm_vcpu *vcpu, bool longmode, int cmd,
				  int vcpu_id, u64 param, u64 *r)
 {
	struct vcpu_set_singleshot_timer oneshot;
+	struct x86_exception e;
	s64 delta;
-	gpa_t gpa;
-	int idx;

	if (!kvm_xen_timer_enabled(vcpu))
		return false;
@@ -1343,9 +1330,6 @@ static bool kvm_xen_hcall_vcpu_op(struct kvm_vcpu *vcpu, bool longmode, int cmd,
		*r = -EINVAL;
		return true;
	}
-	idx = srcu_read_lock(&vcpu->kvm->srcu);
-	gpa = kvm_mmu_gva_to_gpa_system(vcpu, param, NULL);
-	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	/*
	 * The only difference for 32-bit compat is the 4 bytes of
@@ -1363,9 +1347,8 @@ static bool kvm_xen_hcall_vcpu_op(struct kvm_vcpu *vcpu, bool longmode, int cmd,
		BUILD_BUG_ON(sizeof_field(struct compat_vcpu_set_singleshot_timer, flags) !=
			     sizeof_field(struct vcpu_set_singleshot_timer, flags));

-		if (!gpa ||
-		    kvm_vcpu_read_guest(vcpu, gpa, &oneshot, longmode ? sizeof(oneshot) :
-					sizeof(struct compat_vcpu_set_singleshot_timer))) {
+		if (kvm_read_guest_virt(vcpu, param, &oneshot, longmode ? sizeof(oneshot) :
+					sizeof(struct compat_vcpu_set_singleshot_timer), &e)) {
			*r = -EFAULT;
			return true;
		}
@@ -1825,20 +1808,20 @@ static int kvm_xen_eventfd_update(struct kvm *kvm,
 {
	u32 port = data->u.evtchn.send_port;
	struct evtchnfd *evtchnfd;
+	int ret;

-	if (!port || port >= max_evtchn_port(kvm))
-		return -EINVAL;
-
+	/* Protect writes to evtchnfd as well as the idr lookup. */
	mutex_lock(&kvm->lock);
	evtchnfd = idr_find(&kvm->arch.xen.evtchn_ports, port);
-	mutex_unlock(&kvm->lock);

+	ret = -ENOENT;
	if (!evtchnfd)
-		return -ENOENT;
+		goto out_unlock;

	/* For an UPDATE, nothing may change except the priority/vcpu */
+	ret = -EINVAL;
	if (evtchnfd->type != data->u.evtchn.type)
-		return -EINVAL;
+		goto out_unlock;

	/*
	 * Port cannot change, and if it's zero that was an eventfd
@@ -1846,20 +1829,21 @@ static int kvm_xen_eventfd_update(struct kvm *kvm,
	 */
	if (!evtchnfd->deliver.port.port ||
	    evtchnfd->deliver.port.port != data->u.evtchn.deliver.port.port)
-		return -EINVAL;
+		goto out_unlock;

	/* We only support 2 level event channels for now */
	if (data->u.evtchn.deliver.port.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
-		return -EINVAL;
+		goto out_unlock;

-	mutex_lock(&kvm->lock);
	evtchnfd->deliver.port.priority = data->u.evtchn.deliver.port.priority;
	if (evtchnfd->deliver.port.vcpu_id != data->u.evtchn.deliver.port.vcpu) {
		evtchnfd->deliver.port.vcpu_id = data->u.evtchn.deliver.port.vcpu;
		evtchnfd->deliver.port.vcpu_idx = -1;
	}
+	ret = 0;
+out_unlock:
	mutex_unlock(&kvm->lock);
-	return 0;
+	return ret;
 }

 /*
@@ -1871,12 +1855,9 @@ static int kvm_xen_eventfd_assign(struct kvm *kvm,
 {
	u32 port = data->u.evtchn.send_port;
	struct eventfd_ctx *eventfd = NULL;
-	struct evtchnfd *evtchnfd = NULL;
+	struct evtchnfd *evtchnfd;
	int ret = -EINVAL;

-	if (!port || port >= max_evtchn_port(kvm))
-		return -EINVAL;
-
	evtchnfd = kzalloc(sizeof(struct evtchnfd), GFP_KERNEL);
	if (!evtchnfd)
		return -ENOMEM;
@@ -1952,8 +1933,7 @@ static int kvm_xen_eventfd_deassign(struct kvm *kvm, u32 port)
	if (!evtchnfd)
		return -ENOENT;

-	if (kvm)
-		synchronize_srcu(&kvm->srcu);
+	synchronize_srcu(&kvm->srcu);
	if (!evtchnfd->deliver.port.port)
		eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx);
	kfree(evtchnfd);
@@ -1962,18 +1942,42 @@ static int kvm_xen_eventfd_deassign(struct kvm *kvm, u32 port)

 static int kvm_xen_eventfd_reset(struct kvm *kvm)
 {
-	struct evtchnfd *evtchnfd;
+	struct evtchnfd *evtchnfd, **all_evtchnfds;
	int i;
+	int n = 0;

	mutex_lock(&kvm->lock);
+
+	/*
+	 * Because synchronize_srcu() cannot be called inside the
+	 * critical section, first collect all the evtchnfd objects
+	 * in an array as they are removed from evtchn_ports.
+	 */
+	idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i)
+		n++;
+
+	all_evtchnfds = kmalloc_array(n, sizeof(struct evtchnfd *), GFP_KERNEL);
+	if (!all_evtchnfds) {
+		mutex_unlock(&kvm->lock);
+		return -ENOMEM;
+	}
+
+	n = 0;
	idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) {
+		all_evtchnfds[n++] = evtchnfd;
		idr_remove(&kvm->arch.xen.evtchn_ports, evtchnfd->send_port);
-		synchronize_srcu(&kvm->srcu);
+	}
+	mutex_unlock(&kvm->lock);

+	synchronize_srcu(&kvm->srcu);
+
+	while (n--) {
+		evtchnfd = all_evtchnfds[n];
		if (!evtchnfd->deliver.port.port)
			eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx);
		kfree(evtchnfd);
	}
-	mutex_unlock(&kvm->lock);
+	kfree(all_evtchnfds);

	return 0;
 }
@@ -2002,20 +2006,22 @@ static bool kvm_xen_hcall_evtchn_send(struct kvm_vcpu *vcpu, u64 param, u64 *r)
 {
	struct evtchnfd *evtchnfd;
	struct evtchn_send send;
-	gpa_t gpa;
-	int idx;
+	struct x86_exception e;

-	idx = srcu_read_lock(&vcpu->kvm->srcu);
-	gpa = kvm_mmu_gva_to_gpa_system(vcpu, param, NULL);
-	srcu_read_unlock(&vcpu->kvm->srcu, idx);
-
-	if (!gpa || kvm_vcpu_read_guest(vcpu, gpa, &send, sizeof(send))) {
+	/* Sanity check: this structure is the same for 32-bit and 64-bit */
+	BUILD_BUG_ON(sizeof(send) != 4);
+	if (kvm_read_guest_virt(vcpu, param, &send, sizeof(send), &e)) {
		*r = -EFAULT;
		return true;
	}

-	/* The evtchn_ports idr is protected by vcpu->kvm->srcu */
+	/*
+	 * evtchnfd is protected by kvm->srcu; the idr lookup instead
+	 * is protected by RCU.
+	 */
+	rcu_read_lock();
	evtchnfd = idr_find(&vcpu->kvm->arch.xen.evtchn_ports, send.port);
+	rcu_read_unlock();
	if (!evtchnfd)
		return false;

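Note: the eventfd_reset rework above follows a common collect-then-synchronize pattern. A minimal sketch of the ordering it relies on; the function and parameter names are illustrative.

    /*
     * Sketch: entries were idr_remove()d and stashed under kvm->lock;
     * the SRCU grace period and freeing happen only after unlocking,
     * since synchronize_srcu() may sleep and must not nest in the
     * critical section.
     */
    static void free_collected_sketch(struct kvm *kvm, void **objs, int n)
    {
        synchronize_srcu(&kvm->srcu);   /* no reader can still see them */
        while (n--)
            kfree(objs[n]);
    }
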
@@ -1595,16 +1595,16 @@ bool insn_decode_from_regs(struct insn *insn, struct pt_regs *regs,
 * Returns:
 *
 * Type of the instruction. Size of the memory operand is stored in
- * @bytes. If decode failed, MMIO_DECODE_FAILED returned.
+ * @bytes. If decode failed, INSN_MMIO_DECODE_FAILED returned.
 */
-enum mmio_type insn_decode_mmio(struct insn *insn, int *bytes)
+enum insn_mmio_type insn_decode_mmio(struct insn *insn, int *bytes)
 {
-	enum mmio_type type = MMIO_DECODE_FAILED;
+	enum insn_mmio_type type = INSN_MMIO_DECODE_FAILED;

	*bytes = 0;

	if (insn_get_opcode(insn))
-		return MMIO_DECODE_FAILED;
+		return INSN_MMIO_DECODE_FAILED;

	switch (insn->opcode.bytes[0]) {
	case 0x88: /* MOV m8,r8 */
@@ -1613,7 +1613,7 @@ enum insn_mmio_type insn_decode_mmio(struct insn *insn, int *bytes)
	case 0x89: /* MOV m16/m32/m64, r16/m32/m64 */
		if (!*bytes)
			*bytes = insn->opnd_bytes;
-		type = MMIO_WRITE;
+		type = INSN_MMIO_WRITE;
		break;

	case 0xc6: /* MOV m8, imm8 */
@@ -1622,7 +1622,7 @@ enum insn_mmio_type insn_decode_mmio(struct insn *insn, int *bytes)
	case 0xc7: /* MOV m16/m32/m64, imm16/imm32/imm64 */
		if (!*bytes)
			*bytes = insn->opnd_bytes;
-		type = MMIO_WRITE_IMM;
+		type = INSN_MMIO_WRITE_IMM;
		break;

	case 0x8a: /* MOV r8, m8 */
@@ -1631,7 +1631,7 @@ enum insn_mmio_type insn_decode_mmio(struct insn *insn, int *bytes)
	case 0x8b: /* MOV r16/r32/r64, m16/m32/m64 */
		if (!*bytes)
			*bytes = insn->opnd_bytes;
-		type = MMIO_READ;
+		type = INSN_MMIO_READ;
		break;

	case 0xa4: /* MOVS m8, m8 */
@@ -1640,7 +1640,7 @@ enum insn_mmio_type insn_decode_mmio(struct insn *insn, int *bytes)
	case 0xa5: /* MOVS m16/m32/m64, m16/m32/m64 */
		if (!*bytes)
			*bytes = insn->opnd_bytes;
-		type = MMIO_MOVS;
+		type = INSN_MMIO_MOVS;
		break;

	case 0x0f: /* Two-byte instruction */
@@ -1651,7 +1651,7 @@ enum insn_mmio_type insn_decode_mmio(struct insn *insn, int *bytes)
		case 0xb7: /* MOVZX r32/r64, m16 */
			if (!*bytes)
				*bytes = 2;
-			type = MMIO_READ_ZERO_EXTEND;
+			type = INSN_MMIO_READ_ZERO_EXTEND;
			break;

		case 0xbe: /* MOVSX r16/r32/r64, m8 */
@@ -1660,7 +1660,7 @@ enum insn_mmio_type insn_decode_mmio(struct insn *insn, int *bytes)
		case 0xbf: /* MOVSX r32/r64, m16 */
			if (!*bytes)
				*bytes = 2;
-			type = MMIO_READ_SIGN_EXTEND;
+			type = INSN_MMIO_READ_SIGN_EXTEND;
			break;
		}
		break;

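Note: a small worked example of the opcode table above; everything here except insn_decode(), insn_decode_mmio(), and the constants is illustrative.

    /*
     * Sketch: 0x88 0x07 is "mov %al,(%rdi)", which the table classifies
     * as INSN_MMIO_WRITE with a 1-byte memory operand.
     */
    static void decode_example_sketch(void)
    {
        unsigned char kinsn[MAX_INSN_SIZE] = { 0x88, 0x07 };
        struct insn insn;
        int bytes;

        if (insn_decode(&insn, kinsn, sizeof(kinsn), INSN_MODE_64) < 0)
            return;
        if (insn_decode_mmio(&insn, &bytes) == INSN_MMIO_WRITE)
            pr_info("MMIO write of %d byte(s)\n", bytes);
    }
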
@@ -10,6 +10,6 @@
 */
SYM_FUNC_START(__iowrite32_copy)
	movl %edx,%ecx
-	rep movsd
+	rep movsl
	RET
SYM_FUNC_END(__iowrite32_copy)

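Note: in GNU assembler AT&T syntax, `movsd` is ambiguous (it also names the SSE2 scalar-double move), while `movsl` is the unambiguous 32-bit string-move mnemonic; the generated code is identical. A C-level sketch of what the routine does, with an illustrative function name:

    /* Sketch: __iowrite32_copy() semantics; count is in 32-bit words. */
    static void iowrite32_copy_sketch(void __iomem *to, const void *from,
                                      size_t count)
    {
        const u32 *src = from;
        u32 __iomem *dst = to;

        while (count--)
            __raw_writel(*src++, dst++);
    }
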
@@ -6,7 +6,6 @@ menuconfig BLOCK
	bool "Enable the block layer" if EXPERT
	default y
	select SBITMAP
-	select SRCU
	help
	  Provide block layer support for the kernel.

@@ -5317,8 +5317,8 @@ static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
		unsigned long flags;

		spin_lock_irqsave(&bfqd->lock, flags);
-		bfq_exit_bfqq(bfqd, bfqq);
		bic_set_bfqq(bic, NULL, is_sync);
+		bfq_exit_bfqq(bfqd, bfqq);
		spin_unlock_irqrestore(&bfqd->lock, flags);
	}
 }

block/bio.c (37 changed lines)
@@ -1401,6 +1401,27 @@ void __bio_advance(struct bio *bio, unsigned bytes)
 }
 EXPORT_SYMBOL(__bio_advance);

+void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
+			struct bio *src, struct bvec_iter *src_iter)
+{
+	while (src_iter->bi_size && dst_iter->bi_size) {
+		struct bio_vec src_bv = bio_iter_iovec(src, *src_iter);
+		struct bio_vec dst_bv = bio_iter_iovec(dst, *dst_iter);
+		unsigned int bytes = min(src_bv.bv_len, dst_bv.bv_len);
+		void *src_buf = bvec_kmap_local(&src_bv);
+		void *dst_buf = bvec_kmap_local(&dst_bv);
+
+		memcpy(dst_buf, src_buf, bytes);
+
+		kunmap_local(dst_buf);
+		kunmap_local(src_buf);
+
+		bio_advance_iter_single(src, src_iter, bytes);
+		bio_advance_iter_single(dst, dst_iter, bytes);
+	}
+}
+EXPORT_SYMBOL(bio_copy_data_iter);
+
 /**
 * bio_copy_data - copy contents of data buffers from one bio to another
 * @src: source bio
@@ -1414,21 +1435,7 @@ void bio_copy_data(struct bio *dst, struct bio *src)
	struct bvec_iter src_iter = src->bi_iter;
	struct bvec_iter dst_iter = dst->bi_iter;

-	while (src_iter.bi_size && dst_iter.bi_size) {
-		struct bio_vec src_bv = bio_iter_iovec(src, src_iter);
-		struct bio_vec dst_bv = bio_iter_iovec(dst, dst_iter);
-		unsigned int bytes = min(src_bv.bv_len, dst_bv.bv_len);
-		void *src_buf = bvec_kmap_local(&src_bv);
-		void *dst_buf = bvec_kmap_local(&dst_bv);
-
-		memcpy(dst_buf, src_buf, bytes);
-
-		kunmap_local(dst_buf);
-		kunmap_local(src_buf);
-
-		bio_advance_iter_single(src, &src_iter, bytes);
-		bio_advance_iter_single(dst, &dst_iter, bytes);
-	}
+	bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
 }
 EXPORT_SYMBOL(bio_copy_data);

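Note: the re-exported bio_copy_data_iter() lets callers resume from saved iterators instead of always starting at each bio's bi_iter. A minimal usage sketch; the wrapper and the 512-byte skip are illustrative.

    /* Sketch: copy everything after the first sector of src into dst. */
    static void copy_tail_sketch(struct bio *dst, struct bio *src)
    {
        struct bvec_iter src_iter = src->bi_iter;
        struct bvec_iter dst_iter = dst->bi_iter;

        bio_advance_iter_single(src, &src_iter, 512);  /* skip one sector */
        bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
    }
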
@@ -309,6 +309,16 @@ static struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
	*segs = nsegs;
	return NULL;
split:
+	/*
+	 * We can't sanely support splitting for a REQ_NOWAIT bio. End it
+	 * with EAGAIN if splitting is required and return an error pointer.
+	 */
+	if (bio->bi_opf & REQ_NOWAIT) {
+		bio->bi_status = BLK_STS_AGAIN;
+		bio_endio(bio);
+		return ERR_PTR(-EAGAIN);
+	}
+
	*segs = nsegs;

	/*
@@ -358,11 +368,13 @@ struct bio *__bio_split_to_limits(struct bio *bio,
	default:
		split = bio_split_rw(bio, lim, nr_segs, bs,
				get_max_io_size(bio, lim) << SECTOR_SHIFT);
+		if (IS_ERR(split))
+			return NULL;
		break;
	}

	if (split) {
-		/* there isn't chance to merge the splitted bio */
+		/* there isn't chance to merge the split bio */
		split->bi_opf |= REQ_NOMERGE;

		blkcg_bio_issue_init(split);

@@ -2951,8 +2951,11 @@ void blk_mq_submit_bio(struct bio *bio)
	blk_status_t ret;

	bio = blk_queue_bounce(bio, q);
-	if (bio_may_exceed_limits(bio, &q->limits))
+	if (bio_may_exceed_limits(bio, &q->limits)) {
		bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
+		if (!bio)
+			return;
+	}

	if (!bio_integrity_prep(bio))
		return;

@@ -1201,10 +1201,21 @@ struct class block_class = {
	.dev_uevent	= block_uevent,
 };

+static char *block_devnode(struct device *dev, umode_t *mode,
+			   kuid_t *uid, kgid_t *gid)
+{
+	struct gendisk *disk = dev_to_disk(dev);
+
+	if (disk->fops->devnode)
+		return disk->fops->devnode(disk, mode);
+	return NULL;
+}
+
 const struct device_type disk_type = {
	.name		= "disk",
	.groups		= disk_attr_groups,
	.release	= disk_release,
+	.devnode	= block_devnode,
 };

 #ifdef CONFIG_PROC_FS

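Note: block_devnode() simply forwards to a per-driver hook. A sketch of such a hook (modeled on what a driver like pktcdvd, re-added later in this merge, would provide; the exact body is illustrative):

    /* Sketch: place the device node in a driver-specific subdirectory. */
    static char *devnode_hook_sketch(struct gendisk *disk, umode_t *mode)
    {
        return kasprintf(GFP_KERNEL, "pktcdvd/%s", disk->disk_name);
    }
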
@@ -70,11 +70,7 @@ module_param(device_id_scheme, bool, 0444);
 static int only_lcd = -1;
 module_param(only_lcd, int, 0444);

-/*
- * Display probing is known to take up to 5 seconds, so delay the fallback
- * backlight registration by 5 seconds + 3 seconds for some extra margin.
- */
-static int register_backlight_delay = 8;
+static int register_backlight_delay;
 module_param(register_backlight_delay, int, 0444);
 MODULE_PARM_DESC(register_backlight_delay,
	"Delay in seconds before doing fallback (non GPU driver triggered) "
@@ -2176,6 +2172,17 @@ static bool should_check_lcd_flag(void)
	return false;
 }

+/*
+ * At least one graphics driver has reported that no LCD is connected
+ * via the native interface. Cancel the registration for fallback acpi_video0.
+ * If another driver still deems this necessary, it can explicitly register it.
+ */
+void acpi_video_report_nolcd(void)
+{
+	cancel_delayed_work(&video_bus_register_backlight_work);
+}
+EXPORT_SYMBOL(acpi_video_report_nolcd);
+
 int acpi_video_register(void)
 {
	int ret = 0;

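Note: a minimal sketch of the intended caller of the new export; the wrapper and its parameter are illustrative, only acpi_video_report_nolcd() comes from the hunk above.

    /* Sketch: a native GPU driver cancels the fallback once it knows
     * no internal panel (eDP/LVDS) exists on this machine. */
    static void gpu_output_probe_done_sketch(bool have_internal_panel)
    {
        if (!have_internal_panel)
            acpi_video_report_nolcd();
    }
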
@@ -432,10 +432,24 @@ static const struct dmi_system_id asus_laptop[] = {
			DMI_MATCH(DMI_BOARD_NAME, "S5602ZA"),
		},
	},
+	{
+		.ident = "Asus ExpertBook B2502",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+			DMI_MATCH(DMI_BOARD_NAME, "B2502CBA"),
+		},
+	},
	{ }
 };

-static const struct dmi_system_id lenovo_82ra[] = {
+static const struct dmi_system_id lenovo_laptop[] = {
	{
		.ident = "LENOVO IdeaPad Flex 5 14ALC7",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_NAME, "82R9"),
		},
	},
	{
		.ident = "LENOVO IdeaPad Flex 5 16ALC7",
		.matches = {
@@ -446,6 +460,17 @@ static const struct dmi_system_id lenovo_laptop[] = {
	{ }
 };

+static const struct dmi_system_id schenker_gm_rg[] = {
+	{
+		.ident = "XMG CORE 15 (M22)",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"),
+			DMI_MATCH(DMI_BOARD_NAME, "GMxRGxx"),
+		},
+	},
+	{ }
+};
+
 struct irq_override_cmp {
	const struct dmi_system_id *system;
	unsigned char irq;
@@ -458,8 +483,9 @@ struct irq_override_cmp {
 static const struct irq_override_cmp override_table[] = {
	{ medion_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
	{ asus_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
-	{ lenovo_82ra, 6, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
-	{ lenovo_82ra, 10, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
+	{ lenovo_laptop, 6, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
+	{ lenovo_laptop, 10, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
+	{ schenker_gm_rg, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true },
 };

 static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,

@@ -34,6 +34,7 @@
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/platform_data/x86/nvidia-wmi-ec-backlight.h>
+#include <linux/pnp.h>
 #include <linux/types.h>
 #include <linux/workqueue.h>
 #include <acpi/video.h>
@@ -105,6 +106,26 @@ static bool nvidia_wmi_ec_supported(void)
 }
 #endif

+static bool apple_gmux_backlight_present(void)
+{
+	struct acpi_device *adev;
+	struct device *dev;
+
+	adev = acpi_dev_get_first_match_dev(GMUX_ACPI_HID, NULL, -1);
+	if (!adev)
+		return false;
+
+	dev = acpi_get_first_physical_node(adev);
+	if (!dev)
+		return false;
+
+	/*
+	 * drivers/platform/x86/apple-gmux.c only supports old style
+	 * Apple GMUX with an IO-resource.
+	 */
+	return pnp_get_resource(to_pnp_dev(dev), IORESOURCE_IO, 0) != NULL;
+}
+
 /* Force to use vendor driver when the ACPI device is known to be
 * buggy */
 static int video_detect_force_vendor(const struct dmi_system_id *d)
@@ -767,7 +788,7 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
	if (nvidia_wmi_ec_present)
		return acpi_backlight_nvidia_wmi_ec;

-	if (apple_gmux_present())
+	if (apple_gmux_backlight_present())
		return acpi_backlight_apple_gmux;

	/* Use ACPI video if available, except when native should be preferred. */

@@ -28,10 +28,6 @@ static bool sleep_no_lps0 __read_mostly;
 module_param(sleep_no_lps0, bool, 0644);
 MODULE_PARM_DESC(sleep_no_lps0, "Do not use the special LPS0 device interface");

-static bool prefer_microsoft_dsm_guid __read_mostly;
-module_param(prefer_microsoft_dsm_guid, bool, 0644);
-MODULE_PARM_DESC(prefer_microsoft_dsm_guid, "Prefer using Microsoft GUID in LPS0 device _DSM evaluation");
-
 static const struct acpi_device_id lps0_device_ids[] = {
	{"PNP0D80", },
	{"", },
@@ -369,27 +365,15 @@ out:
 }

 struct amd_lps0_hid_device_data {
	const unsigned int rev_id;
	const bool check_off_by_one;
-	const bool prefer_amd_guid;
 };

 static const struct amd_lps0_hid_device_data amd_picasso = {
	.rev_id = 0,
	.check_off_by_one = true,
-	.prefer_amd_guid = false,
 };

 static const struct amd_lps0_hid_device_data amd_cezanne = {
	.rev_id = 0,
	.check_off_by_one = false,
-	.prefer_amd_guid = false,
 };

-static const struct amd_lps0_hid_device_data amd_rembrandt = {
-	.rev_id = 2,
-	.check_off_by_one = false,
-	.prefer_amd_guid = true,
-};
-
 static const struct acpi_device_id amd_hid_ids[] = {
@@ -397,69 +381,27 @@ static const struct acpi_device_id amd_hid_ids[] = {
	{"AMD0005", (kernel_ulong_t)&amd_picasso, },
	{"AMDI0005", (kernel_ulong_t)&amd_picasso, },
	{"AMDI0006", (kernel_ulong_t)&amd_cezanne, },
-	{"AMDI0007", (kernel_ulong_t)&amd_rembrandt, },
	{}
 };

-static int lps0_prefer_microsoft(const struct dmi_system_id *id)
+static int lps0_prefer_amd(const struct dmi_system_id *id)
 {
-	pr_debug("Preferring Microsoft GUID.\n");
-	prefer_microsoft_dsm_guid = true;
+	pr_debug("Using AMD GUID w/ _REV 2.\n");
+	rev_id = 2;
	return 0;
 }

 static const struct dmi_system_id s2idle_dmi_table[] __initconst = {
	{
		/*
-		 * ASUS TUF Gaming A17 FA707RE
-		 * https://bugzilla.kernel.org/show_bug.cgi?id=216101
+		 * AMD Rembrandt based HP EliteBook 835/845/865 G9
+		 * Contains specialized AML in AMD/_REV 2 path to avoid
+		 * triggering a bug in Qualcomm WLAN firmware. This may be
+		 * removed in the future if that firmware is fixed.
		 */
-		.callback = lps0_prefer_microsoft,
+		.callback = lps0_prefer_amd,
		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "ASUS TUF Gaming A17"),
-		},
-	},
-	{
-		/* ASUS ROG Zephyrus G14 (2022) */
-		.callback = lps0_prefer_microsoft,
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "ROG Zephyrus G14 GA402"),
-		},
-	},
-	{
-		/*
-		 * Lenovo Yoga Slim 7 Pro X 14ARH7
-		 * https://bugzilla.kernel.org/show_bug.cgi?id=216473 : 82V2
-		 * https://bugzilla.kernel.org/show_bug.cgi?id=216438 : 82TL
-		 */
-		.callback = lps0_prefer_microsoft,
-		.matches = {
-			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
-			DMI_MATCH(DMI_PRODUCT_NAME, "82"),
-		},
-	},
-	{
-		/*
-		 * ASUSTeK COMPUTER INC. ROG Flow X13 GV301RE_GV301RE
-		 * https://gitlab.freedesktop.org/drm/amd/-/issues/2148
-		 */
-		.callback = lps0_prefer_microsoft,
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "ROG Flow X13 GV301"),
-		},
-	},
-	{
-		/*
-		 * ASUSTeK COMPUTER INC. ROG Flow X16 GV601RW_GV601RW
-		 * https://gitlab.freedesktop.org/drm/amd/-/issues/2148
-		 */
-		.callback = lps0_prefer_microsoft,
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "ROG Flow X16 GV601"),
+			DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
+			DMI_MATCH(DMI_BOARD_NAME, "8990"),
		},
	},
	{}
@@ -484,16 +426,14 @@ static int lps0_device_attach(struct acpi_device *adev,
		if (dev_id->id[0])
			data = (const struct amd_lps0_hid_device_data *) dev_id->driver_data;
		else
-			data = &amd_rembrandt;
-		rev_id = data->rev_id;
+			data = &amd_cezanne;
		lps0_dsm_func_mask = validate_dsm(adev->handle,
					ACPI_LPS0_DSM_UUID_AMD, rev_id, &lps0_dsm_guid);
		if (lps0_dsm_func_mask > 0x3 && data->check_off_by_one) {
			lps0_dsm_func_mask = (lps0_dsm_func_mask << 1) | 0x1;
			acpi_handle_debug(adev->handle, "_DSM UUID %s: Adjusted function mask: 0x%x\n",
					  ACPI_LPS0_DSM_UUID_AMD, lps0_dsm_func_mask);
-		} else if (lps0_dsm_func_mask_microsoft > 0 && data->prefer_amd_guid &&
-			   !prefer_microsoft_dsm_guid) {
+		} else if (lps0_dsm_func_mask_microsoft > 0 && rev_id) {
			lps0_dsm_func_mask_microsoft = -EINVAL;
			acpi_handle_debug(adev->handle, "_DSM Using AMD method\n");
		}
@@ -501,8 +441,7 @@ static int lps0_device_attach(struct acpi_device *adev,
		rev_id = 1;
		lps0_dsm_func_mask = validate_dsm(adev->handle,
					ACPI_LPS0_DSM_UUID, rev_id, &lps0_dsm_guid);
-		if (!prefer_microsoft_dsm_guid)
-			lps0_dsm_func_mask_microsoft = -EINVAL;
+		lps0_dsm_func_mask_microsoft = -EINVAL;
	}

	if (lps0_dsm_func_mask < 0 && lps0_dsm_func_mask_microsoft < 0)

@@ -83,6 +83,7 @@ enum board_ids {
 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
 static void ahci_remove_one(struct pci_dev *dev);
 static void ahci_shutdown_one(struct pci_dev *dev);
+static void ahci_intel_pcs_quirk(struct pci_dev *pdev, struct ahci_host_priv *hpriv);
 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline);
 static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
@@ -676,6 +677,25 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
	ahci_save_initial_config(&pdev->dev, hpriv);
 }

+static int ahci_pci_reset_controller(struct ata_host *host)
+{
+	struct pci_dev *pdev = to_pci_dev(host->dev);
+	struct ahci_host_priv *hpriv = host->private_data;
+	int rc;
+
+	rc = ahci_reset_controller(host);
+	if (rc)
+		return rc;
+
+	/*
+	 * If platform firmware failed to enable ports, try to enable
+	 * them here.
+	 */
+	ahci_intel_pcs_quirk(pdev, hpriv);
+
+	return 0;
+}
+
 static void ahci_pci_init_controller(struct ata_host *host)
 {
	struct ahci_host_priv *hpriv = host->private_data;
@@ -870,7 +890,7 @@ static int ahci_pci_device_runtime_resume(struct device *dev)
	struct ata_host *host = pci_get_drvdata(pdev);
	int rc;

-	rc = ahci_reset_controller(host);
+	rc = ahci_pci_reset_controller(host);
	if (rc)
		return rc;
	ahci_pci_init_controller(host);
@@ -906,7 +926,7 @@ static int ahci_pci_device_resume(struct device *dev)
		ahci_mcp89_apple_enable(pdev);

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
-		rc = ahci_reset_controller(host);
+		rc = ahci_pci_reset_controller(host);
		if (rc)
			return rc;
@@ -1784,12 +1804,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
	/* save initial config */
	ahci_pci_save_initial_config(pdev, hpriv);

-	/*
-	 * If platform firmware failed to enable ports, try to enable
-	 * them here.
-	 */
-	ahci_intel_pcs_quirk(pdev, hpriv);
-
	/* prepare host */
	if (hpriv->cap & HOST_CAP_NCQ) {
		pi.flags |= ATA_FLAG_NCQ;
@@ -1899,7 +1913,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
	if (rc)
		return rc;

-	rc = ahci_reset_controller(host);
+	rc = ahci_pci_reset_controller(host);
	if (rc)
		return rc;

@@ -285,6 +285,49 @@ config BLK_DEV_RAM_SIZE
	  The default value is 4096 kilobytes. Only change this if you know
	  what you are doing.

+config CDROM_PKTCDVD
+	tristate "Packet writing on CD/DVD media (DEPRECATED)"
+	depends on !UML
+	depends on SCSI
+	select CDROM
+	help
+	  Note: This driver is deprecated and will be removed from the
+	  kernel in the near future!
+
+	  If you have a CDROM/DVD drive that supports packet writing, say
+	  Y to include support. It should work with any MMC/Mt Fuji
+	  compliant ATAPI or SCSI drive, which is just about any newer
+	  DVD/CD writer.
+
+	  Currently only writing to CD-RW, DVD-RW, DVD+RW and DVDRAM discs
+	  is possible.
+	  DVD-RW disks must be in restricted overwrite mode.
+
+	  See the file <file:Documentation/cdrom/packet-writing.rst>
+	  for further information on the use of this driver.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called pktcdvd.
+
+config CDROM_PKTCDVD_BUFFERS
+	int "Free buffers for data gathering"
+	depends on CDROM_PKTCDVD
+	default "8"
+	help
+	  This controls the maximum number of active concurrent packets. More
+	  concurrent packets can increase write performance, but also require
+	  more memory. Each concurrent packet will require approximately 64Kb
+	  of non-swappable kernel memory, memory which will be allocated when
+	  a disc is opened for writing.
+
+config CDROM_PKTCDVD_WCACHE
+	bool "Enable write caching"
+	depends on CDROM_PKTCDVD
+	help
+	  If enabled, write caching will be set for the CD-R/W device. For now
+	  this option is dangerous unless the CD-RW media is known good, as we
+	  don't do deferred write error handling yet.
+
 config ATA_OVER_ETH
	tristate "ATA over Ethernet support"
	depends on NET

@@ -20,6 +20,7 @@ obj-$(CONFIG_AMIGA_Z2RAM)	+= z2ram.o
 obj-$(CONFIG_N64CART)		+= n64cart.o
 obj-$(CONFIG_BLK_DEV_RAM)	+= brd.o
 obj-$(CONFIG_BLK_DEV_LOOP)	+= loop.o
+obj-$(CONFIG_CDROM_PKTCDVD)	+= pktcdvd.o
 obj-$(CONFIG_SUNVDC)		+= sunvdc.o

 obj-$(CONFIG_BLK_DEV_NBD)	+= nbd.o

@@ -1607,6 +1607,8 @@ void drbd_submit_bio(struct bio *bio)
	struct drbd_device *device = bio->bi_bdev->bd_disk->private_data;

	bio = bio_split_to_limits(bio);
+	if (!bio)
+		return;

	/*
	 * what we "blindly" assume:

(diff of one large file suppressed by the viewer)

@@ -587,6 +587,8 @@ static void ps3vram_submit_bio(struct bio *bio)
	dev_dbg(&dev->core, "%s\n", __func__);

	bio = bio_split_to_limits(bio);
+	if (!bio)
+		return;

	spin_lock_irq(&priv->lock);
	busy = !bio_list_empty(&priv->list);

@@ -1992,6 +1992,9 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
	struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
	int ret = -EINVAL;

+	if (issue_flags & IO_URING_F_NONBLOCK)
+		return -EAGAIN;
+
	ublk_ctrl_cmd_dump(cmd);

	if (!(issue_flags & IO_URING_F_SQE128))

|
@ -315,22 +315,35 @@ static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
|
|||
virtqueue_notify(vq->vq);
|
||||
}
|
||||
|
||||
static blk_status_t virtblk_fail_to_queue(struct request *req, int rc)
|
||||
{
|
||||
virtblk_cleanup_cmd(req);
|
||||
switch (rc) {
|
||||
case -ENOSPC:
|
||||
return BLK_STS_DEV_RESOURCE;
|
||||
case -ENOMEM:
|
||||
return BLK_STS_RESOURCE;
|
||||
default:
|
||||
return BLK_STS_IOERR;
|
||||
}
|
||||
}
|
||||
|
||||
static blk_status_t virtblk_prep_rq(struct blk_mq_hw_ctx *hctx,
|
||||
struct virtio_blk *vblk,
|
||||
struct request *req,
|
||||
struct virtblk_req *vbr)
|
||||
{
|
||||
blk_status_t status;
|
||||
int num;
|
||||
|
||||
status = virtblk_setup_cmd(vblk->vdev, req, vbr);
|
||||
if (unlikely(status))
|
||||
return status;
|
||||
|
||||
vbr->sg_table.nents = virtblk_map_data(hctx, req, vbr);
|
||||
if (unlikely(vbr->sg_table.nents < 0)) {
|
||||
virtblk_cleanup_cmd(req);
|
||||
return BLK_STS_RESOURCE;
|
||||
}
|
||||
num = virtblk_map_data(hctx, req, vbr);
|
||||
if (unlikely(num < 0))
|
||||
return virtblk_fail_to_queue(req, -ENOMEM);
|
||||
vbr->sg_table.nents = num;
|
||||
|
||||
blk_mq_start_request(req);
|
||||
|
||||
|
@ -364,15 +377,7 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
|
|||
blk_mq_stop_hw_queue(hctx);
|
||||
spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
|
||||
virtblk_unmap_data(req, vbr);
|
||||
virtblk_cleanup_cmd(req);
|
||||
switch (err) {
|
||||
case -ENOSPC:
|
||||
return BLK_STS_DEV_RESOURCE;
|
||||
case -ENOMEM:
|
||||
return BLK_STS_RESOURCE;
|
||||
default:
|
||||
return BLK_STS_IOERR;
|
||||
}
|
||||
return virtblk_fail_to_queue(req, err);
|
||||
}
|
||||
|
||||
if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
|
||||
|
@ -991,7 +996,7 @@ static int virtblk_probe(struct virtio_device *vdev)
|
|||
blk_queue_max_segments(q, sg_elems);
|
||||
|
||||
/* No real sector limit. */
|
||||
blk_queue_max_hw_sectors(q, -1U);
|
||||
blk_queue_max_hw_sectors(q, UINT_MAX);
|
||||
|
||||
max_size = virtio_max_dma_size(vdev);
|
||||
|
||||
|
|
|
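Note: a minimal sketch of the errno-to-status policy the new virtblk_fail_to_queue() centralizes for both callers; the function name below is illustrative.

    /* Sketch: -ENOSPC means the ring is full (per-device resource),
     * -ENOMEM is a global allocation failure, anything else is an I/O error. */
    static blk_status_t errno_to_blk_status_sketch(int rc)
    {
        switch (rc) {
        case -ENOSPC:
            return BLK_STS_DEV_RESOURCE;
        case -ENOMEM:
            return BLK_STS_RESOURCE;
        default:
            return BLK_STS_IOERR;
        }
    }
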
@@ -412,7 +412,9 @@ int tpm_pm_suspend(struct device *dev)
	}

suspended:
-	return rc;
+	if (rc)
+		dev_err(dev, "Ignoring error %d while suspending\n", rc);
+	return 0;
 }
 EXPORT_SYMBOL_GPL(tpm_pm_suspend);

@@ -104,7 +104,7 @@ int caam_process_blob(struct caam_blob_priv *priv,
	}

	ctrlpriv = dev_get_drvdata(jrdev->parent);
-	moo = FIELD_GET(CSTA_MOO, ioread32(&ctrlpriv->ctrl->perfmon.status));
+	moo = FIELD_GET(CSTA_MOO, rd_reg32(&ctrlpriv->ctrl->perfmon.status));
	if (moo != CSTA_MOO_SECURE && moo != CSTA_MOO_TRUSTED)
		dev_warn(jrdev,
			 "using insecure test key, enable HAB to use unique device key!\n");

@@ -239,7 +239,8 @@ static int virtio_crypto_alg_skcipher_close_session(
 		pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
 		       ctrl_status->status, destroy_session->session_id);
 
-		return -EINVAL;
+		err = -EINVAL;
+		goto out;
 	}
 
 	err = 0;
@@ -168,14 +168,11 @@ void dma_buf_uninit_sysfs_statistics(void)
 	kset_unregister(dma_buf_stats_kset);
 }
 
-int dma_buf_stats_setup(struct dma_buf *dmabuf)
+int dma_buf_stats_setup(struct dma_buf *dmabuf, struct file *file)
 {
 	struct dma_buf_sysfs_entry *sysfs_entry;
 	int ret;
 
-	if (!dmabuf || !dmabuf->file)
-		return -EINVAL;
-
 	if (!dmabuf->exp_name) {
 		pr_err("exporter name must not be empty if stats needed\n");
 		return -EINVAL;
 
@@ -192,7 +189,7 @@ int dma_buf_stats_setup(struct dma_buf *dmabuf)
 
 	/* create the directory for buffer stats */
 	ret = kobject_init_and_add(&sysfs_entry->kobj, &dma_buf_ktype, NULL,
-				   "%lu", file_inode(dmabuf->file)->i_ino);
+				   "%lu", file_inode(file)->i_ino);
 	if (ret)
 		goto err_sysfs_dmabuf;
 
@@ -13,7 +13,7 @@
 int dma_buf_init_sysfs_statistics(void);
 void dma_buf_uninit_sysfs_statistics(void);
 
-int dma_buf_stats_setup(struct dma_buf *dmabuf);
+int dma_buf_stats_setup(struct dma_buf *dmabuf, struct file *file);
 
 void dma_buf_stats_teardown(struct dma_buf *dmabuf);
 #else
 
@@ -25,7 +25,7 @@ static inline int dma_buf_init_sysfs_statistics(void)
 
 static inline void dma_buf_uninit_sysfs_statistics(void) {}
 
-static inline int dma_buf_stats_setup(struct dma_buf *dmabuf)
+static inline int dma_buf_stats_setup(struct dma_buf *dmabuf, struct file *file)
 {
 	return 0;
 }
@@ -95,10 +95,11 @@ static int dma_buf_file_release(struct inode *inode, struct file *file)
 		return -EINVAL;
 
 	dmabuf = file->private_data;
-
-	mutex_lock(&db_list.lock);
-	list_del(&dmabuf->list_node);
-	mutex_unlock(&db_list.lock);
+	if (dmabuf) {
+		mutex_lock(&db_list.lock);
+		list_del(&dmabuf->list_node);
+		mutex_unlock(&db_list.lock);
+	}
 
 	return 0;
 }
 
@@ -528,17 +529,17 @@ static inline int is_dma_buf_file(struct file *file)
 	return file->f_op == &dma_buf_fops;
 }
 
-static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
+static struct file *dma_buf_getfile(size_t size, int flags)
 {
 	static atomic64_t dmabuf_inode = ATOMIC64_INIT(0);
-	struct file *file;
 	struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
+	struct file *file;
 
 	if (IS_ERR(inode))
 		return ERR_CAST(inode);
 
-	inode->i_size = dmabuf->size;
-	inode_set_bytes(inode, dmabuf->size);
+	inode->i_size = size;
+	inode_set_bytes(inode, size);
 
 	/*
 	 * The ->i_ino acquired from get_next_ino() is not unique thus
 
@@ -552,8 +553,6 @@ static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
 				 flags, &dma_buf_fops);
 	if (IS_ERR(file))
 		goto err_alloc_file;
-	file->private_data = dmabuf;
-	file->f_path.dentry->d_fsdata = dmabuf;
 
 	return file;
 
@@ -619,19 +618,11 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 	size_t alloc_size = sizeof(struct dma_buf);
 	int ret;
 
-	if (!exp_info->resv)
-		alloc_size += sizeof(struct dma_resv);
-	else
-		/* prevent &dma_buf[1] == dma_buf->resv */
-		alloc_size += 1;
-
-	if (WARN_ON(!exp_info->priv
-		    || !exp_info->ops
-		    || !exp_info->ops->map_dma_buf
-		    || !exp_info->ops->unmap_dma_buf
-		    || !exp_info->ops->release)) {
+	if (WARN_ON(!exp_info->priv || !exp_info->ops
+		    || !exp_info->ops->map_dma_buf
+		    || !exp_info->ops->unmap_dma_buf
+		    || !exp_info->ops->release))
 		return ERR_PTR(-EINVAL);
-	}
 
 	if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
 		    (exp_info->ops->pin || exp_info->ops->unpin)))
 
@@ -643,10 +634,21 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 	if (!try_module_get(exp_info->owner))
 		return ERR_PTR(-ENOENT);
 
+	file = dma_buf_getfile(exp_info->size, exp_info->flags);
+	if (IS_ERR(file)) {
+		ret = PTR_ERR(file);
+		goto err_module;
+	}
+
+	if (!exp_info->resv)
+		alloc_size += sizeof(struct dma_resv);
+	else
+		/* prevent &dma_buf[1] == dma_buf->resv */
+		alloc_size += 1;
 	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
 	if (!dmabuf) {
 		ret = -ENOMEM;
-		goto err_module;
+		goto err_file;
 	}
 
 	dmabuf->priv = exp_info->priv;
@@ -658,43 +660,35 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 	init_waitqueue_head(&dmabuf->poll);
 	dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
 	dmabuf->cb_in.active = dmabuf->cb_out.active = 0;
+	INIT_LIST_HEAD(&dmabuf->attachments);
 
 	if (!resv) {
-		resv = (struct dma_resv *)&dmabuf[1];
-		dma_resv_init(resv);
+		dmabuf->resv = (struct dma_resv *)&dmabuf[1];
+		dma_resv_init(dmabuf->resv);
+	} else {
+		dmabuf->resv = resv;
 	}
-	dmabuf->resv = resv;
 
-	file = dma_buf_getfile(dmabuf, exp_info->flags);
-	if (IS_ERR(file)) {
-		ret = PTR_ERR(file);
+	ret = dma_buf_stats_setup(dmabuf, file);
+	if (ret)
 		goto err_dmabuf;
-	}
 
+	file->private_data = dmabuf;
+	file->f_path.dentry->d_fsdata = dmabuf;
 	dmabuf->file = file;
 
-	INIT_LIST_HEAD(&dmabuf->attachments);
-
 	mutex_lock(&db_list.lock);
 	list_add(&dmabuf->list_node, &db_list.head);
 	mutex_unlock(&db_list.lock);
 
-	ret = dma_buf_stats_setup(dmabuf);
-	if (ret)
-		goto err_sysfs;
-
 	return dmabuf;
 
-err_sysfs:
-	/*
-	 * Set file->f_path.dentry->d_fsdata to NULL so that when
-	 * dma_buf_release() gets invoked by dentry_ops, it exits
-	 * early before calling the release() dma_buf op.
-	 */
-	file->f_path.dentry->d_fsdata = NULL;
-	fput(file);
 err_dmabuf:
+	if (!resv)
+		dma_resv_fini(dmabuf->resv);
 	kfree(dmabuf);
+err_file:
+	fput(file);
 err_module:
 	module_put(exp_info->owner);
 	return ERR_PTR(ret);
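The reworked export path above acquires resources in a fixed order (file first, then the dma_buf allocation, then the sysfs setup) so that each error label undoes exactly what was acquired before it, in reverse. A minimal standalone sketch of that unwind discipline (two hypothetical setup steps, not the dma-buf code):

#include <stdlib.h>

static int export_demo(void)
{
	void *file, *buf;
	int ret;

	file = malloc(16);		/* step 1 */
	if (!file)
		return -1;

	buf = malloc(16);		/* step 2 */
	if (!buf) {
		ret = -1;
		goto err_file;		/* undo step 1 only */
	}

	if (0 /* imagine step 3 failing */) {
		ret = -1;
		goto err_buf;		/* undo step 2, then step 1 */
	}

	free(buf);
	free(file);			/* demo only: release and succeed */
	return 0;

err_buf:
	free(buf);
err_file:
	free(file);
	return ret;
}

int main(void) { return export_demo(); }

The payoff in the hunk above is that the awkward err_sysfs path, which had to defuse d_fsdata before fput(), disappears entirely.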
@@ -91,7 +91,6 @@ enum sprd_eic_type {
 
 struct sprd_eic {
 	struct gpio_chip chip;
-	struct irq_chip intc;
 	void __iomem *base[SPRD_EIC_MAX_BANK];
 	enum sprd_eic_type type;
 	spinlock_t lock;
 
@@ -255,6 +254,8 @@ static void sprd_eic_irq_mask(struct irq_data *data)
 	default:
 		dev_err(chip->parent, "Unsupported EIC type.\n");
 	}
+
+	gpiochip_disable_irq(chip, offset);
 }
 
 static void sprd_eic_irq_unmask(struct irq_data *data)
 
@@ -263,6 +264,8 @@ static void sprd_eic_irq_unmask(struct irq_data *data)
 	struct sprd_eic *sprd_eic = gpiochip_get_data(chip);
 	u32 offset = irqd_to_hwirq(data);
 
+	gpiochip_enable_irq(chip, offset);
+
 	switch (sprd_eic->type) {
 	case SPRD_EIC_DEBOUNCE:
 		sprd_eic_update(chip, offset, SPRD_EIC_DBNC_IE, 1);
 
@@ -564,6 +567,15 @@ static void sprd_eic_irq_handler(struct irq_desc *desc)
 	chained_irq_exit(ic, desc);
 }
 
+static const struct irq_chip sprd_eic_irq = {
+	.name		= "sprd-eic",
+	.irq_ack	= sprd_eic_irq_ack,
+	.irq_mask	= sprd_eic_irq_mask,
+	.irq_unmask	= sprd_eic_irq_unmask,
+	.irq_set_type	= sprd_eic_irq_set_type,
+	.flags		= IRQCHIP_SKIP_SET_WAKE | IRQCHIP_IMMUTABLE,
+	GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
 static int sprd_eic_probe(struct platform_device *pdev)
 {
 	const struct sprd_eic_variant_data *pdata;
 
@@ -626,15 +638,8 @@ static int sprd_eic_probe(struct platform_device *pdev)
 		break;
 	}
 
-	sprd_eic->intc.name = dev_name(&pdev->dev);
-	sprd_eic->intc.irq_ack = sprd_eic_irq_ack;
-	sprd_eic->intc.irq_mask = sprd_eic_irq_mask;
-	sprd_eic->intc.irq_unmask = sprd_eic_irq_unmask;
-	sprd_eic->intc.irq_set_type = sprd_eic_irq_set_type;
-	sprd_eic->intc.flags = IRQCHIP_SKIP_SET_WAKE;
-
 	irq = &sprd_eic->chip.irq;
-	irq->chip = &sprd_eic->intc;
+	gpio_irq_chip_set_chip(irq, &sprd_eic_irq);
 	irq->handler = handle_bad_irq;
 	irq->default_type = IRQ_TYPE_NONE;
 	irq->parent_handler = sprd_eic_irq_handler;
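The Spreadtrum EIC conversion above is the standard recipe for immutable GPIO irqchips: the per-instance struct irq_chip embedded in the driver state is replaced by a single static const chip, mask/unmask gain gpiochip_disable_irq()/gpiochip_enable_irq() calls, and probe installs the chip with gpio_irq_chip_set_chip(). Condensed to a sketch (driver names are placeholders; the helpers and flags are the real gpiolib/irqchip API):

static void my_gpio_irq_mask(struct irq_data *data)
{
	struct gpio_chip *chip = irq_data_get_irq_chip_data(data);

	/* ...silence the interrupt in hardware... */
	gpiochip_disable_irq(chip, irqd_to_hwirq(data));
}

static void my_gpio_irq_unmask(struct irq_data *data)
{
	struct gpio_chip *chip = irq_data_get_irq_chip_data(data);

	gpiochip_enable_irq(chip, irqd_to_hwirq(data));
	/* ...re-enable the interrupt in hardware... */
}

static const struct irq_chip my_gpio_irqchip = {
	.name		= "my-gpio",
	.irq_mask	= my_gpio_irq_mask,
	.irq_unmask	= my_gpio_irq_unmask,
	.flags		= IRQCHIP_SKIP_SET_WAKE | IRQCHIP_IMMUTABLE,
	GPIOCHIP_IRQ_RESOURCE_HELPERS,
};

/* in probe, instead of pointing irq->chip at a writable copy:
 *	gpio_irq_chip_set_chip(&priv->chip.irq, &my_gpio_irqchip);
 */

Making the chip const prevents gpiolib from silently patching driver callbacks at runtime, which is the whole point of the IRQCHIP_IMMUTABLE transition.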
@@ -474,6 +474,9 @@ static u8 pcal6534_recalc_addr(struct pca953x_chip *chip, int reg, int off)
 	case PCAL6524_DEBOUNCE:
 		pinctrl = ((reg & PCAL_PINCTRL_MASK) >> 1) + 0x1c;
 		break;
+	default:
+		pinctrl = 0;
+		break;
 	}
 
 	return pinctrl + addr + (off / BANK_SZ);
@@ -47,7 +47,6 @@ enum {
 /**
  * struct sprd_pmic_eic - PMIC EIC controller
  * @chip: the gpio_chip structure.
- * @intc: the irq_chip structure.
 * @map: the regmap from the parent device.
 * @offset: the EIC controller's offset address of the PMIC.
 * @reg: the array to cache the EIC registers.
 
@@ -56,7 +55,6 @@ enum {
 */
 struct sprd_pmic_eic {
 	struct gpio_chip chip;
-	struct irq_chip intc;
 	struct regmap *map;
 	u32 offset;
 	u8 reg[CACHE_NR_REGS];
 
@@ -151,15 +149,21 @@ static void sprd_pmic_eic_irq_mask(struct irq_data *data)
 {
 	struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
 	struct sprd_pmic_eic *pmic_eic = gpiochip_get_data(chip);
+	u32 offset = irqd_to_hwirq(data);
 
 	pmic_eic->reg[REG_IE] = 0;
 	pmic_eic->reg[REG_TRIG] = 0;
+
+	gpiochip_disable_irq(chip, offset);
 }
 
 static void sprd_pmic_eic_irq_unmask(struct irq_data *data)
 {
 	struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
 	struct sprd_pmic_eic *pmic_eic = gpiochip_get_data(chip);
+	u32 offset = irqd_to_hwirq(data);
+
+	gpiochip_enable_irq(chip, offset);
 
 	pmic_eic->reg[REG_IE] = 1;
 	pmic_eic->reg[REG_TRIG] = 1;
 
@@ -292,6 +296,17 @@ static irqreturn_t sprd_pmic_eic_irq_handler(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
+static const struct irq_chip pmic_eic_irq_chip = {
+	.name			= "sprd-pmic-eic",
+	.irq_mask		= sprd_pmic_eic_irq_mask,
+	.irq_unmask		= sprd_pmic_eic_irq_unmask,
+	.irq_set_type		= sprd_pmic_eic_irq_set_type,
+	.irq_bus_lock		= sprd_pmic_eic_bus_lock,
+	.irq_bus_sync_unlock	= sprd_pmic_eic_bus_sync_unlock,
+	.flags			= IRQCHIP_SKIP_SET_WAKE | IRQCHIP_IMMUTABLE,
+	GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
 static int sprd_pmic_eic_probe(struct platform_device *pdev)
 {
 	struct gpio_irq_chip *irq;
 
@@ -338,16 +353,8 @@ static int sprd_pmic_eic_probe(struct platform_device *pdev)
 	pmic_eic->chip.set = sprd_pmic_eic_set;
 	pmic_eic->chip.get = sprd_pmic_eic_get;
 
-	pmic_eic->intc.name = dev_name(&pdev->dev);
-	pmic_eic->intc.irq_mask = sprd_pmic_eic_irq_mask;
-	pmic_eic->intc.irq_unmask = sprd_pmic_eic_irq_unmask;
-	pmic_eic->intc.irq_set_type = sprd_pmic_eic_irq_set_type;
-	pmic_eic->intc.irq_bus_lock = sprd_pmic_eic_bus_lock;
-	pmic_eic->intc.irq_bus_sync_unlock = sprd_pmic_eic_bus_sync_unlock;
-	pmic_eic->intc.flags = IRQCHIP_SKIP_SET_WAKE;
-
 	irq = &pmic_eic->chip.irq;
-	irq->chip = &pmic_eic->intc;
+	gpio_irq_chip_set_chip(irq, &pmic_eic_irq_chip);
 	irq->threaded = true;
 
 	ret = devm_gpiochip_add_data(&pdev->dev, &pmic_eic->chip, pmic_eic);
@@ -215,6 +215,7 @@ static int sifive_gpio_probe(struct platform_device *pdev)
 		return -ENODEV;
 	}
 	parent = irq_find_host(irq_parent);
+	of_node_put(irq_parent);
 	if (!parent) {
 		dev_err(dev, "no IRQ parent domain\n");
 		return -ENODEV;
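The one-liner above plugs a device-node reference leak: the parent node comes back from the lookup with its refcount raised, and that reference must be dropped once irq_find_host() has consumed it, whether or not a domain was found. The rule as a hedged sketch (illustrative wrapper; of_irq_find_parent(), irq_find_host() and of_node_put() are the real kernel calls):

static struct irq_domain *lookup_parent_domain(struct device_node *node)
{
	struct device_node *irq_parent = of_irq_find_parent(node); /* +1 ref */
	struct irq_domain *parent;

	parent = irq_parent ? irq_find_host(irq_parent) : NULL;
	of_node_put(irq_parent);	/* of_node_put(NULL) is a no-op */

	return parent;			/* NULL means no parent domain */
}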
@@ -120,6 +120,7 @@ static void sprd_gpio_irq_mask(struct irq_data *data)
 	u32 offset = irqd_to_hwirq(data);
 
 	sprd_gpio_update(chip, offset, SPRD_GPIO_IE, 0);
+	gpiochip_disable_irq(chip, offset);
 }
 
 static void sprd_gpio_irq_ack(struct irq_data *data)
 
@@ -136,6 +137,7 @@ static void sprd_gpio_irq_unmask(struct irq_data *data)
 	u32 offset = irqd_to_hwirq(data);
 
 	sprd_gpio_update(chip, offset, SPRD_GPIO_IE, 1);
+	gpiochip_enable_irq(chip, offset);
 }
 
 static int sprd_gpio_irq_set_type(struct irq_data *data,
 
@@ -205,13 +207,14 @@ static void sprd_gpio_irq_handler(struct irq_desc *desc)
 	chained_irq_exit(ic, desc);
 }
 
-static struct irq_chip sprd_gpio_irqchip = {
+static const struct irq_chip sprd_gpio_irqchip = {
 	.name = "sprd-gpio",
 	.irq_ack = sprd_gpio_irq_ack,
 	.irq_mask = sprd_gpio_irq_mask,
 	.irq_unmask = sprd_gpio_irq_unmask,
 	.irq_set_type = sprd_gpio_irq_set_type,
-	.flags = IRQCHIP_SKIP_SET_WAKE,
+	.flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_IMMUTABLE,
+	GPIOCHIP_IRQ_RESOURCE_HELPERS,
 };
 
 static int sprd_gpio_probe(struct platform_device *pdev)
 
@@ -245,7 +248,7 @@ static int sprd_gpio_probe(struct platform_device *pdev)
 	sprd_gpio->chip.direction_output = sprd_gpio_direction_output;
 
 	irq = &sprd_gpio->chip.irq;
-	irq->chip = &sprd_gpio_irqchip;
+	gpio_irq_chip_set_chip(irq, &sprd_gpio_irqchip);
 	irq->handler = handle_bad_irq;
 	irq->default_type = IRQ_TYPE_NONE;
 	irq->parent_handler = sprd_gpio_irq_handler;
@@ -3905,8 +3905,8 @@ static struct gpio_desc *gpiod_find_and_request(struct device *consumer,
 					     const char *label,
 					     bool platform_lookup_allowed)
 {
+	unsigned long lookupflags = GPIO_LOOKUP_FLAGS_DEFAULT;
 	struct gpio_desc *desc = ERR_PTR(-ENOENT);
-	unsigned long lookupflags;
 	int ret;
 
 	if (!IS_ERR_OR_NULL(fwnode))
@@ -195,6 +195,7 @@ extern int amdgpu_emu_mode;
 extern uint amdgpu_smu_memory_pool_size;
 extern int amdgpu_smu_pptable_id;
 extern uint amdgpu_dc_feature_mask;
+extern uint amdgpu_freesync_vid_mode;
 extern uint amdgpu_dc_debug_mask;
 extern uint amdgpu_dc_visual_confirm;
 extern uint amdgpu_dm_abm_level;
 
@@ -181,6 +181,7 @@ int amdgpu_mes_kiq;
 int amdgpu_noretry = -1;
 int amdgpu_force_asic_type = -1;
 int amdgpu_tmz = -1; /* auto */
+uint amdgpu_freesync_vid_mode;
 int amdgpu_reset_method = -1; /* auto */
 int amdgpu_num_kcq = -1;
 int amdgpu_smartshift_bias;
@@ -879,6 +880,32 @@ module_param_named(backlight, amdgpu_backlight, bint, 0444);
 MODULE_PARM_DESC(tmz, "Enable TMZ feature (-1 = auto (default), 0 = off, 1 = on)");
 module_param_named(tmz, amdgpu_tmz, int, 0444);
 
+/**
+ * DOC: freesync_video (uint)
+ * Enable the optimization to adjust front porch timing to achieve seamless
+ * mode change experience when setting a freesync supported mode for which full
+ * modeset is not needed.
+ *
+ * The Display Core will add a set of modes derived from the base FreeSync
+ * video mode into the corresponding connector's mode list based on commonly
+ * used refresh rates and VRR range of the connected display, when users enable
+ * this feature. From the userspace perspective, they can see a seamless mode
+ * change experience when the change between different refresh rates under the
+ * same resolution. Additionally, userspace applications such as Video playback
+ * can read this modeset list and change the refresh rate based on the video
+ * frame rate. Finally, the userspace can also derive an appropriate mode for a
+ * particular refresh rate based on the FreeSync Mode and add it to the
+ * connector's mode list.
+ *
+ * Note: This is an experimental feature.
+ *
+ * The default value: 0 (off).
+ */
+MODULE_PARM_DESC(
+	freesync_video,
+	"Enable freesync modesetting optimization feature (0 = off (default), 1 = on)");
+module_param_named(freesync_video, amdgpu_freesync_vid_mode, uint, 0444);
+
 /**
  * DOC: reset_method (int)
  * GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco)
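The DOC block above reintroduces freesync_video as a read-only module parameter that gates the optimization at several decision points later in this diff. Reduced to its moving parts in a hedged sketch (stand-in driver, not amdgpu; module_param_named() and MODULE_PARM_DESC() are the real macros):

#include <linux/module.h>

static uint my_feature;	/* 0 = off (default), 1 = on */
module_param_named(feature, my_feature, uint, 0444);
MODULE_PARM_DESC(feature, "Enable the experimental feature (0 = off (default), 1 = on)");

/* Consult the knob at each decision point, so the 0 default
 * preserves the old behaviour everywhere at once. */
static bool feature_enabled(void)
{
	return my_feature != 0;
}

The 0444 permission makes the parameter visible but not writable through sysfs, so it can only be set at module load (or on the kernel command line).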
@@ -801,7 +801,7 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev,
 
 		p2plink->attr.name = "properties";
 		p2plink->attr.mode = KFD_SYSFS_FILE_MODE;
-		sysfs_attr_init(&iolink->attr);
+		sysfs_attr_init(&p2plink->attr);
 		ret = sysfs_create_file(p2plink->kobj, &p2plink->attr);
 		if (ret < 0)
 			return ret;
@@ -4361,6 +4361,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 		amdgpu_set_panel_orientation(&aconnector->base);
 	}
 
+	/* If we didn't find a panel, notify the acpi video detection */
+	if (dm->adev->flags & AMD_IS_APU && dm->num_of_edps == 0)
+		acpi_video_report_nolcd();
+
 	/* Software is initialized. Now we can register interrupt handlers. */
 	switch (adev->asic_type) {
 #if defined(CONFIG_DRM_AMD_DC_SI)
 
@@ -5831,7 +5835,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 		 */
 		DRM_DEBUG_DRIVER("No preferred mode found\n");
 	} else {
-		recalculate_timing = is_freesync_video_mode(&mode, aconnector);
+		recalculate_timing = amdgpu_freesync_vid_mode &&
+				 is_freesync_video_mode(&mode, aconnector);
 		if (recalculate_timing) {
 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
 			drm_mode_copy(&saved_mode, &mode);
 
@@ -6982,7 +6987,7 @@ static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connect
 	struct amdgpu_dm_connector *amdgpu_dm_connector =
 			to_amdgpu_dm_connector(connector);
 
-	if (!edid)
+	if (!(amdgpu_freesync_vid_mode && edid))
 		return;
 
 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
 
@@ -8846,7 +8851,8 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
 		 * TODO: Refactor this function to allow this check to work
 		 * in all conditions.
 		 */
-		if (dm_new_crtc_state->stream &&
+		if (amdgpu_freesync_vid_mode &&
+		    dm_new_crtc_state->stream &&
 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
 			goto skip_modeset;
 
@@ -8881,7 +8887,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
 		if (!dm_old_crtc_state->stream)
 			goto skip_modeset;
 
-		if (dm_new_crtc_state->stream &&
+		if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
 		    is_timing_unchanged_for_freesync(new_crtc_state,
 						     old_crtc_state)) {
 			new_crtc_state->mode_changed = false;
 
@@ -8893,7 +8899,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
 			set_freesync_fixed_config(dm_new_crtc_state);
 
 			goto skip_modeset;
-		} else if (aconnector &&
+		} else if (amdgpu_freesync_vid_mode && aconnector &&
 			   is_freesync_video_mode(&new_crtc_state->mode,
 						  aconnector)) {
 			struct drm_display_mode *high_mode;
@@ -6257,12 +6257,12 @@ bool dml32_CalculateDETSwathFillLatencyHiding(unsigned int NumberOfActiveSurface
 	double SwathSizePerSurfaceC[DC__NUM_DPP__MAX];
 	bool NotEnoughDETSwathFillLatencyHiding = false;
 
-	/* calculate sum of single swath size for all pipes in bytes*/
+	/* calculate sum of single swath size for all pipes in bytes */
 	for (k = 0; k < NumberOfActiveSurfaces; k++) {
-		SwathSizePerSurfaceY[k] += SwathHeightY[k] * SwathWidthY[k] * BytePerPixelInDETY[k] * NumOfDPP[k];
+		SwathSizePerSurfaceY[k] = SwathHeightY[k] * SwathWidthY[k] * BytePerPixelInDETY[k] * NumOfDPP[k];
 
 		if (SwathHeightC[k] != 0)
-			SwathSizePerSurfaceC[k] += SwathHeightC[k] * SwathWidthC[k] * BytePerPixelInDETC[k] * NumOfDPP[k];
+			SwathSizePerSurfaceC[k] = SwathHeightC[k] * SwathWidthC[k] * BytePerPixelInDETC[k] * NumOfDPP[k];
 		else
 			SwathSizePerSurfaceC[k] = 0;
 
@@ -41,9 +41,11 @@
 
 #include "i915_drv.h"
+#include "i915_reg.h"
 #include "intel_de.h"
 #include "intel_display_types.h"
 #include "intel_dsi.h"
 #include "intel_dsi_vbt.h"
+#include "intel_gmbus_regs.h"
 #include "vlv_dsi.h"
 #include "vlv_dsi_regs.h"
 #include "vlv_sideband.h"
@@ -377,6 +379,85 @@ static void icl_exec_gpio(struct intel_connector *connector,
 	drm_dbg_kms(&dev_priv->drm, "Skipping ICL GPIO element execution\n");
 }
 
+enum {
+	MIPI_RESET_1 = 0,
+	MIPI_AVDD_EN_1,
+	MIPI_BKLT_EN_1,
+	MIPI_AVEE_EN_1,
+	MIPI_VIO_EN_1,
+	MIPI_RESET_2,
+	MIPI_AVDD_EN_2,
+	MIPI_BKLT_EN_2,
+	MIPI_AVEE_EN_2,
+	MIPI_VIO_EN_2,
+};
+
+static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv,
+				      int gpio, bool value)
+{
+	int index;
+
+	if (drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 11 && gpio >= MIPI_RESET_2))
+		return;
+
+	switch (gpio) {
+	case MIPI_RESET_1:
+	case MIPI_RESET_2:
+		index = gpio == MIPI_RESET_1 ? HPD_PORT_A : HPD_PORT_B;
+
+		/*
+		 * Disable HPD to set the pin to output, and set output
+		 * value. The HPD pin should not be enabled for DSI anyway,
+		 * assuming the board design and VBT are sane, and the pin isn't
+		 * used by a non-DSI encoder.
+		 *
+		 * The locking protects against concurrent SHOTPLUG_CTL_DDI
+		 * modifications in irq setup and handling.
+		 */
+		spin_lock_irq(&dev_priv->irq_lock);
+		intel_de_rmw(dev_priv, SHOTPLUG_CTL_DDI,
+			     SHOTPLUG_CTL_DDI_HPD_ENABLE(index) |
+			     SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(index),
+			     value ? SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(index) : 0);
+		spin_unlock_irq(&dev_priv->irq_lock);
+		break;
+	case MIPI_AVDD_EN_1:
+	case MIPI_AVDD_EN_2:
+		index = gpio == MIPI_AVDD_EN_1 ? 0 : 1;
+
+		intel_de_rmw(dev_priv, PP_CONTROL(index), PANEL_POWER_ON,
+			     value ? PANEL_POWER_ON : 0);
+		break;
+	case MIPI_BKLT_EN_1:
+	case MIPI_BKLT_EN_2:
+		index = gpio == MIPI_BKLT_EN_1 ? 0 : 1;
+
+		intel_de_rmw(dev_priv, PP_CONTROL(index), EDP_BLC_ENABLE,
+			     value ? EDP_BLC_ENABLE : 0);
+		break;
+	case MIPI_AVEE_EN_1:
+	case MIPI_AVEE_EN_2:
+		index = gpio == MIPI_AVEE_EN_1 ? 1 : 2;
+
+		intel_de_rmw(dev_priv, GPIO(dev_priv, index),
+			     GPIO_CLOCK_VAL_OUT,
+			     GPIO_CLOCK_DIR_MASK | GPIO_CLOCK_DIR_OUT |
+			     GPIO_CLOCK_VAL_MASK | (value ? GPIO_CLOCK_VAL_OUT : 0));
+		break;
+	case MIPI_VIO_EN_1:
+	case MIPI_VIO_EN_2:
+		index = gpio == MIPI_VIO_EN_1 ? 1 : 2;
+
+		intel_de_rmw(dev_priv, GPIO(dev_priv, index),
+			     GPIO_DATA_VAL_OUT,
+			     GPIO_DATA_DIR_MASK | GPIO_DATA_DIR_OUT |
+			     GPIO_DATA_VAL_MASK | (value ? GPIO_DATA_VAL_OUT : 0));
+		break;
+	default:
+		MISSING_CASE(gpio);
+	}
+}
+
 static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
 {
 	struct drm_device *dev = intel_dsi->base.base.dev;
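The MIPI_RESET path above borrows the HPD pin as an output, which is why its read-modify-write of SHOTPLUG_CTL_DDI happens under dev_priv->irq_lock: the hotplug interrupt code updates the same register. The generic shape of a locked register read-modify-write, as a sketch (stand-in device struct; readl()/writel() and the spinlock API are the real kernel primitives):

struct my_dev {
	void __iomem *base;
	spinlock_t lock;	/* also taken by the interrupt handler */
};

static void my_rmw(struct my_dev *dev, u32 reg, u32 clear, u32 set)
{
	u32 val;

	spin_lock_irq(&dev->lock);
	val = readl(dev->base + reg);
	val = (val & ~clear) | set;	/* clear first, then set */
	writel(val, dev->base + reg);
	spin_unlock_irq(&dev->lock);
}

Without the lock, a concurrent interrupt-path update between the readl() and the writel() would be silently lost.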
@@ -384,8 +465,7 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
 	struct intel_connector *connector = intel_dsi->attached_connector;
 	u8 gpio_source, gpio_index = 0, gpio_number;
 	bool value;
-
-	drm_dbg_kms(&dev_priv->drm, "\n");
+	bool native = DISPLAY_VER(dev_priv) >= 11;
 
 	if (connector->panel.vbt.dsi.seq_version >= 3)
 		gpio_index = *data++;
 
@@ -398,10 +478,18 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
 	else
 		gpio_source = 0;
 
+	if (connector->panel.vbt.dsi.seq_version >= 4 && *data & BIT(1))
+		native = false;
+
 	/* pull up/down */
 	value = *data++ & 1;
 
-	if (DISPLAY_VER(dev_priv) >= 11)
+	drm_dbg_kms(&dev_priv->drm, "GPIO index %u, number %u, source %u, native %s, set to %s\n",
+		    gpio_index, gpio_number, gpio_source, str_yes_no(native), str_on_off(value));
+
+	if (native)
+		icl_native_gpio_set_value(dev_priv, gpio_number, value);
+	else if (DISPLAY_VER(dev_priv) >= 11)
 		icl_exec_gpio(connector, gpio_source, gpio_index, value);
 	else if (IS_VALLEYVIEW(dev_priv))
 		vlv_exec_gpio(connector, gpio_source, gpio_number, value);
@@ -730,37 +730,74 @@ static int eb_reserve(struct i915_execbuffer *eb)
 	bool unpinned;
 
 	/*
-	 * Attempt to pin all of the buffers into the GTT.
-	 * This is done in 2 phases:
+	 * We have one more buffers that we couldn't bind, which could be due to
+	 * various reasons. To resolve this we have 4 passes, with every next
+	 * level turning the screws tighter:
 	 *
-	 * 1. Unbind all objects that do not match the GTT constraints for
-	 *    the execbuffer (fenceable, mappable, alignment etc).
-	 * 2. Bind new objects.
+	 * 0. Unbind all objects that do not match the GTT constraints for the
+	 * execbuffer (fenceable, mappable, alignment etc). Bind all new
+	 * objects. This avoids unnecessary unbinding of later objects in order
+	 * to make room for the earlier objects *unless* we need to defragment.
 	 *
-	 * This avoid unnecessary unbinding of later objects in order to make
-	 * room for the earlier objects *unless* we need to defragment.
+	 * 1. Reorder the buffers, where objects with the most restrictive
+	 * placement requirements go first (ignoring fixed location buffers for
+	 * now). For example, objects needing the mappable aperture (the first
+	 * 256M of GTT), should go first vs objects that can be placed just
+	 * about anywhere. Repeat the previous pass.
 	 *
-	 * Defragmenting is skipped if all objects are pinned at a fixed location.
+	 * 2. Consider buffers that are pinned at a fixed location. Also try to
+	 * evict the entire VM this time, leaving only objects that we were
+	 * unable to lock. Try again to bind the buffers. (still using the new
+	 * buffer order).
+	 *
+	 * 3. We likely have object lock contention for one or more stubborn
+	 * objects in the VM, for which we need to evict to make forward
+	 * progress (perhaps we are fighting the shrinker?). When evicting the
+	 * VM this time around, anything that we can't lock we now track using
+	 * the busy_bo, using the full lock (after dropping the vm->mutex to
+	 * prevent deadlocks), instead of trylock. We then continue to evict the
+	 * VM, this time with the stubborn object locked, which we can now
+	 * hopefully unbind (if still bound in the VM). Repeat until the VM is
+	 * evicted. Finally we should be able bind everything.
 	 */
-	for (pass = 0; pass <= 2; pass++) {
+	for (pass = 0; pass <= 3; pass++) {
 		int pin_flags = PIN_USER | PIN_VALIDATE;
 
 		if (pass == 0)
 			pin_flags |= PIN_NONBLOCK;
 
 		if (pass >= 1)
-			unpinned = eb_unbind(eb, pass == 2);
+			unpinned = eb_unbind(eb, pass >= 2);
 
 		if (pass == 2) {
 			err = mutex_lock_interruptible(&eb->context->vm->mutex);
 			if (!err) {
-				err = i915_gem_evict_vm(eb->context->vm, &eb->ww);
+				err = i915_gem_evict_vm(eb->context->vm, &eb->ww, NULL);
 				mutex_unlock(&eb->context->vm->mutex);
 			}
 			if (err)
				return err;
 		}
 
+		if (pass == 3) {
+retry:
+			err = mutex_lock_interruptible(&eb->context->vm->mutex);
+			if (!err) {
+				struct drm_i915_gem_object *busy_bo = NULL;
+
+				err = i915_gem_evict_vm(eb->context->vm, &eb->ww, &busy_bo);
+				mutex_unlock(&eb->context->vm->mutex);
+				if (err && busy_bo) {
+					err = i915_gem_object_lock(busy_bo, &eb->ww);
+					i915_gem_object_put(busy_bo);
+					if (!err)
+						goto retry;
+				}
+			}
+			if (err)
+				return err;
+		}
+
 		list_for_each_entry(ev, &eb->unbound, bind_link) {
 			err = eb_reserve_vma(eb, ev, pin_flags);
 			if (err)
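Pass 3 above is a trylock-escalation loop: eviction normally trylocks objects, but once a stubborn object keeps failing, the vm lock is dropped, the contended object is locked the blocking way, and the whole eviction is retried with that lock held. The same control flow in a runnable pthread model (two locks standing in for vm->mutex and the contended object's lock; a deliberate simplification, not the i915 ww-mutex machinery):

#include <pthread.h>

static pthread_mutex_t vm_lock = PTHREAD_MUTEX_INITIALIZER;	/* ~vm->mutex */
static pthread_mutex_t obj_lock = PTHREAD_MUTEX_INITIALIZER;	/* ~object lock */

static int evict_all(void)
{
	int have_obj = 0;

retry:
	pthread_mutex_lock(&vm_lock);
	if (!have_obj && pthread_mutex_trylock(&obj_lock) != 0) {
		/* Contended: drop the vm lock first (deadlock avoidance),
		 * take the stubborn lock the blocking way, then rescan
		 * while still holding it. */
		pthread_mutex_unlock(&vm_lock);
		pthread_mutex_lock(&obj_lock);
		have_obj = 1;
		goto retry;
	}
	have_obj = 1;

	/* ... evict: both locks are held here ... */

	pthread_mutex_unlock(&obj_lock);
	pthread_mutex_unlock(&vm_lock);
	return 0;
}

int main(void) { return evict_all(); }

Dropping the outer lock before blocking on the inner one is the key move: it converts a potential ABBA deadlock into a bounded retry.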
@@ -369,7 +369,7 @@ retry:
 	if (vma == ERR_PTR(-ENOSPC)) {
 		ret = mutex_lock_interruptible(&ggtt->vm.mutex);
 		if (!ret) {
-			ret = i915_gem_evict_vm(&ggtt->vm, &ww);
+			ret = i915_gem_evict_vm(&ggtt->vm, &ww, NULL);
 			mutex_unlock(&ggtt->vm.mutex);
 		}
 		if (ret)
@@ -1109,9 +1109,15 @@ static void mmio_invalidate_full(struct intel_gt *gt)
 			continue;
 
 		if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
+			u32 val = BIT(engine->instance);
+
+			if (engine->class == VIDEO_DECODE_CLASS ||
+			    engine->class == VIDEO_ENHANCEMENT_CLASS ||
+			    engine->class == COMPUTE_CLASS)
+				val = _MASKED_BIT_ENABLE(val);
 			intel_gt_mcr_multicast_write_fw(gt,
 							xehp_regs[engine->class],
-							BIT(engine->instance));
+							val);
 		} else {
 			rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
 			if (!i915_mmio_reg_offset(rb.reg))
@@ -545,6 +545,32 @@ static int check_ccs_header(struct intel_gt *gt,
 	return 0;
 }
 
+static int try_firmware_load(struct intel_uc_fw *uc_fw, const struct firmware **fw)
+{
+	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
+	struct device *dev = gt->i915->drm.dev;
+	int err;
+
+	err = firmware_request_nowarn(fw, uc_fw->file_selected.path, dev);
+
+	if (err)
+		return err;
+
+	if ((*fw)->size > INTEL_UC_RSVD_GGTT_PER_FW) {
+		drm_err(&gt->i915->drm,
+			"%s firmware %s: size (%zuKB) exceeds max supported size (%uKB)\n",
+			intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
+			(*fw)->size / SZ_1K, INTEL_UC_RSVD_GGTT_PER_FW / SZ_1K);
+
+		/* try to find another blob to load */
+		release_firmware(*fw);
+		*fw = NULL;
+		return -ENOENT;
+	}
+
+	return 0;
+}
+
 /**
  * intel_uc_fw_fetch - fetch uC firmware
 * @uc_fw: uC firmware
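try_firmware_load() above pulls the request-plus-size check into one helper so that the first attempt and every fallback attempt in the older-version loop validate the blob the same way, with an oversized blob converted into -ENOENT so the caller simply moves on to the next candidate. A runnable toy with the same contract (stand-in loader and limit, not the i915 code):

#include <errno.h>
#include <stdio.h>

#define MAX_BLOB_SIZE 1024	/* stands in for INTEL_UC_RSVD_GGTT_PER_FW */

/* Pretend loader: "loads" a blob and reports its size. */
static int try_load(const char *path, long *size)
{
	*size = 4096;	/* imagine this came from the loaded file */

	if (*size > MAX_BLOB_SIZE) {
		fprintf(stderr, "%s: size %ldB exceeds max %dB\n",
			path, *size, MAX_BLOB_SIZE);
		*size = 0;
		return -ENOENT;	/* caller treats this as "try the next blob" */
	}
	return 0;
}

int main(void)
{
	long size;

	/* -ENOENT, not a hard error: a fallback loop would continue. */
	return try_load("fw_v2.bin", &size) == -ENOENT ? 0 : 1;
}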
@@ -558,7 +584,6 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
 	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
 	struct drm_i915_private *i915 = gt->i915;
 	struct intel_uc_fw_file file_ideal;
-	struct device *dev = i915->drm.dev;
 	struct drm_i915_gem_object *obj;
 	const struct firmware *fw = NULL;
 	bool old_ver = false;
 
@@ -574,20 +599,9 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
 	__force_fw_fetch_failures(uc_fw, -EINVAL);
 	__force_fw_fetch_failures(uc_fw, -ESTALE);
 
-	err = firmware_request_nowarn(&fw, uc_fw->file_selected.path, dev);
+	err = try_firmware_load(uc_fw, &fw);
 	memcpy(&file_ideal, &uc_fw->file_wanted, sizeof(file_ideal));
 
-	if (!err && fw->size > INTEL_UC_RSVD_GGTT_PER_FW) {
-		drm_err(&i915->drm,
-			"%s firmware %s: size (%zuKB) exceeds max supported size (%uKB)\n",
-			intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
-			fw->size / SZ_1K, INTEL_UC_RSVD_GGTT_PER_FW / SZ_1K);
-
-		/* try to find another blob to load */
-		release_firmware(fw);
-		err = -ENOENT;
-	}
-
 	/* Any error is terminal if overriding. Don't bother searching for older versions */
 	if (err && intel_uc_fw_is_overridden(uc_fw))
 		goto fail;
 
@@ -608,7 +622,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
 			break;
 		}
 
-		err = firmware_request_nowarn(&fw, uc_fw->file_selected.path, dev);
+		err = try_firmware_load(uc_fw, &fw);
 	}
 
 	if (err)
@@ -151,6 +151,22 @@ DEFINE_SIMPLE_ATTRIBUTE(vgpu_scan_nonprivbb_fops,
 			vgpu_scan_nonprivbb_get, vgpu_scan_nonprivbb_set,
 			"0x%llx\n");
 
+static int vgpu_status_get(void *data, u64 *val)
+{
+	struct intel_vgpu *vgpu = (struct intel_vgpu *)data;
+
+	*val = 0;
+
+	if (test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
+		*val |= (1 << INTEL_VGPU_STATUS_ATTACHED);
+	if (test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status))
+		*val |= (1 << INTEL_VGPU_STATUS_ACTIVE);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(vgpu_status_fops, vgpu_status_get, NULL, "0x%llx\n");
+
 /**
  * intel_gvt_debugfs_add_vgpu - register debugfs entries for a vGPU
  * @vgpu: a vGPU
 
@@ -162,11 +178,12 @@ void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu)
 	snprintf(name, 16, "vgpu%d", vgpu->id);
 	vgpu->debugfs = debugfs_create_dir(name, vgpu->gvt->debugfs_root);
 
-	debugfs_create_bool("active", 0444, vgpu->debugfs, &vgpu->active);
 	debugfs_create_file("mmio_diff", 0444, vgpu->debugfs, vgpu,
 			    &vgpu_mmio_diff_fops);
 	debugfs_create_file("scan_nonprivbb", 0644, vgpu->debugfs, vgpu,
 			    &vgpu_scan_nonprivbb_fops);
+	debugfs_create_file("status", 0644, vgpu->debugfs, vgpu,
+			    &vgpu_status_fops);
 }
 
 /**
 
@@ -175,8 +192,13 @@ void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu)
 */
 void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu)
 {
-	debugfs_remove_recursive(vgpu->debugfs);
-	vgpu->debugfs = NULL;
+	struct intel_gvt *gvt = vgpu->gvt;
+	struct drm_minor *minor = gvt->gt->i915->drm.primary;
+
+	if (minor->debugfs_root && gvt->debugfs_root) {
+		debugfs_remove_recursive(vgpu->debugfs);
+		vgpu->debugfs = NULL;
+	}
 }
 
 /**
 
@@ -199,6 +221,10 @@ void intel_gvt_debugfs_init(struct intel_gvt *gvt)
 */
 void intel_gvt_debugfs_clean(struct intel_gvt *gvt)
 {
-	debugfs_remove_recursive(gvt->debugfs_root);
-	gvt->debugfs_root = NULL;
+	struct drm_minor *minor = gvt->gt->i915->drm.primary;
+
+	if (minor->debugfs_root) {
+		debugfs_remove_recursive(gvt->debugfs_root);
+		gvt->debugfs_root = NULL;
+	}
 }
@@ -134,7 +134,8 @@ static void dmabuf_gem_object_free(struct kref *kref)
 	struct list_head *pos;
 	struct intel_vgpu_dmabuf_obj *dmabuf_obj;
 
-	if (vgpu && vgpu->active && !list_empty(&vgpu->dmabuf_obj_list_head)) {
+	if (vgpu && test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status) &&
+	    !list_empty(&vgpu->dmabuf_obj_list_head)) {
 		list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
 			dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list);
 			if (dmabuf_obj == obj) {
@@ -55,7 +55,7 @@ static bool intel_gvt_is_valid_gfn(struct intel_vgpu *vgpu, unsigned long gfn)
 	int idx;
 	bool ret;
 
-	if (!vgpu->attached)
+	if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
 		return false;
 
 	idx = srcu_read_lock(&kvm->srcu);
 
@@ -1178,7 +1178,7 @@ static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
 	if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M))
 		return 0;
 
-	if (!vgpu->attached)
+	if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
 		return -EINVAL;
 	pfn = gfn_to_pfn(vgpu->vfio_device.kvm, ops->get_pfn(entry));
 	if (is_error_noslot_pfn(pfn))
 
@@ -1209,10 +1209,8 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
 	for_each_shadow_entry(sub_spt, &sub_se, sub_index) {
 		ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + sub_index,
 						   PAGE_SIZE, &dma_addr);
-		if (ret) {
-			ppgtt_invalidate_spt(spt);
-			return ret;
-		}
+		if (ret)
+			goto err;
 		sub_se.val64 = se->val64;
 
 		/* Copy the PAT field from PDE. */
 
@@ -1231,6 +1229,17 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
 	ops->set_pfn(se, sub_spt->shadow_page.mfn);
 	ppgtt_set_shadow_entry(spt, se, index);
 	return 0;
+err:
+	/* Cancel the existing addess mappings of DMA addr. */
+	for_each_present_shadow_entry(sub_spt, &sub_se, sub_index) {
+		gvt_vdbg_mm("invalidate 4K entry\n");
+		ppgtt_invalidate_pte(sub_spt, &sub_se);
+	}
+	/* Release the new allocated spt. */
+	trace_spt_change(sub_spt->vgpu->id, "release", sub_spt,
+			 sub_spt->guest_page.gfn, sub_spt->shadow_page.type);
+	ppgtt_free_spt(sub_spt);
+	return ret;
 }
 
 static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
@@ -172,13 +172,18 @@ struct intel_vgpu_submission {
 
 #define KVMGT_DEBUGFS_FILENAME "kvmgt_nr_cache_entries"
 
+enum {
+	INTEL_VGPU_STATUS_ATTACHED = 0,
+	INTEL_VGPU_STATUS_ACTIVE,
+	INTEL_VGPU_STATUS_NR_BITS,
+};
+
 struct intel_vgpu {
 	struct vfio_device vfio_device;
 	struct intel_gvt *gvt;
 	struct mutex vgpu_lock;
 	int id;
-	bool active;
-	bool attached;
+	DECLARE_BITMAP(status, INTEL_VGPU_STATUS_NR_BITS);
 	bool pv_notified;
 	bool failsafe;
 	unsigned int resetting_eng;
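The header change above replaces two independent bools with bits in one DECLARE_BITMAP() word, so attach/active state can be tested and flipped atomically with set_bit()/clear_bit()/test_bit() rather than through racy plain stores. A userspace model of the same idea (C11 atomics standing in for the kernel bitop API):

#include <stdatomic.h>
#include <stdio.h>

enum { STATUS_ATTACHED, STATUS_ACTIVE };

static _Atomic unsigned long status;

static void set_flag(int bit)   { atomic_fetch_or(&status, 1UL << bit); }
static void clear_flag(int bit) { atomic_fetch_and(&status, ~(1UL << bit)); }
static int  test_flag(int bit)  { return (atomic_load(&status) >> bit) & 1; }

int main(void)
{
	set_flag(STATUS_ATTACHED);
	set_flag(STATUS_ACTIVE);
	clear_flag(STATUS_ACTIVE);
	printf("attached=%d active=%d\n",
	       test_flag(STATUS_ATTACHED), test_flag(STATUS_ACTIVE));
	return 0;
}

Packing the flags also gives debugfs a single word to report, which is what the new vgpu status attribute earlier in this diff exposes.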
@@ -467,7 +472,7 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
 
 #define for_each_active_vgpu(gvt, vgpu, id) \
 	idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
-		for_each_if(vgpu->active)
+		for_each_if(test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status))
 
 static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu,
 					    u32 offset, u32 val, bool low)
 
@@ -725,7 +730,7 @@ static inline bool intel_gvt_mmio_is_cmd_write_patch(
 static inline int intel_gvt_read_gpa(struct intel_vgpu *vgpu, unsigned long gpa,
 				     void *buf, unsigned long len)
 {
-	if (!vgpu->attached)
+	if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
 		return -ESRCH;
 	return vfio_dma_rw(&vgpu->vfio_device, gpa, buf, len, false);
 }
 
@@ -743,7 +748,7 @@ static inline int intel_gvt_read_gpa(struct intel_vgpu *vgpu, unsigned long gpa,
 static inline int intel_gvt_write_gpa(struct intel_vgpu *vgpu,
 				      unsigned long gpa, void *buf, unsigned long len)
 {
-	if (!vgpu->attached)
+	if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
 		return -ESRCH;
 	return vfio_dma_rw(&vgpu->vfio_device, gpa, buf, len, true);
 }
 
@@ -433,7 +433,7 @@ static int inject_virtual_interrupt(struct intel_vgpu *vgpu)
 	 * enabled by guest. so if msi_trigger is null, success is still
 	 * returned and don't inject interrupt into guest.
 	 */
-	if (!vgpu->attached)
+	if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
 		return -ESRCH;
 	if (vgpu->msi_trigger && eventfd_signal(vgpu->msi_trigger, 1) != 1)
 		return -EFAULT;
@@ -638,7 +638,7 @@ static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu)
 
 	mutex_lock(&vgpu->gvt->lock);
 	for_each_active_vgpu(vgpu->gvt, itr, id) {
-		if (!itr->attached)
+		if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, itr->status))
 			continue;
 
 		if (vgpu->vfio_device.kvm == itr->vfio_device.kvm) {
 
@@ -655,9 +655,6 @@ static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
 {
 	struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
 
-	if (vgpu->attached)
-		return -EEXIST;
-
 	if (!vgpu->vfio_device.kvm ||
 	    vgpu->vfio_device.kvm->mm != current->mm) {
 		gvt_vgpu_err("KVM is required to use Intel vGPU\n");
 
@@ -667,14 +664,14 @@ static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
 	if (__kvmgt_vgpu_exist(vgpu))
 		return -EEXIST;
 
-	vgpu->attached = true;
-
 	vgpu->track_node.track_write = kvmgt_page_track_write;
 	vgpu->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
 	kvm_get_kvm(vgpu->vfio_device.kvm);
 	kvm_page_track_register_notifier(vgpu->vfio_device.kvm,
 					 &vgpu->track_node);
 
+	set_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status);
+
 	debugfs_create_ulong(KVMGT_DEBUGFS_FILENAME, 0444, vgpu->debugfs,
 			     &vgpu->nr_cache_entries);
 
@@ -698,11 +695,10 @@ static void intel_vgpu_close_device(struct vfio_device *vfio_dev)
 {
 	struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
 
-	if (!vgpu->attached)
-		return;
-
 	intel_gvt_release_vgpu(vgpu);
 
+	clear_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status);
+
 	debugfs_remove(debugfs_lookup(KVMGT_DEBUGFS_FILENAME, vgpu->debugfs));
 
 	kvm_page_track_unregister_notifier(vgpu->vfio_device.kvm,
 
@@ -718,8 +714,6 @@ static void intel_vgpu_close_device(struct vfio_device *vfio_dev)
 	vgpu->dma_addr_cache = RB_ROOT;
 
 	intel_vgpu_release_msi_eventfd_ctx(vgpu);
-
-	vgpu->attached = false;
 }
 
 static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
 
@@ -1512,9 +1506,6 @@ static void intel_vgpu_remove(struct mdev_device *mdev)
 {
 	struct intel_vgpu *vgpu = dev_get_drvdata(&mdev->dev);
 
-	if (WARN_ON_ONCE(vgpu->attached))
-		return;
-
 	vfio_unregister_group_dev(&vgpu->vfio_device);
 	vfio_put_device(&vgpu->vfio_device);
 }
 
@@ -1559,7 +1550,7 @@ int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn)
 	struct kvm_memory_slot *slot;
 	int idx;
 
-	if (!info->attached)
+	if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, info->status))
 		return -ESRCH;
 
 	idx = srcu_read_lock(&kvm->srcu);
 
@@ -1589,8 +1580,8 @@ int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn)
 	struct kvm_memory_slot *slot;
 	int idx;
 
-	if (!info->attached)
-		return 0;
+	if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, info->status))
+		return -ESRCH;
 
 	idx = srcu_read_lock(&kvm->srcu);
 	slot = gfn_to_memslot(kvm, gfn);
 
@@ -1668,7 +1659,7 @@ int intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
 	struct gvt_dma *entry;
 	int ret;
 
-	if (!vgpu->attached)
+	if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
 		return -EINVAL;
 
 	mutex_lock(&vgpu->cache_lock);
 
@@ -1714,8 +1705,8 @@ int intel_gvt_dma_pin_guest_page(struct intel_vgpu *vgpu, dma_addr_t dma_addr)
 	struct gvt_dma *entry;
 	int ret = 0;
 
-	if (!vgpu->attached)
-		return -ENODEV;
+	if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
+		return -EINVAL;
 
 	mutex_lock(&vgpu->cache_lock);
 	entry = __gvt_cache_find_dma_addr(vgpu, dma_addr);
 
@@ -1742,7 +1733,7 @@ void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu,
 {
 	struct gvt_dma *entry;
 
-	if (!vgpu->attached)
+	if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
 		return;
 
 	mutex_lock(&vgpu->cache_lock);
 
@@ -1778,7 +1769,7 @@ static void intel_gvt_test_and_emulate_vblank(struct intel_gvt *gvt)
 	idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) {
 		if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK + id,
 		   (void *)&gvt->service_request)) {
-			if (vgpu->active)
+			if (test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status))
 				intel_vgpu_emulate_vblank(vgpu);
 		}
 	}
@@ -695,6 +695,7 @@ intel_vgpu_shadow_mm_pin(struct intel_vgpu_workload *workload)
 
 	if (workload->shadow_mm->type != INTEL_GVT_MM_PPGTT ||
 	    !workload->shadow_mm->ppgtt_mm.shadowed) {
+		intel_vgpu_unpin_mm(workload->shadow_mm);
 		gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
 		return -EINVAL;
 	}
 
@@ -865,7 +866,8 @@ pick_next_workload(struct intel_gvt *gvt, struct intel_engine_cs *engine)
 		goto out;
 	}
 
-	if (!scheduler->current_vgpu->active ||
+	if (!test_bit(INTEL_VGPU_STATUS_ACTIVE,
+		      scheduler->current_vgpu->status) ||
 	    list_empty(workload_q_head(scheduler->current_vgpu, engine)))
 		goto out;
 
@@ -166,9 +166,7 @@ void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt)
 */
 void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
 {
-	mutex_lock(&vgpu->vgpu_lock);
-	vgpu->active = true;
-	mutex_unlock(&vgpu->vgpu_lock);
+	set_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status);
 }
 
 /**
 
@@ -183,7 +181,7 @@ void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
 {
 	mutex_lock(&vgpu->vgpu_lock);
 
-	vgpu->active = false;
+	clear_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status);
 
 	if (atomic_read(&vgpu->submission.running_workload_num)) {
 		mutex_unlock(&vgpu->vgpu_lock);
 
@@ -228,7 +226,8 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
 	struct intel_gvt *gvt = vgpu->gvt;
 	struct drm_i915_private *i915 = gvt->gt->i915;
 
-	drm_WARN(&i915->drm, vgpu->active, "vGPU is still active!\n");
+	drm_WARN(&i915->drm, test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status),
+		 "vGPU is still active!\n");
 
 	/*
 	 * remove idr first so later clean can judge if need to stop
 
@@ -285,8 +284,7 @@ struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt)
 	if (ret)
 		goto out_free_vgpu;
 
-	vgpu->active = false;
-
+	clear_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status);
 	return vgpu;
 
 out_free_vgpu:
@@ -416,6 +416,11 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
 * @vm: Address space to cleanse
 * @ww: An optional struct i915_gem_ww_ctx. If not NULL, i915_gem_evict_vm
 * will be able to evict vma's locked by the ww as well.
+ * @busy_bo: Optional pointer to struct drm_i915_gem_object. If not NULL, then
+ * in the event i915_gem_evict_vm() is unable to trylock an object for eviction,
+ * then @busy_bo will point to it. -EBUSY is also returned. The caller must drop
+ * the vm->mutex, before trying again to acquire the contended lock. The caller
+ * also owns a reference to the object.
 *
 * This function evicts all vmas from a vm.
 *
 
@@ -425,7 +430,8 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
-int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww)
+int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww,
+		      struct drm_i915_gem_object **busy_bo)
 {
 	int ret = 0;
 
@@ -457,15 +463,22 @@ int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww)
 			 * the resv is shared among multiple objects, we still
 			 * need the object ref.
 			 */
-			if (dying_vma(vma) ||
+			if (!i915_gem_object_get_rcu(vma->obj) ||
 			    (ww && (dma_resv_locking_ctx(vma->obj->base.resv) == &ww->ctx))) {
 				__i915_vma_pin(vma);
 				list_add(&vma->evict_link, &locked_eviction_list);
 				continue;
 			}
 
-			if (!i915_gem_object_trylock(vma->obj, ww))
+			if (!i915_gem_object_trylock(vma->obj, ww)) {
+				if (busy_bo) {
+					*busy_bo = vma->obj; /* holds ref */
+					ret = -EBUSY;
+					break;
+				}
+				i915_gem_object_put(vma->obj);
 				continue;
+			}
 
 			__i915_vma_pin(vma);
 			list_add(&vma->evict_link, &eviction_list);
 
@@ -473,25 +486,29 @@ int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww)
 		if (list_empty(&eviction_list) && list_empty(&locked_eviction_list))
 			break;
 
+		ret = 0;
 		/* Unbind locked objects first, before unlocking the eviction_list */
 		list_for_each_entry_safe(vma, vn, &locked_eviction_list, evict_link) {
 			__i915_vma_unpin(vma);
 
-			if (ret == 0)
+			if (ret == 0) {
 				ret = __i915_vma_unbind(vma);
-			if (ret != -EINTR) /* "Get me out of here!" */
-				ret = 0;
+				if (ret != -EINTR) /* "Get me out of here!" */
+					ret = 0;
+			}
+			if (!dying_vma(vma))
+				i915_gem_object_put(vma->obj);
 		}
 
 		list_for_each_entry_safe(vma, vn, &eviction_list, evict_link) {
 			__i915_vma_unpin(vma);
-			if (ret == 0)
+			if (ret == 0) {
 				ret = __i915_vma_unbind(vma);
-			if (ret != -EINTR) /* "Get me out of here!" */
-				ret = 0;
+				if (ret != -EINTR) /* "Get me out of here!" */
+					ret = 0;
+			}
 
 			i915_gem_object_unlock(vma->obj);
 			i915_gem_object_put(vma->obj);
 		}
 	} while (ret == 0);
@@ -11,6 +11,7 @@
 struct drm_mm_node;
 struct i915_address_space;
 struct i915_gem_ww_ctx;
+struct drm_i915_gem_object;
 
 int __must_check i915_gem_evict_something(struct i915_address_space *vm,
 					  struct i915_gem_ww_ctx *ww,
 
@@ -23,6 +24,7 @@ int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
 					 struct drm_mm_node *node,
 					 unsigned int flags);
 int i915_gem_evict_vm(struct i915_address_space *vm,
-		      struct i915_gem_ww_ctx *ww);
+		      struct i915_gem_ww_ctx *ww,
+		      struct drm_i915_gem_object **busy_bo);
 
 #endif /* __I915_GEM_EVICT_H__ */
@@ -1974,7 +1974,10 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
 	if (ddi_hotplug_trigger) {
 		u32 dig_hotplug_reg;
 
+		/* Locking due to DSI native GPIO sequences */
+		spin_lock(&dev_priv->irq_lock);
 		dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI, 0, 0);
+		spin_unlock(&dev_priv->irq_lock);
 
 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
 				   ddi_hotplug_trigger, dig_hotplug_reg,
@@ -1129,7 +1129,6 @@ static const struct intel_gt_definition xelpmp_extra_gt[] = {
 	{}
 };
 
-__maybe_unused
 static const struct intel_device_info mtl_info = {
 	XE_HP_FEATURES,
 	XE_LPDP_FEATURES,
@@ -5988,6 +5988,7 @@
 
 #define SHOTPLUG_CTL_DDI				_MMIO(0xc4030)
 #define   SHOTPLUG_CTL_DDI_HPD_ENABLE(hpd_pin)		(0x8 << (_HPD_PIN_DDI(hpd_pin) * 4))
+#define   SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(hpd_pin)	(0x4 << (_HPD_PIN_DDI(hpd_pin) * 4))
 #define   SHOTPLUG_CTL_DDI_HPD_STATUS_MASK(hpd_pin)	(0x3 << (_HPD_PIN_DDI(hpd_pin) * 4))
 #define   SHOTPLUG_CTL_DDI_HPD_NO_DETECT(hpd_pin)	(0x0 << (_HPD_PIN_DDI(hpd_pin) * 4))
 #define   SHOTPLUG_CTL_DDI_HPD_SHORT_DETECT(hpd_pin)	(0x1 << (_HPD_PIN_DDI(hpd_pin) * 4))
@@ -1566,7 +1566,7 @@ static int __i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
 			 * locked objects when called from execbuf when pinning
 			 * is removed. This would probably regress badly.
 			 */
-			i915_gem_evict_vm(vm, NULL);
+			i915_gem_evict_vm(vm, NULL, NULL);
 			mutex_unlock(&vm->mutex);
 		}
 	} while (1);
@@ -344,7 +344,7 @@ static int igt_evict_vm(void *arg)
 
 	/* Everything is pinned, nothing should happen */
 	mutex_lock(&ggtt->vm.mutex);
-	err = i915_gem_evict_vm(&ggtt->vm, NULL);
+	err = i915_gem_evict_vm(&ggtt->vm, NULL, NULL);
 	mutex_unlock(&ggtt->vm.mutex);
 	if (err) {
 		pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n",
 
@@ -356,7 +356,7 @@ static int igt_evict_vm(void *arg)
 
 	for_i915_gem_ww(&ww, err, false) {
 		mutex_lock(&ggtt->vm.mutex);
-		err = i915_gem_evict_vm(&ggtt->vm, &ww);
+		err = i915_gem_evict_vm(&ggtt->vm, &ww, NULL);
 		mutex_unlock(&ggtt->vm.mutex);
 	}
 
@@ -614,6 +614,11 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
 		break;
 	}
 
+	if (ipu_plane->dp_flow == IPU_DP_FLOW_SYNC_BG)
+		width = ipu_src_rect_width(new_state);
+	else
+		width = drm_rect_width(&new_state->src) >> 16;
+
 	eba = drm_plane_state_to_eba(new_state, 0);
 
 	/*
 
@@ -622,8 +627,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
 	 */
 	if (ipu_state->use_pre) {
 		axi_id = ipu_chan_assign_axi_id(ipu_plane->dma);
-		ipu_prg_channel_configure(ipu_plane->ipu_ch, axi_id,
-					  ipu_src_rect_width(new_state),
+		ipu_prg_channel_configure(ipu_plane->ipu_ch, axi_id, width,
 					  drm_rect_height(&new_state->src) >> 16,
 					  fb->pitches[0], fb->format->format,
 					  fb->modifier, &eba);
 
@@ -678,9 +682,8 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
 		break;
 	}
 
-	ipu_dmfc_config_wait4eot(ipu_plane->dmfc, ALIGN(drm_rect_width(dst), 8));
+	ipu_dmfc_config_wait4eot(ipu_plane->dmfc, width);
 
-	width = ipu_src_rect_width(new_state);
 	height = drm_rect_height(&new_state->src) >> 16;
 	info = drm_format_info(fb->format->format);
 	ipu_calculate_bursts(width, info->cpp[0], fb->pitches[0],
 
@@ -744,8 +747,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
 		ipu_cpmem_set_burstsize(ipu_plane->ipu_ch, 16);
 
 		ipu_cpmem_zero(ipu_plane->alpha_ch);
-		ipu_cpmem_set_resolution(ipu_plane->alpha_ch,
-					 ipu_src_rect_width(new_state),
+		ipu_cpmem_set_resolution(ipu_plane->alpha_ch, width,
 					 drm_rect_height(&new_state->src) >> 16);
 		ipu_cpmem_set_format_passthrough(ipu_plane->alpha_ch, 8);
 		ipu_cpmem_set_high_priority(ipu_plane->alpha_ch);
Some files were not shown because too many files have changed in this diff