Merge back other thermal control material for 6.3.
* thermal: (734 commits)
  thermal: core: call put_device() only after device_register() fails
  Linux 6.2-rc4
  kbuild: Fix CFI hash randomization with KASAN
  firmware: coreboot: Check size of table entry and use flex-array
  kallsyms: Fix scheduling with interrupts disabled in self-test
  ata: pata_cs5535: Don't build on UML
  lockref: stop doing cpu_relax in the cmpxchg loop
  x86/pci: Treat EfiMemoryMappedIO as reservation of ECAM space
  efi: tpm: Avoid READ_ONCE() for accessing the event log
  io_uring: lock overflowing for IOPOLL
  ALSA: pcm: Move rwsem lock inside snd_ctl_elem_read to prevent UAF
  iommu/mediatek-v1: Fix an error handling path in mtk_iommu_v1_probe()
  iommu/iova: Fix alloc iova overflows issue
  iommu: Fix refcount leak in iommu_device_claim_dma_owner
  iommu/arm-smmu-v3: Don't unregister on shutdown
  iommu/arm-smmu: Don't unregister on shutdown
  iommu/arm-smmu: Report IOMMU_CAP_CACHE_COHERENCY even betterer
  platform/x86: thinkpad_acpi: Fix profile mode display in AMT mode
  ALSA: usb-audio: Fix possible NULL pointer dereference in snd_usb_pcm_has_fixed_rate()
  platform/x86: int3472/discrete: Ensure the clk/power enable pins are in output mode
  ...
This commit is contained in commit 8ef0ca4a17.
@@ -39,6 +39,7 @@
 *.o.*
 *.patch
 *.rmeta
 *.rpm
 *.rsi
 *.s
 *.so
.mailmap
@@ -422,6 +422,7 @@ Tony Luck <tony.luck@intel.com>
 TripleX Chung <xxx.phy@gmail.com> <triplex@zh-kernel.org>
 TripleX Chung <xxx.phy@gmail.com> <zhongyu@18mail.cn>
 Tsuneo Yoshioka <Tsuneo.Yoshioka@f-secure.com>
+Tudor Ambarus <tudor.ambarus@linaro.org> <tudor.ambarus@microchip.com>
 Tycho Andersen <tycho@tycho.pizza> <tycho@tycho.ws>
 Tzung-Bi Shih <tzungbi@kernel.org> <tzungbi@google.com>
 Uwe Kleine-König <ukleinek@informatik.uni-freiburg.de>
@@ -0,0 +1,18 @@
What:           /sys/kernel/debug/pktcdvd/pktcdvd[0-7]
Date:           Oct. 2006
KernelVersion:  2.6.20
Contact:        Thomas Maier <balagi@justmail.de>
Description:

                The pktcdvd module (packet writing driver) creates
                these files in debugfs:

                /sys/kernel/debug/pktcdvd/pktcdvd[0-7]/

                    ==== ====== ====================================
                    info 0444   Lots of driver statistics and infos.
                    ==== ====== ====================================

                Example::

                    cat /sys/kernel/debug/pktcdvd/pktcdvd0/info
@@ -0,0 +1,97 @@
sysfs interface
---------------
The pktcdvd module (packet writing driver) creates the following files in the
sysfs: (<devid> is in the format major:minor)

What:           /sys/class/pktcdvd/add
What:           /sys/class/pktcdvd/remove
What:           /sys/class/pktcdvd/device_map
Date:           Oct. 2006
KernelVersion:  2.6.20
Contact:        Thomas Maier <balagi@justmail.de>
Description:

                ========== ==============================================
                add        (WO) Write a block device id (major:minor) to
                           create a new pktcdvd device and map it to the
                           block device.

                remove     (WO) Write the pktcdvd device id (major:minor)
                           to remove the pktcdvd device.

                device_map (RO) Shows the device mapping in format:
                           pktcdvd[0-7] <pktdevid> <blkdevid>
                ========== ==============================================


What:           /sys/class/pktcdvd/pktcdvd[0-7]/dev
What:           /sys/class/pktcdvd/pktcdvd[0-7]/uevent
Date:           Oct. 2006
KernelVersion:  2.6.20
Contact:        Thomas Maier <balagi@justmail.de>
Description:
                dev:    (RO) Device id

                uevent: (WO) To send a uevent


What:           /sys/class/pktcdvd/pktcdvd[0-7]/stat/packets_started
What:           /sys/class/pktcdvd/pktcdvd[0-7]/stat/packets_finished
What:           /sys/class/pktcdvd/pktcdvd[0-7]/stat/kb_written
What:           /sys/class/pktcdvd/pktcdvd[0-7]/stat/kb_read
What:           /sys/class/pktcdvd/pktcdvd[0-7]/stat/kb_read_gather
What:           /sys/class/pktcdvd/pktcdvd[0-7]/stat/reset
Date:           Oct. 2006
KernelVersion:  2.6.20
Contact:        Thomas Maier <balagi@justmail.de>
Description:
                packets_started:  (RO) Number of started packets.

                packets_finished: (RO) Number of finished packets.

                kb_written:       (RO) kBytes written.

                kb_read:          (RO) kBytes read.

                kb_read_gather:   (RO) kBytes read to fill write packets.

                reset:            (WO) Write any value to it to reset
                                  pktcdvd device statistic values, like
                                  bytes read/written.


What:           /sys/class/pktcdvd/pktcdvd[0-7]/write_queue/size
What:           /sys/class/pktcdvd/pktcdvd[0-7]/write_queue/congestion_off
What:           /sys/class/pktcdvd/pktcdvd[0-7]/write_queue/congestion_on
Date:           Oct. 2006
KernelVersion:  2.6.20
Contact:        Thomas Maier <balagi@justmail.de>
Description:
                ============== ================================================
                size           (RO) Contains the size of the bio write queue.

                congestion_off (RW) If bio write queue size is below this mark,
                               accept new bio requests from the block layer.

                congestion_on  (RW) If bio write queue size is higher than this
                               mark, do no longer accept bio write requests
                               from the block layer and wait till the pktcdvd
                               device has processed enough bio's so that bio
                               write queue size is below congestion off mark.
                               A value of <= 0 disables congestion control.
                ============== ================================================


Example:
--------
To use the pktcdvd sysfs interface directly, you can do::

    # create a new pktcdvd device mapped to /dev/hdc
    echo "22:0" >/sys/class/pktcdvd/add
    cat /sys/class/pktcdvd/device_map
    # assuming device pktcdvd0 was created, look at stat's
    cat /sys/class/pktcdvd/pktcdvd0/stat/kb_written
    # print the device id of the mapped block device
    fgrep pktcdvd0 /sys/class/pktcdvd/device_map
    # remove device, using pktcdvd0 device id 253:0
    echo "253:0" >/sys/class/pktcdvd/remove
@@ -120,6 +120,8 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A710     | #2224489        | ARM64_ERRATUM_2224489       |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A715     | #2645198        | ARM64_ERRATUM_2645198       |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-X2       | #2119858        | ARM64_ERRATUM_2119858       |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-X2       | #2224489        | ARM64_ERRATUM_2224489       |
 +----------------+-----------------+-----------------+-----------------------------+
@@ -31,6 +31,12 @@ def have_command(cmd):
 # Get Sphinx version
 major, minor, patch = sphinx.version_info[:3]

+#
+# Warn about older versions that we don't want to support for much
+# longer.
+#
+if (major < 2) or (major == 2 and minor < 4):
+    print('WARNING: support for Sphinx < 2.4 will be removed soon.')

 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
@@ -339,7 +345,11 @@ html_use_smartypants = False

 # Custom sidebar templates, maps document names to template names.
+# Note that the RTD theme ignores this
-html_sidebars = { '**': ["about.html", 'searchbox.html', 'localtoc.html', 'sourcelink.html']}
+html_sidebars = { '**': ['searchbox.html', 'localtoc.html', 'sourcelink.html']}

+# about.html is available for alabaster theme. Add it at the front.
+if html_theme == 'alabaster':
+    html_sidebars['**'].insert(0, 'about.html')

 # Output file base name for HTML help builder.
 htmlhelp_basename = 'TheLinuxKerneldoc'
@@ -54,6 +54,17 @@ properties:
       - const: xo
       - const: alternate

   interrupts:
     minItems: 1
     maxItems: 3

   interrupt-names:
     minItems: 1
     items:
       - const: dcvsh-irq-0
       - const: dcvsh-irq-1
       - const: dcvsh-irq-2

   '#freq-domain-cells':
     const: 1
@@ -8,7 +8,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Atmel Advanced Encryption Standard (AES) HW cryptographic accelerator

 maintainers:
-  - Tudor Ambarus <tudor.ambarus@microchip.com>
+  - Tudor Ambarus <tudor.ambarus@linaro.org>

 properties:
   compatible:
@@ -8,7 +8,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Atmel Secure Hash Algorithm (SHA) HW cryptographic accelerator

 maintainers:
-  - Tudor Ambarus <tudor.ambarus@microchip.com>
+  - Tudor Ambarus <tudor.ambarus@linaro.org>

 properties:
   compatible:
@@ -8,7 +8,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Atmel Triple Data Encryption Standard (TDES) HW cryptographic accelerator

 maintainers:
-  - Tudor Ambarus <tudor.ambarus@microchip.com>
+  - Tudor Ambarus <tudor.ambarus@linaro.org>

 properties:
   compatible:
@@ -32,7 +32,7 @@ properties:
       - description: Display byte clock
       - description: Display byte interface clock
       - description: Display pixel clock
       - description: Display escape clock
       - description: Display core clock
       - description: Display AHB clock
       - description: Display AXI clock
@@ -137,8 +137,6 @@ required:
   - phys
-  - assigned-clocks
-  - assigned-clock-parents
   - power-domains
   - operating-points-v2
   - ports

 additionalProperties: false
@@ -69,7 +69,6 @@ required:
   - compatible
   - reg
   - reg-names
-  - vdds-supply

 unevaluatedProperties: false
@@ -39,7 +39,6 @@ required:
   - compatible
   - reg
   - reg-names
-  - vcca-supply

 unevaluatedProperties: false
@@ -34,6 +34,10 @@ properties:
   vddio-supply:
     description: Phandle to vdd-io regulator device node.

+  qcom,dsi-phy-regulator-ldo-mode:
+    type: boolean
+    description: Indicates if the LDO mode PHY regulator is wanted.

 required:
   - compatible
   - reg
@@ -72,7 +72,7 @@ examples:
     #include <dt-bindings/interconnect/qcom,qcm2290.h>
     #include <dt-bindings/power/qcom-rpmpd.h>

-    mdss@5e00000 {
+    display-subsystem@5e00000 {
         #address-cells = <1>;
         #size-cells = <1>;
         compatible = "qcom,qcm2290-mdss";
@@ -62,7 +62,7 @@ examples:
     #include <dt-bindings/interrupt-controller/arm-gic.h>
     #include <dt-bindings/power/qcom-rpmpd.h>

-    mdss@5e00000 {
+    display-subsystem@5e00000 {
         #address-cells = <1>;
         #size-cells = <1>;
         compatible = "qcom,sm6115-mdss";
@@ -40,6 +40,9 @@ properties:
   clock-names:
     const: stmmaceth

+  phy-supply:
+    description: PHY regulator

   syscon:
     $ref: /schemas/types.yaml#/definitions/phandle
     description:
@@ -16,9 +16,6 @@ description: |
   8k has a second unit which provides an interface with the xMDIO bus. This
   driver handles these interfaces.

-allOf:
-  - $ref: "mdio.yaml#"

 properties:
   compatible:
     enum:
@@ -39,13 +36,38 @@ required:
   - compatible
   - reg

+allOf:
+  - $ref: mdio.yaml#
+
+  - if:
+      required:
+        - interrupts
+
+    then:
+      properties:
+        reg:
+          items:
+            - items:
+                - $ref: /schemas/types.yaml#/definitions/cell
+                - const: 0x84
+
+    else:
+      properties:
+        reg:
+          items:
+            - items:
+                - $ref: /schemas/types.yaml#/definitions/cell
+                - enum:
+                    - 0x4
+                    - 0x10

 unevaluatedProperties: false

 examples:
   - |
     mdio@d0072004 {
       compatible = "marvell,orion-mdio";
-      reg = <0xd0072004 0x4>;
+      reg = <0xd0072004 0x84>;
       #address-cells = <1>;
       #size-cells = <0>;
       interrupts = <30>;
@@ -16,6 +16,7 @@ properties:
   compatible:
     enum:
       - mediatek,mt8186-mt6366-rt1019-rt5682s-sound
+      - mediatek,mt8186-mt6366-rt5682s-max98360-sound

   mediatek,platform:
     $ref: "/schemas/types.yaml#/definitions/phandle"
@@ -30,7 +30,9 @@ properties:
     const: 0

   clocks:
-    maxItems: 5
+    oneOf:
+      - maxItems: 3
+      - maxItems: 5

   clock-names:
     oneOf:
@@ -9,9 +9,6 @@ title: LPASS(Low Power Audio Subsystem) VA Macro audio codec
 maintainers:
   - Srinivas Kandagatla <srinivas.kandagatla@linaro.org>

-allOf:
-  - $ref: dai-common.yaml#

 properties:
   compatible:
     enum:
@@ -30,15 +27,12 @@ properties:
     const: 0

   clocks:
-    maxItems: 5
+    minItems: 5
+    maxItems: 6

   clock-names:
-    items:
-      - const: mclk
-      - const: npl
-      - const: macro
-      - const: dcodec
-      - const: fsgen
+    minItems: 5
+    maxItems: 6

   clock-output-names:
     maxItems: 1
@@ -55,10 +49,51 @@ required:
   - reg
   - "#sound-dai-cells"

+allOf:
+  - $ref: dai-common.yaml#
+
+  - if:
+      properties:
+        compatible:
+          enum:
+            - qcom,sc7280-lpass-wsa-macro
+            - qcom,sm8450-lpass-wsa-macro
+            - qcom,sc8280xp-lpass-wsa-macro
+    then:
+      properties:
+        clocks:
+          maxItems: 5
+        clock-names:
+          items:
+            - const: mclk
+            - const: npl
+            - const: macro
+            - const: dcodec
+            - const: fsgen
+
+  - if:
+      properties:
+        compatible:
+          enum:
+            - qcom,sm8250-lpass-wsa-macro
+    then:
+      properties:
+        clocks:
+          minItems: 6
+        clock-names:
+          items:
+            - const: mclk
+            - const: npl
+            - const: macro
+            - const: dcodec
+            - const: va
+            - const: fsgen

 unevaluatedProperties: false

 examples:
   - |
     #include <dt-bindings/clock/qcom,sm8250-lpass-aoncc.h>
     #include <dt-bindings/sound/qcom,q6afe.h>
     codec@3240000 {
       compatible = "qcom,sm8250-lpass-wsa-macro";
@@ -69,7 +104,8 @@ examples:
                <&audiocc 0>,
                <&q6afecc LPASS_HW_MACRO_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
                <&q6afecc LPASS_HW_DCODEC_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
+               <&aoncc LPASS_CDC_VA_MCLK>,
                <&vamacro>;
-      clock-names = "mclk", "npl", "macro", "dcodec", "fsgen";
+      clock-names = "mclk", "npl", "macro", "dcodec", "va", "fsgen";
       clock-output-names = "mclk";
     };
@@ -80,7 +80,7 @@ properties:
       or applicable for the respective data port.
       More info in MIPI Alliance SoundWire 1.0 Specifications.
     minItems: 3
-    maxItems: 5
+    maxItems: 8

   qcom,ports-sinterval-low:
     $ref: /schemas/types.yaml#/definitions/uint8-array
@@ -124,7 +124,7 @@ properties:
       or applicable for the respective data port.
       More info in MIPI Alliance SoundWire 1.0 Specifications.
     minItems: 3
-    maxItems: 5
+    maxItems: 8

   qcom,ports-block-pack-mode:
     $ref: /schemas/types.yaml#/definitions/uint8-array
@@ -154,7 +154,7 @@ properties:
       or applicable for the respective data port.
       More info in MIPI Alliance SoundWire 1.0 Specifications.
     minItems: 3
-    maxItems: 5
+    maxItems: 8
     items:
       oneOf:
         - minimum: 0
@@ -171,7 +171,7 @@ properties:
       or applicable for the respective data port.
       More info in MIPI Alliance SoundWire 1.0 Specifications.
     minItems: 3
-    maxItems: 5
+    maxItems: 8
     items:
       oneOf:
         - minimum: 0
@@ -187,7 +187,7 @@ properties:
       or applicable for the respective data port.
       More info in MIPI Alliance SoundWire 1.0 Specifications.
     minItems: 3
-    maxItems: 5
+    maxItems: 8
     items:
       oneOf:
         - minimum: 0
@@ -8,7 +8,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Atmel SPI device

 maintainers:
-  - Tudor Ambarus <tudor.ambarus@microchip.com>
+  - Tudor Ambarus <tudor.ambarus@linaro.org>

 allOf:
   - $ref: spi-controller.yaml#
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Atmel Quad Serial Peripheral Interface (QSPI)

 maintainers:
-  - Tudor Ambarus <tudor.ambarus@microchip.com>
+  - Tudor Ambarus <tudor.ambarus@linaro.org>

 allOf:
   - $ref: spi-controller.yaml#
@@ -44,9 +44,9 @@ properties:
     description:
       Maximum SPI clocking speed of the device in Hz.

-  spi-cs-setup-ns:
+  spi-cs-setup-delay-ns:
     description:
-      Delay in nanosecods to be introduced by the controller after CS is
+      Delay in nanoseconds to be introduced by the controller after CS is
       asserted.

   spi-rx-bus-width:
@@ -104,3 +104,4 @@ to do something different in the near future.
    ../riscv/patch-acceptance
    ../driver-api/media/maintainer-entry-profile
    ../driver-api/vfio-pci-device-specific-driver-acceptance
+   ../nvme/feature-and-quirk-policy
@@ -880,8 +880,8 @@ The kernel interface functions are as follows:

     notify_end_rx can be NULL or it can be used to specify a function to be
     called when the call changes state to end the Tx phase. This function is
-    called with the call-state spinlock held to prevent any reply or final ACK
-    from being delivered first.
+    called with a spinlock held to prevent the last DATA packet from being
+    transmitted until the function returns.

 (#) Receive data from a call::
@@ -0,0 +1,77 @@
.. SPDX-License-Identifier: GPL-2.0

===================================
Linux NVMe feature and quirk policy
===================================

This file explains the policy used to decide what is supported by the
Linux NVMe driver and what is not.


Introduction
============

NVM Express is an open collection of standards and information.

The Linux NVMe host driver in drivers/nvme/host/ supports devices
implementing the NVM Express (NVMe) family of specifications, which
currently consists of a number of documents:

- the NVMe Base specification
- various Command Set specifications (e.g. NVM Command Set)
- various Transport specifications (e.g. PCIe, Fibre Channel, RDMA, TCP)
- the NVMe Management Interface specification

See https://nvmexpress.org/developers/ for the NVMe specifications.


Supported features
==================

NVMe is a large suite of specifications, and contains features that are only
useful or suitable for specific use-cases. It is important to note that Linux
does not aim to implement every feature in the specification. Every additional
feature implemented introduces more code, more maintenance and potentially more
bugs. Hence there is an inherent tradeoff between functionality and
maintainability of the NVMe host driver.

Any feature implemented in the Linux NVMe host driver must support the
following requirements:

1. The feature is specified in a release version of an official NVMe
   specification, or in a ratified Technical Proposal (TP) that is
   available on NVMe website. Or if it is not directly related to the
   on-wire protocol, does not contradict any of the NVMe specifications.
2. Does not conflict with the Linux architecture, nor the design of the
   NVMe host driver.
3. Has a clear, indisputable value-proposition and a wide consensus across
   the community.

Vendor specific extensions are generally not supported in the NVMe host
driver.

It is strongly recommended to work with the Linux NVMe and block layer
maintainers and get feedback on specification changes that are intended
to be used by the Linux NVMe host driver in order to avoid conflict at a
later stage.


Quirks
======

Sometimes implementations of open standards fail to correctly implement parts
of the standards. Linux uses identifier-based quirks to work around such
implementation bugs. The intent of quirks is to deal with widely available
hardware, usually consumer, which Linux users can't use without these quirks.
Typically these implementations are not or only superficially tested with Linux
by the hardware manufacturer.

The Linux NVMe maintainers decide ad hoc whether to quirk implementations
based on the impact of the problem to Linux users and how it impacts
maintainability of the driver. In general quirks are a last resort, if no
firmware updates or other workarounds are available from the vendor.

Quirks will not be added to the Linux kernel for hardware that isn't available
on the mass market. Hardware that fails qualification for enterprise Linux
distributions, ChromeOS, Android or other consumers of the Linux kernel
should be fixed before it is shipped instead of relying on Linux quirks.
@@ -2,9 +2,9 @@

 .. _netdev-FAQ:

-==========
-netdev FAQ
-==========
+=============================
+Networking subsystem (netdev)
+=============================

 tl;dr
 -----
|
|||
- don't repost your patches within one 24h period
|
||||
- reverse xmas tree
|
||||
|
||||
What is netdev?
|
||||
---------------
|
||||
It is a mailing list for all network-related Linux stuff. This
|
||||
netdev
|
||||
------
|
||||
|
||||
netdev is a mailing list for all network-related Linux stuff. This
|
||||
includes anything found under net/ (i.e. core code like IPv6) and
|
||||
drivers/net (i.e. hardware specific drivers) in the Linux source tree.
|
||||
|
||||
Note that some subsystems (e.g. wireless drivers) which have a high
|
||||
volume of traffic have their own specific mailing lists.
|
||||
volume of traffic have their own specific mailing lists and trees.
|
||||
|
||||
The netdev list is managed (like many other Linux mailing lists) through
|
||||
VGER (http://vger.kernel.org/) with archives available at
|
||||
|
@@ -32,32 +33,10 @@ Aside from subsystems like those mentioned above, all network-related
 Linux development (i.e. RFC, review, comments, etc.) takes place on
 netdev.

-How do the changes posted to netdev make their way into Linux?
---------------------------------------------------------------
-There are always two trees (git repositories) in play. Both are
-driven by David Miller, the main network maintainer. There is the
-``net`` tree, and the ``net-next`` tree. As you can probably guess from
-the names, the ``net`` tree is for fixes to existing code already in the
-mainline tree from Linus, and ``net-next`` is where the new code goes
-for the future release. You can find the trees here:
+Development cycle
+-----------------

-- https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
-- https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
-
-How do I indicate which tree (net vs. net-next) my patch should be in?
-----------------------------------------------------------------------
-To help maintainers and CI bots you should explicitly mark which tree
-your patch is targeting. Assuming that you use git, use the prefix
-flag::
-
-  git format-patch --subject-prefix='PATCH net-next' start..finish
-
-Use ``net`` instead of ``net-next`` (always lower case) in the above for
-bug-fix ``net`` content.
-
-How often do changes from these trees make it to the mainline Linus tree?
--------------------------------------------------------------------------
-To understand this, you need to know a bit of background information on
+Here is a bit of background information on
 the cadence of Linux development. Each new release starts off with a
 two week "merge window" where the main maintainers feed their new stuff
 to Linus for merging into the mainline tree. After the two weeks, the
@@ -69,9 +48,33 @@ rc2 is released. This repeats on a roughly weekly basis until rc7
 state of churn), and a week after the last vX.Y-rcN was done, the
 official vX.Y is released.

-Relating that to netdev: At the beginning of the 2-week merge window,
-the ``net-next`` tree will be closed - no new changes/features. The
-accumulated new content of the past ~10 weeks will be passed onto
+To find out where we are now in the cycle - load the mainline (Linus)
+page here:
+
+  https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
+
+and note the top of the "tags" section. If it is rc1, it is early in
+the dev cycle. If it was tagged rc7 a week ago, then a release is
+probably imminent. If the most recent tag is a final release tag
+(without an ``-rcN`` suffix) - we are most likely in a merge window
+and ``net-next`` is closed.
+
+git trees and patch flow
+------------------------
+
+There are two networking trees (git repositories) in play. Both are
+driven by David Miller, the main network maintainer. There is the
+``net`` tree, and the ``net-next`` tree. As you can probably guess from
+the names, the ``net`` tree is for fixes to existing code already in the
+mainline tree from Linus, and ``net-next`` is where the new code goes
+for the future release. You can find the trees here:
+
+- https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
+- https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
+
+Relating that to kernel development: At the beginning of the 2-week
+merge window, the ``net-next`` tree will be closed - no new changes/features.
+The accumulated new content of the past ~10 weeks will be passed onto
 mainline/Linus via a pull request for vX.Y -- at the same time, the
 ``net`` tree will start accumulating fixes for this pulled content
 relating to vX.Y
@@ -103,22 +106,14 @@ focus for ``net`` is on stabilization and bug fixes.

 Finally, the vX.Y gets released, and the whole cycle starts over.

-So where are we now in this cycle?
-----------------------------------
+netdev patch review
+-------------------

-Load the mainline (Linus) page here:
+Patch status
+~~~~~~~~~~~~

-  https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
-
-and note the top of the "tags" section. If it is rc1, it is early in
-the dev cycle. If it was tagged rc7 a week ago, then a release is
-probably imminent. If the most recent tag is a final release tag
-(without an ``-rcN`` suffix) - we are most likely in a merge window
-and ``net-next`` is closed.
-
-How can I tell the status of a patch I've sent?
------------------------------------------------
-Start by looking at the main patchworks queue for netdev:
+Status of a patch can be checked by looking at the main patchwork
+queue for netdev:

   https://patchwork.kernel.org/project/netdevbpf/list/
@@ -127,8 +122,20 @@ patch. Patches are indexed by the ``Message-ID`` header of the emails
 which carried them so if you have trouble finding your patch append
 the value of ``Message-ID`` to the URL above.

-How long before my patch is accepted?
--------------------------------------
+Updating patch status
+~~~~~~~~~~~~~~~~~~~~~
+
+It may be tempting to help the maintainers and update the state of your
+own patches when you post a new version or spot a bug. Please **do not**
+do that.
+Interfering with the patch status on patchwork will only cause confusion. Leave
+it to the maintainer to figure out what is the most recent and current
+version that should be applied. If there is any doubt, the maintainer
+will reply and ask what should be done.
+
+Review timelines
+~~~~~~~~~~~~~~~~
+
 Generally speaking, the patches get triaged quickly (in less than
 48h). But be patient, if your patch is active in patchwork (i.e. it's
 listed on the project's patch list) the chances it was missed are close to zero.
@@ -136,116 +143,47 @@ Asking the maintainer for status updates on your
 patch is a good way to ensure your patch is ignored or pushed to the
 bottom of the priority list.

-Should I directly update patchwork state of my own patches?
------------------------------------------------------------
-It may be tempting to help the maintainers and update the state of your
-own patches when you post a new version or spot a bug. Please do not do that.
-Interfering with the patch status on patchwork will only cause confusion. Leave
-it to the maintainer to figure out what is the most recent and current
-version that should be applied. If there is any doubt, the maintainer
-will reply and ask what should be done.
+Partial resends
+~~~~~~~~~~~~~~~

-How do I divide my work into patches?
--------------------------------------
-
-Put yourself in the shoes of the reviewer. Each patch is read separately
-and therefore should constitute a comprehensible step towards your stated
-goal.
-
-Avoid sending series longer than 15 patches. Larger series takes longer
-to review as reviewers will defer looking at it until they find a large
-chunk of time. A small series can be reviewed in a short time, so Maintainers
-just do it. As a result, a sequence of smaller series gets merged quicker and
-with better review coverage. Re-posting large series also increases the mailing
-list traffic.
-
-I made changes to only a few patches in a patch series should I resend only those changed?
-------------------------------------------------------------------------------------------
-No, please resend the entire patch series and make sure you do number your
+Please always resend the entire patch series and make sure you do number your
 patches such that it is clear this is the latest and greatest set of patches
-that can be applied.
+that can be applied. Do not try to resend just the patches which changed.

-I have received review feedback, when should I post a revised version of the patches?
--------------------------------------------------------------------------------------
-Allow at least 24 hours to pass between postings. This will ensure reviewers
-from all geographical locations have a chance to chime in. Do not wait
-too long (weeks) between postings either as it will make it harder for reviewers
-to recall all the context.
+Handling misapplied patches
+~~~~~~~~~~~~~~~~~~~~~~~~~~~

-Make sure you address all the feedback in your new posting. Do not post a new
-version of the code if the discussion about the previous version is still
-ongoing, unless directly instructed by a reviewer.
-
-I submitted multiple versions of a patch series and it looks like a version other than the last one has been accepted, what should I do?
-----------------------------------------------------------------------------------------------------------------------------------------
 Occasionally a patch series gets applied before receiving critical feedback,
 or the wrong version of a series gets applied.
 There is no revert possible, once it is pushed out, it stays like that.
 Please send incremental versions on top of what has been merged in order to fix
 the patches the way they would look like if your latest patch series was to be
 merged.

-Are there special rules regarding stable submissions on netdev?
----------------------------------------------------------------
+Stable tree
+~~~~~~~~~~~

 While it used to be the case that netdev submissions were not supposed
 to carry explicit ``CC: stable@vger.kernel.org`` tags that is no longer
 the case today. Please follow the standard stable rules in
 :ref:`Documentation/process/stable-kernel-rules.rst <stable_kernel_rules>`,
 and make sure you include appropriate Fixes tags!

-Is the comment style convention different for the networking content?
----------------------------------------------------------------------
-Yes, in a largely trivial way. Instead of this::
+Security fixes
+~~~~~~~~~~~~~~

-  /*
-   * foobar blah blah blah
-   * another line of text
-   */
-
-it is requested that you make it look like this::
-
-  /* foobar blah blah blah
-   * another line of text
-   */
-
-What is "reverse xmas tree"?
-----------------------------
-
-Netdev has a convention for ordering local variables in functions.
-Order the variable declaration lines longest to shortest, e.g.::
-
-  struct scatterlist *sg;
-  struct sk_buff *skb;
-  int err, i;
-
-If there are dependencies between the variables preventing the ordering
-move the initialization out of line.
-
-I am working in existing code which uses non-standard formatting. Which formatting should I use?
-------------------------------------------------------------------------------------------------
-Make your code follow the most recent guidelines, so that eventually all code
-in the domain of netdev is in the preferred format.
-
-I found a bug that might have possible security implications or similar. Should I mail the main netdev maintainer off-list?
----------------------------------------------------------------------------------------------------------------------------
-No. The current netdev maintainer has consistently requested that
+Do not email netdev maintainers directly if you think you discovered
+a bug that might have possible security implications.
+The current netdev maintainer has consistently requested that
 people use the mailing lists and not reach out directly. If you aren't
 OK with that, then perhaps consider mailing security@kernel.org or
 reading about http://oss-security.openwall.org/wiki/mailing-lists/distros
 as possible alternative mechanisms.

-What level of testing is expected before I submit my change?
-------------------------------------------------------------
-At the very minimum your changes must survive an ``allyesconfig`` and an
-``allmodconfig`` build with ``W=1`` set without new warnings or failures.
-
-Ideally you will have done run-time testing specific to your change,
-and the patch series contains a set of kernel selftest for
-``tools/testing/selftests/net`` or using the KUnit framework.
+Co-posting changes to user space components
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

-You are expected to test your changes on top of the relevant networking
-tree (``net`` or ``net-next``) and not e.g. a stable tree or ``linux-next``.
-
-How do I post corresponding changes to user space components?
--------------------------------------------------------------
 User space code exercising kernel features should be posted
 alongside kernel patches. This gives reviewers a chance to see
 how any new interface is used and how well it works.
@@ -270,42 +208,10 @@ to the mailing list, e.g.::
 Posting as one thread is discouraged because it confuses patchwork
 (as of patchwork 2.2.2).

-Can I reproduce the checks from patchwork on my local machine?
---------------------------------------------------------------
+Preparing changes
+-----------------

-Checks in patchwork are mostly simple wrappers around existing kernel
-scripts, the sources are available at:
-
-https://github.com/kuba-moo/nipa/tree/master/tests
-
-Running all the builds and checks locally is a pain, can I post my patches and have the patchwork bot validate them?
---------------------------------------------------------------------------------------------------------------------
-
-No, you must ensure that your patches are ready by testing them locally
-before posting to the mailing list. The patchwork build bot instance
-gets overloaded very easily and netdev@vger really doesn't need more
-traffic if we can help it.
-
-netdevsim is great, can I extend it for my out-of-tree tests?
--------------------------------------------------------------
-
-No, ``netdevsim`` is a test vehicle solely for upstream tests.
-(Please add your tests under ``tools/testing/selftests/``.)
-
-We also give no guarantees that ``netdevsim`` won't change in the future
-in a way which would break what would normally be considered uAPI.
-
-Is netdevsim considered a "user" of an API?
--------------------------------------------
-
-Linux kernel has a long standing rule that no API should be added unless
-it has a real, in-tree user. Mock-ups and tests based on ``netdevsim`` are
-strongly encouraged when adding new APIs, but ``netdevsim`` in itself
-is **not** considered a use case/user.
-
-Any other tips to help ensure my net/net-next patch gets OK'd?
---------------------------------------------------------------
-Attention to detail. Re-read your own work as if you were the
+Attention to detail is important. Re-read your own work as if you were the
 reviewer. You can start with using ``checkpatch.pl``, perhaps even with
 the ``--strict`` flag. But do not be mindlessly robotic in doing so.
 If your change is a bug fix, make sure your commit log indicates the
@@ -320,10 +226,133 @@ Finally, go back and read
 :ref:`Documentation/process/submitting-patches.rst <submittingpatches>`
 to be sure you are not repeating some common mistake documented there.

-My company uses peer feedback in employee performance reviews. Can I ask netdev maintainers for feedback?
----------------------------------------------------------------------------------------------------------
+Indicating target tree
+~~~~~~~~~~~~~~~~~~~~~~

-Yes, especially if you spend significant amount of time reviewing code
+To help maintainers and CI bots you should explicitly mark which tree
+your patch is targeting. Assuming that you use git, use the prefix
+flag::
+
+  git format-patch --subject-prefix='PATCH net-next' start..finish
+
+Use ``net`` instead of ``net-next`` (always lower case) in the above for
+bug-fix ``net`` content.
+
+Dividing work into patches
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Put yourself in the shoes of the reviewer. Each patch is read separately
+and therefore should constitute a comprehensible step towards your stated
+goal.
+
+Avoid sending series longer than 15 patches. Larger series takes longer
+to review as reviewers will defer looking at it until they find a large
+chunk of time. A small series can be reviewed in a short time, so Maintainers
+just do it. As a result, a sequence of smaller series gets merged quicker and
+with better review coverage. Re-posting large series also increases the mailing
+list traffic.
+
+Multi-line comments
+~~~~~~~~~~~~~~~~~~~
+
+Comment style convention is slightly different for networking and most of
+the tree. Instead of this::
+
+  /*
+   * foobar blah blah blah
+   * another line of text
+   */
+
+it is requested that you make it look like this::
+
+  /* foobar blah blah blah
+   * another line of text
+   */
+
+Local variable ordering ("reverse xmas tree", "RCS")
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Netdev has a convention for ordering local variables in functions.
+Order the variable declaration lines longest to shortest, e.g.::
+
+  struct scatterlist *sg;
+  struct sk_buff *skb;
+  int err, i;
+
+If there are dependencies between the variables preventing the ordering
+move the initialization out of line.
+
+Format precedence
+~~~~~~~~~~~~~~~~~
+
+When working in existing code which uses nonstandard formatting make
+your code follow the most recent guidelines, so that eventually all code
+in the domain of netdev is in the preferred format.
+
+Resending after review
+~~~~~~~~~~~~~~~~~~~~~~
+
+Allow at least 24 hours to pass between postings. This will ensure reviewers
+from all geographical locations have a chance to chime in. Do not wait
+too long (weeks) between postings either as it will make it harder for reviewers
+to recall all the context.
+
+Make sure you address all the feedback in your new posting. Do not post a new
+version of the code if the discussion about the previous version is still
+ongoing, unless directly instructed by a reviewer.
+
+Testing
+-------
+
+Expected level of testing
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+At the very minimum your changes must survive an ``allyesconfig`` and an
+``allmodconfig`` build with ``W=1`` set without new warnings or failures.
+
+Ideally you will have done run-time testing specific to your change,
+and the patch series contains a set of kernel selftest for
+``tools/testing/selftests/net`` or using the KUnit framework.
+
+You are expected to test your changes on top of the relevant networking
+tree (``net`` or ``net-next``) and not e.g. a stable tree or ``linux-next``.
+
+patchwork checks
+~~~~~~~~~~~~~~~~
+
+Checks in patchwork are mostly simple wrappers around existing kernel
+scripts, the sources are available at:
+
+https://github.com/kuba-moo/nipa/tree/master/tests
+
+**Do not** post your patches just to run them through the checks.
+You must ensure that your patches are ready by testing them locally
+before posting to the mailing list. The patchwork build bot instance
+gets overloaded very easily and netdev@vger really doesn't need more
+traffic if we can help it.
+
+netdevsim
+~~~~~~~~~
+
+``netdevsim`` is a test driver which can be used to exercise driver
+configuration APIs without requiring capable hardware.
+Mock-ups and tests based on ``netdevsim`` are strongly encouraged when
+adding new APIs, but ``netdevsim`` in itself is **not** considered
+a use case/user. You must also implement the new APIs in a real driver.
+
+We give no guarantees that ``netdevsim`` won't change in the future
+in a way which would break what would normally be considered uAPI.
+
+``netdevsim`` is reserved for use by upstream tests only, so any
+new ``netdevsim`` features must be accompanied by selftests under
+``tools/testing/selftests/``.
+
+Testimonials / feedback
+-----------------------
+
+Some companies use peer feedback in employee performance reviews.
+Please feel free to request feedback from netdev maintainers,
+especially if you spend significant amount of time reviewing code
 and go out of your way to improve shared infrastructure.

 The feedback must be requested by you, the contributor, and will always
@@ -3,7 +3,7 @@

 import os
 import sys
-from sphinx.util.pycompat import execfile_
+from sphinx.util.osutil import fs_encoding

 # ------------------------------------------------------------------------------
 def loadConfig(namespace):
@@ -48,7 +48,9 @@ def loadConfig(namespace):
             sys.stdout.write("load additional sphinx-config: %s\n" % config_file)
             config = namespace.copy()
             config['__file__'] = config_file
-            execfile_(config_file, config)
+            with open(config_file, 'rb') as f:
+                code = compile(f.read(), fs_encoding, 'exec')
+                exec(code, config)
             del config['__file__']
             namespace.update(config)
         else:
@@ -1354,6 +1354,14 @@ the memory region are automatically reflected into the guest. For example, an
 mmap() that affects the region will be made visible immediately. Another
 example is madvise(MADV_DROP).

+Note: On arm64, a write generated by the page-table walker (to update
+the Access and Dirty flags, for example) never results in a
+KVM_EXIT_MMIO exit when the slot has the KVM_MEM_READONLY flag. This
+is because KVM cannot provide the data that would be written by the
+page-table walker, making it impossible to emulate the access.
+Instead, an abort (data abort if the cause of the page-table update
+was a load or a store, instruction abort if it was an instruction
+fetch) is injected in the guest.

 4.36 KVM_SET_TSS_ADDR
 ---------------------
@@ -5343,9 +5351,9 @@ KVM_XEN_ATTR_TYPE_SHARED_INFO
   32 vCPUs in the shared_info page, KVM does not automatically do so
   and instead requires that KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO be used
   explicitly even when the vcpu_info for a given vCPU resides at the
-  "default" location in the shared_info page. This is because KVM is
-  not aware of the Xen CPU id which is used as the index into the
-  vcpu_info[] array, so cannot know the correct default location.
+  "default" location in the shared_info page. This is because KVM may
+  not be aware of the Xen CPU id which is used as the index into the
+  vcpu_info[] array, so may not know the correct default location.

   Note that the shared info page may be constantly written to by KVM;
   it contains the event channel bitmap used to deliver interrupts to
@@ -5356,23 +5364,29 @@ KVM_XEN_ATTR_TYPE_SHARED_INFO
   any vCPU has been running or any event channel interrupts can be
   routed to the guest.

+  Setting the gfn to KVM_XEN_INVALID_GFN will disable the shared info
+  page.
+
 KVM_XEN_ATTR_TYPE_UPCALL_VECTOR
   Sets the exception vector used to deliver Xen event channel upcalls.
   This is the HVM-wide vector injected directly by the hypervisor
   (not through the local APIC), typically configured by a guest via
-  HVM_PARAM_CALLBACK_IRQ.
+  HVM_PARAM_CALLBACK_IRQ. This can be disabled again (e.g. for guest
+  SHUTDOWN_soft_reset) by setting it to zero.

 KVM_XEN_ATTR_TYPE_EVTCHN
   This attribute is available when the KVM_CAP_XEN_HVM ioctl indicates
   support for KVM_XEN_HVM_CONFIG_EVTCHN_SEND features. It configures
   an outbound port number for interception of EVTCHNOP_send requests
-  from the guest. A given sending port number may be directed back
-  to a specified vCPU (by APIC ID) / port / priority on the guest,
-  or to trigger events on an eventfd. The vCPU and priority can be
-  changed by setting KVM_XEN_EVTCHN_UPDATE in a subsequent call,
-  but other fields cannot change for a given sending port. A port
-  mapping is removed by using KVM_XEN_EVTCHN_DEASSIGN in the flags
-  field.
+  from the guest. A given sending port number may be directed back to
+  a specified vCPU (by APIC ID) / port / priority on the guest, or to
+  trigger events on an eventfd. The vCPU and priority can be changed
+  by setting KVM_XEN_EVTCHN_UPDATE in a subsequent call, but other
+  fields cannot change for a given sending port. A port mapping is
+  removed by using KVM_XEN_EVTCHN_DEASSIGN in the flags field. Passing
+  KVM_XEN_EVTCHN_RESET in the flags field removes all interception of
+  outbound event channels. The values of the flags field are mutually
+  exclusive and cannot be combined as a bitmask.
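
As a hedged illustration of the flags field just described (not part of the
patch; the vm_fd and port number are assumptions), userspace could remove an
outbound port mapping like this::

    /* Hedged sketch: deassign a previously-intercepted outbound event
     * channel port with KVM_XEN_EVTCHN_DEASSIGN. Assumes vm_fd came from
     * KVM_CREATE_VM on a kernel advertising KVM_XEN_HVM_CONFIG_EVTCHN_SEND.
     */
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int xen_evtchn_deassign(int vm_fd, __u32 port)
    {
        struct kvm_xen_hvm_attr attr = {
            .type = KVM_XEN_ATTR_TYPE_EVTCHN,
            .u.evtchn.send_port = port,
            .u.evtchn.flags = KVM_XEN_EVTCHN_DEASSIGN,
        };

        return ioctl(vm_fd, KVM_XEN_HVM_SET_ATTR, &attr);
    }
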

 KVM_XEN_ATTR_TYPE_XEN_VERSION
   This attribute is available when the KVM_CAP_XEN_HVM ioctl indicates
@@ -5388,7 +5402,7 @@ KVM_XEN_ATTR_TYPE_RUNSTATE_UPDATE_FLAG
   support for KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG. It enables the
   XEN_RUNSTATE_UPDATE flag which allows guest vCPUs to safely read
   other vCPUs' vcpu_runstate_info. Xen guests enable this feature via
-  the VM_ASST_TYPE_runstate_update_flag of the HYPERVISOR_vm_assist
+  the VMASST_TYPE_runstate_update_flag of the HYPERVISOR_vm_assist
   hypercall.

 4.127 KVM_XEN_HVM_GET_ATTR
@@ -5446,15 +5460,18 @@ KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO
   As with the shared_info page for the VM, the corresponding page may be
   dirtied at any time if event channel interrupt delivery is enabled, so
   userspace should always assume that the page is dirty without relying
-  on dirty logging.
+  on dirty logging. Setting the gpa to KVM_XEN_INVALID_GPA will disable
+  the vcpu_info.

 KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO
   Sets the guest physical address of an additional pvclock structure
   for a given vCPU. This is typically used for guest vsyscall support.
+  Setting the gpa to KVM_XEN_INVALID_GPA will disable the structure.

 KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR
   Sets the guest physical address of the vcpu_runstate_info for a given
   vCPU. This is how a Xen guest tracks CPU state such as steal time.
+  Setting the gpa to KVM_XEN_INVALID_GPA will disable the runstate area.

 KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT
   Sets the runstate (RUNSTATE_running/_runnable/_blocked/_offline) of
@@ -5487,7 +5504,8 @@ KVM_XEN_VCPU_ATTR_TYPE_TIMER
   This attribute is available when the KVM_CAP_XEN_HVM ioctl indicates
   support for KVM_XEN_HVM_CONFIG_EVTCHN_SEND features. It sets the
   event channel port/priority for the VIRQ_TIMER of the vCPU, as well
-  as allowing a pending timer to be saved/restored.
+  as allowing a pending timer to be saved/restored. Setting the timer
+  port to zero disables kernel handling of the singleshot timer.

 KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR
   This attribute is available when the KVM_CAP_XEN_HVM ioctl indicates
@@ -5495,7 +5513,8 @@ KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR
   per-vCPU local APIC upcall vector, configured by a Xen guest with
   the HVMOP_set_evtchn_upcall_vector hypercall. This is typically
   used by Windows guests, and is distinct from the HVM-wide upcall
-  vector configured with HVM_PARAM_CALLBACK_IRQ.
+  vector configured with HVM_PARAM_CALLBACK_IRQ. It is disabled by
+  setting the vector to zero.


 4.129 KVM_XEN_VCPU_GET_ATTR
@@ -6577,11 +6596,6 @@ Please note that the kernel is allowed to use the kvm_run structure as the
 primary storage for certain register types. Therefore, the kernel may use the
 values in kvm_run even if the corresponding bit in kvm_dirty_regs is not set.

-::
-
-  };
-

 6. Capabilities that can be enabled on vCPUs
 ============================================
@@ -8304,6 +8318,20 @@ CPU[EAX=1]:ECX[24] (TSC_DEADLINE) is not reported by ``KVM_GET_SUPPORTED_CPUID``
 It can be enabled if ``KVM_CAP_TSC_DEADLINE_TIMER`` is present and the kernel
 has enabled in-kernel emulation of the local APIC.

+CPU topology
+~~~~~~~~~~~~
+
+Several CPUID values include topology information for the host CPU:
+0x0b and 0x1f for Intel systems, 0x8000001e for AMD systems. Different
+versions of KVM return different values for this information and userspace
+should not rely on it. Currently they return all zeroes.
+
+If userspace wishes to set up a guest topology, it should be careful that
+the values of these three leaves differ for each CPU. In particular,
+the APIC ID is found in EDX for all subleaves of 0x0b and 0x1f, and in EAX
+for 0x8000001e; the latter also encodes the core id and node id in bits
+7:0 of EBX and ECX respectively.
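
As a hedged, host-side illustration (not part of the KVM documentation
itself; assumes GCC or Clang on an x86 machine whose CPU implements leaf
0x0b), the layout described above can be inspected with <cpuid.h>::

    /* Hedged sketch: read the x2APIC ID from CPUID leaf 0x0b, which the
     * text above notes is reported in EDX for all subleaves.
     */
    #include <stdio.h>
    #include <cpuid.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid_count(0x0b, 0, &eax, &ebx, &ecx, &edx)) {
            fprintf(stderr, "CPUID leaf 0x0b not supported\n");
            return 1;
        }
        printf("x2APIC ID: %u\n", edx);
        return 0;
    }
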

 Obsolete ioctls and capabilities
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -16,20 +16,30 @@ The acquisition orders for mutexes are as follows:
 - kvm->slots_lock is taken outside kvm->irq_lock, though acquiring
   them together is quite rare.

-- Unlike kvm->slots_lock, kvm->slots_arch_lock is released before
-  synchronize_srcu(&kvm->srcu). Therefore kvm->slots_arch_lock
-  can be taken inside a kvm->srcu read-side critical section,
-  while kvm->slots_lock cannot.
-
 - kvm->mn_active_invalidate_count ensures that pairs of
   invalidate_range_start() and invalidate_range_end() callbacks
   use the same memslots array. kvm->slots_lock and kvm->slots_arch_lock
   are taken on the waiting side in install_new_memslots, so MMU notifiers
   must not take either kvm->slots_lock or kvm->slots_arch_lock.

+For SRCU:
+
+- ``synchronize_srcu(&kvm->srcu)`` is called inside critical sections
+  for kvm->lock, vcpu->mutex and kvm->slots_lock. These locks _cannot_
+  be taken inside a kvm->srcu read-side critical section; that is, the
+  following is broken::
+
+      srcu_read_lock(&kvm->srcu);
+      mutex_lock(&kvm->slots_lock);
+
+- kvm->slots_arch_lock instead is released before the call to
+  ``synchronize_srcu()``. It _can_ therefore be taken inside a
+  kvm->srcu read-side critical section, for example while processing
+  a vmexit.
+
 On x86:

-- vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock
+- vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock and kvm->arch.xen.xen_lock

 - kvm->arch.mmu_lock is an rwlock. kvm->arch.tdp_mmu_pages_lock and
   kvm->arch.mmu_unsync_pages_lock are taken inside kvm->arch.mmu_lock, and

MAINTAINERS
@@ -11356,9 +11356,9 @@ F: virt/kvm/*
 KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64)
 M: Marc Zyngier <maz@kernel.org>
 R: James Morse <james.morse@arm.com>
 R: Alexandru Elisei <alexandru.elisei@arm.com>
 R: Suzuki K Poulose <suzuki.poulose@arm.com>
 R: Oliver Upton <oliver.upton@linux.dev>
 R: Zenghui Yu <yuzenghui@huawei.com>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L: kvmarm@lists.linux.dev
 L: kvmarm@lists.cs.columbia.edu (deprecated, moderated for non-subscribers)
@@ -11468,7 +11468,7 @@ F: arch/x86/kvm/hyperv.*
 F: arch/x86/kvm/kvm_onhyperv.*
 F: arch/x86/kvm/svm/hyperv.*
 F: arch/x86/kvm/svm/svm_onhyperv.*
-F: arch/x86/kvm/vmx/evmcs.*
+F: arch/x86/kvm/vmx/hyperv.*

 KVM X86 Xen (KVM/Xen)
 M: David Woodhouse <dwmw2@infradead.org>
@ -13620,7 +13620,7 @@ F: arch/microblaze/
|
|||
|
||||
MICROCHIP AT91 DMA DRIVERS
|
||||
M: Ludovic Desroches <ludovic.desroches@microchip.com>
|
||||
M: Tudor Ambarus <tudor.ambarus@microchip.com>
|
||||
M: Tudor Ambarus <tudor.ambarus@linaro.org>
|
||||
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
|
||||
L: dmaengine@vger.kernel.org
|
||||
S: Supported
|
||||
|
@ -13665,7 +13665,7 @@ F: Documentation/devicetree/bindings/media/microchip,csi2dc.yaml
|
|||
F: drivers/media/platform/microchip/microchip-csi2dc.c
|
||||
|
||||
MICROCHIP ECC DRIVER
|
||||
M: Tudor Ambarus <tudor.ambarus@microchip.com>
|
||||
M: Tudor Ambarus <tudor.ambarus@linaro.org>
|
||||
L: linux-crypto@vger.kernel.org
|
||||
S: Maintained
|
||||
F: drivers/crypto/atmel-ecc.*
|
||||
|
@ -13762,7 +13762,7 @@ S: Maintained
|
|||
F: drivers/mmc/host/atmel-mci.c
|
||||
|
||||
MICROCHIP NAND DRIVER
|
||||
M: Tudor Ambarus <tudor.ambarus@microchip.com>
|
||||
M: Tudor Ambarus <tudor.ambarus@linaro.org>
|
||||
L: linux-mtd@lists.infradead.org
|
||||
S: Supported
|
||||
F: Documentation/devicetree/bindings/mtd/atmel-nand.txt
|
||||
|
@ -13814,7 +13814,7 @@ S: Supported
|
|||
F: drivers/power/reset/at91-sama5d2_shdwc.c
|
||||
|
||||
MICROCHIP SPI DRIVER
|
||||
M: Tudor Ambarus <tudor.ambarus@microchip.com>
|
||||
M: Tudor Ambarus <tudor.ambarus@linaro.org>
|
||||
S: Supported
|
||||
F: drivers/spi/spi-atmel.*
|
||||
|
||||
|
@ -14916,9 +14916,11 @@ L: linux-nvme@lists.infradead.org
|
|||
S: Supported
|
||||
W: http://git.infradead.org/nvme.git
|
||||
T: git://git.infradead.org/nvme.git
|
||||
F: Documentation/nvme/
|
||||
F: drivers/nvme/host/
|
||||
F: drivers/nvme/common/
|
||||
F: include/linux/nvme*
|
||||
F: include/linux/nvme.h
|
||||
F: include/linux/nvme-*.h
|
||||
F: include/uapi/linux/nvme_ioctl.h
|
||||
|
||||
NVM EXPRESS FABRICS AUTHENTICATION
|
||||
|
@ -16609,6 +16611,13 @@ S: Supported
|
|||
F: Documentation/devicetree/bindings/input/pine64,pinephone-keyboard.yaml
|
||||
F: drivers/input/keyboard/pinephone-keyboard.c
|
||||
|
||||
PKTCDVD DRIVER
|
||||
M: linux-block@vger.kernel.org
|
||||
S: Orphan
|
||||
F: drivers/block/pktcdvd.c
|
||||
F: include/linux/pktcdvd.h
|
||||
F: include/uapi/linux/pktcdvd.h
|
||||
|
||||
PLANTOWER PMS7003 AIR POLLUTION SENSOR DRIVER
|
||||
M: Tomasz Duszynski <tduszyns@gmail.com>
|
||||
S: Maintained
|
||||
|
@ -19664,7 +19673,7 @@ F: drivers/clk/spear/
|
|||
F: drivers/pinctrl/spear/
|
||||
|
||||
SPI NOR SUBSYSTEM
|
||||
M: Tudor Ambarus <tudor.ambarus@microchip.com>
|
||||
M: Tudor Ambarus <tudor.ambarus@linaro.org>
|
||||
M: Pratyush Yadav <pratyush@kernel.org>
|
||||
R: Michael Walle <michael@walle.cc>
|
||||
L: linux-mtd@lists.infradead.org
|
||||
|
@ -22245,7 +22254,9 @@ F: drivers/scsi/vmw_pvscsi.c
|
|||
F: drivers/scsi/vmw_pvscsi.h
|
||||
|
||||
VMWARE VIRTUAL PTP CLOCK DRIVER
|
||||
M: Vivek Thampi <vithampi@vmware.com>
|
||||
M: Srivatsa S. Bhat (VMware) <srivatsa@csail.mit.edu>
|
||||
M: Deep Shah <sdeep@vmware.com>
|
||||
R: Alexey Makhalov <amakhalov@vmware.com>
|
||||
R: VMware PV-Drivers Reviewers <pv-drivers@vmware.com>
|
||||
L: netdev@vger.kernel.org
|
||||
S: Supported
|
||||
|
|
Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 2
SUBLEVEL = 0
EXTRAVERSION = -rc1
EXTRAVERSION = -rc4
NAME = Hurr durr I'ma ninja sloth

# *DOCUMENTATION*

@@ -297,7 +297,7 @@ no-compiler-targets := $(no-dot-config-targets) install dtbs_install \
        headers_install modules_install kernelrelease image_name
no-sync-config-targets := $(no-dot-config-targets) %install kernelrelease \
        image_name
single-targets := %.a %.i %.rsi %.ko %.lds %.ll %.lst %.mod %.o %.s %.symtypes %/
single-targets := %.a %.i %.ko %.lds %.ll %.lst %.mod %.o %.rsi %.s %.symtypes %/

config-build :=
mixed-build :=

@@ -1986,7 +1986,7 @@ $(single-no-ko): $(build-dir)
# Remove MODORDER when done because it is not the real one.
PHONY += single_modules
single_modules: $(single-no-ko) modules_prepare
        $(Q){ $(foreach m, $(single-ko), echo $(extmod_prefix)$m;) } > $(MODORDER)
        $(Q){ $(foreach m, $(single-ko), echo $(extmod_prefix)$(m:%.ko=%.o);) } > $(MODORDER)
        $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
ifneq ($(KBUILD_MODPOST_NOFINAL),1)
        $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modfinal
@@ -128,15 +128,16 @@ extern int vfp_restore_user_hwstate(struct user_vfp *,
#define TIF_NEED_RESCHED	1	/* rescheduling necessary */
#define TIF_NOTIFY_RESUME	2	/* callback before returning to user */
#define TIF_UPROBE		3	/* breakpointed or singlestepping */
#define TIF_SYSCALL_TRACE	4	/* syscall trace active */
#define TIF_SYSCALL_AUDIT	5	/* syscall auditing active */
#define TIF_SYSCALL_TRACEPOINT	6	/* syscall tracepoint instrumentation */
#define TIF_SECCOMP		7	/* seccomp syscall filtering active */
#define TIF_NOTIFY_SIGNAL	8	/* signal notifications exist */
#define TIF_NOTIFY_SIGNAL	4	/* signal notifications exist */

#define TIF_USING_IWMMXT	17
#define TIF_MEMDIE		18	/* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK	20
#define TIF_RESTORE_SIGMASK	19
#define TIF_SYSCALL_TRACE	20	/* syscall trace active */
#define TIF_SYSCALL_AUDIT	21	/* syscall auditing active */
#define TIF_SYSCALL_TRACEPOINT	22	/* syscall tracepoint instrumentation */
#define TIF_SECCOMP		23	/* seccomp syscall filtering active */

#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
@@ -184,8 +184,6 @@ config ARM64
	select HAVE_DEBUG_KMEMLEAK
	select HAVE_DMA_CONTIGUOUS
	select HAVE_DYNAMIC_FTRACE
	select HAVE_DYNAMIC_FTRACE_WITH_ARGS \
		if $(cc-option,-fpatchable-function-entry=2)
	select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY \
		if DYNAMIC_FTRACE_WITH_ARGS
	select HAVE_EFFICIENT_UNALIGNED_ACCESS

@@ -972,6 +970,22 @@ config ARM64_ERRATUM_2457168

	  If unsure, say Y.

config ARM64_ERRATUM_2645198
	bool "Cortex-A715: 2645198: Workaround possible [ESR|FAR]_ELx corruption"
	default y
	help
	  This option adds the workaround for ARM Cortex-A715 erratum 2645198.

	  If a Cortex-A715 cpu sees a page mapping permissions change from executable
	  to non-executable, it may corrupt the ESR_ELx and FAR_ELx registers on the
	  next instruction abort caused by permission fault.

	  Only user-space does executable to non-executable permission transition via
	  mprotect() system call. Workaround the problem by doing a break-before-make
	  TLB invalidation, for all changes to executable user space mappings.

	  If unsure, say Y.

config CAVIUM_ERRATUM_22375
	bool "Cavium erratum 22375, 24313"
	default y
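The help text above boils down to one user-visible trigger. A hedged illustration (hypothetical variables; the actual workaround code is in the arch/arm64/mm hunks further down in this merge):

    /*
     * Mapping was PROT_READ|PROT_EXEC; dropping exec is the only
     * transition the erratum cares about, and it only happens via
     * mprotect() from user space.
     */
    mprotect(code_page, page_size, PROT_READ);

On an affected Cortex-A715, the kernel now performs a full break-before-make for exactly this case: clear the PTE, invalidate the TLB, then install the new entry.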
@@ -8,6 +8,7 @@
 */

#include <linux/linkage.h>
#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include "sm4-ce-asm.h"

@@ -104,7 +105,7 @@ SYM_FUNC_START(sm4_ce_ccm_final)
SYM_FUNC_END(sm4_ce_ccm_final)

.align 3
SYM_FUNC_START(sm4_ce_ccm_enc)
SYM_TYPED_FUNC_START(sm4_ce_ccm_enc)
	/* input:
	 *   x0: round key array, CTX
	 *   x1: dst

@@ -216,7 +217,7 @@ SYM_FUNC_START(sm4_ce_ccm_enc)
SYM_FUNC_END(sm4_ce_ccm_enc)

.align 3
SYM_FUNC_START(sm4_ce_ccm_dec)
SYM_TYPED_FUNC_START(sm4_ce_ccm_dec)
	/* input:
	 *   x0: round key array, CTX
	 *   x1: dst
@@ -9,6 +9,7 @@
 */

#include <linux/linkage.h>
#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include "sm4-ce-asm.h"

@@ -370,7 +371,7 @@ SYM_FUNC_START(pmull_ghash_update)
SYM_FUNC_END(pmull_ghash_update)

.align 3
SYM_FUNC_START(sm4_ce_pmull_gcm_enc)
SYM_TYPED_FUNC_START(sm4_ce_pmull_gcm_enc)
	/* input:
	 *   x0: round key array, CTX
	 *   x1: dst

@@ -581,7 +582,7 @@ SYM_FUNC_END(sm4_ce_pmull_gcm_enc)
#define RH3	v20

.align 3
SYM_FUNC_START(sm4_ce_pmull_gcm_dec)
SYM_TYPED_FUNC_START(sm4_ce_pmull_gcm_dec)
	/* input:
	 *   x0: round key array, CTX
	 *   x1: dst
|
|||
" cbnz %w0, 1b\n" \
|
||||
" " #mb "\n" \
|
||||
"2:" \
|
||||
: "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr) \
|
||||
: "=&r" (tmp), "=&r" (ret), "+Q" (*(__uint128_t *)ptr) \
|
||||
: "r" (old1), "r" (old2), "r" (new1), "r" (new2) \
|
||||
: cl); \
|
||||
\
|
||||
|
|
|
@@ -311,7 +311,7 @@ __lse__cmpxchg_double##name(unsigned long old1,				\
	"	eor	%[old2], %[old2], %[oldval2]\n"			\
	"	orr	%[old1], %[old1], %[old2]"			\
	: [old1] "+&r" (x0), [old2] "+&r" (x1),				\
	  [v] "+Q" (*(unsigned long *)ptr)				\
	  [v] "+Q" (*(__uint128_t *)ptr)				\
	: [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4),		\
	  [oldval1] "r" (oldval1), [oldval2] "r" (oldval2)		\
	: cl);								\
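The one-character-looking change in these two hunks is really about operand width: with a cast to unsigned long the compiler may assume only eight bytes at ptr are accessed, while the __uint128_t lvalue tells it the full 16-byte pair is read and written. A reduced, hedged sketch of the LL/SC variant (arm64 inline asm only; the real macro is generated per memory ordering, this is hand-expanded for one):

    /*
     * Sketch under stated assumptions, not the kernel macro: the "+Q"
     * constraint on a __uint128_t is what makes the compiler treat all
     * 16 bytes as clobbered.
     */
    static inline int cmpxchg_double_sketch(void *ptr,
                                            unsigned long old1, unsigned long old2,
                                            unsigned long new1, unsigned long new2)
    {
            unsigned long tmp, ret;

            asm volatile(
            "1:     ldxp    %0, %1, %2\n"
            "       eor     %0, %0, %3\n"
            "       eor     %1, %1, %4\n"
            "       orr     %1, %0, %1\n"
            "       cbnz    %1, 2f\n"
            "       stxp    %w0, %5, %6, %2\n"
            "       cbnz    %w0, 1b\n"
            "2:"
            : "=&r" (tmp), "=&r" (ret), "+Q" (*(__uint128_t *)ptr)
            : "r" (old1), "r" (old2), "r" (new1), "r" (new2)
            : "memory");

            return ret == 0;    /* nonzero ret means the comparison failed */
    }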
@@ -124,6 +124,8 @@
#define APPLE_CPU_PART_M1_FIRESTORM_PRO	0x025
#define APPLE_CPU_PART_M1_ICESTORM_MAX	0x028
#define APPLE_CPU_PART_M1_FIRESTORM_MAX	0x029
#define APPLE_CPU_PART_M2_BLIZZARD	0x032
#define APPLE_CPU_PART_M2_AVALANCHE	0x033

#define AMPERE_CPU_PART_AMPERE1	0xAC3

@@ -177,6 +179,8 @@
#define MIDR_APPLE_M1_FIRESTORM_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM_PRO)
#define MIDR_APPLE_M1_ICESTORM_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM_MAX)
#define MIDR_APPLE_M1_FIRESTORM_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM_MAX)
#define MIDR_APPLE_M2_BLIZZARD MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_BLIZZARD)
#define MIDR_APPLE_M2_AVALANCHE MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_AVALANCHE)
#define MIDR_AMPERE1 MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1)

/* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */
@@ -114,6 +114,15 @@
#define ESR_ELx_FSC_ACCESS	(0x08)
#define ESR_ELx_FSC_FAULT	(0x04)
#define ESR_ELx_FSC_PERM	(0x0C)
#define ESR_ELx_FSC_SEA_TTW0	(0x14)
#define ESR_ELx_FSC_SEA_TTW1	(0x15)
#define ESR_ELx_FSC_SEA_TTW2	(0x16)
#define ESR_ELx_FSC_SEA_TTW3	(0x17)
#define ESR_ELx_FSC_SECC	(0x18)
#define ESR_ELx_FSC_SECC_TTW0	(0x1c)
#define ESR_ELx_FSC_SECC_TTW1	(0x1d)
#define ESR_ELx_FSC_SECC_TTW2	(0x1e)
#define ESR_ELx_FSC_SECC_TTW3	(0x1f)

/* ISS field definitions for Data Aborts */
#define ESR_ELx_ISV_SHIFT	(24)
@@ -49,6 +49,15 @@ extern pte_t huge_ptep_get(pte_t *ptep);

void __init arm64_hugetlb_cma_reserve(void);

#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
extern pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep);

#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
extern void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep,
					 pte_t old_pte, pte_t new_pte);

#include <asm-generic/hugetlb.h>

#endif /* __ASM_HUGETLB_H */
@@ -319,21 +319,6 @@
			 BIT(18) |		\
			 GENMASK(16, 15))

/* For compatibility with fault code shared with 32-bit */
#define FSC_FAULT	ESR_ELx_FSC_FAULT
#define FSC_ACCESS	ESR_ELx_FSC_ACCESS
#define FSC_PERM	ESR_ELx_FSC_PERM
#define FSC_SEA		ESR_ELx_FSC_EXTABT
#define FSC_SEA_TTW0	(0x14)
#define FSC_SEA_TTW1	(0x15)
#define FSC_SEA_TTW2	(0x16)
#define FSC_SEA_TTW3	(0x17)
#define FSC_SECC	(0x18)
#define FSC_SECC_TTW0	(0x1c)
#define FSC_SECC_TTW1	(0x1d)
#define FSC_SECC_TTW2	(0x1e)
#define FSC_SECC_TTW3	(0x1f)

/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
#define HPFAR_MASK	(~UL(0xf))
/*
@@ -349,16 +349,16 @@ static __always_inline u8 kvm_vcpu_trap_get_fault_level(const struct kvm_vcpu *v
static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
{
	switch (kvm_vcpu_trap_get_fault(vcpu)) {
	case FSC_SEA:
	case FSC_SEA_TTW0:
	case FSC_SEA_TTW1:
	case FSC_SEA_TTW2:
	case FSC_SEA_TTW3:
	case FSC_SECC:
	case FSC_SECC_TTW0:
	case FSC_SECC_TTW1:
	case FSC_SECC_TTW2:
	case FSC_SECC_TTW3:
	case ESR_ELx_FSC_EXTABT:
	case ESR_ELx_FSC_SEA_TTW0:
	case ESR_ELx_FSC_SEA_TTW1:
	case ESR_ELx_FSC_SEA_TTW2:
	case ESR_ELx_FSC_SEA_TTW3:
	case ESR_ELx_FSC_SECC:
	case ESR_ELx_FSC_SECC_TTW0:
	case ESR_ELx_FSC_SECC_TTW1:
	case ESR_ELx_FSC_SECC_TTW2:
	case ESR_ELx_FSC_SECC_TTW3:
		return true;
	default:
		return false;

@@ -373,8 +373,26 @@ static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)

static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_abt_iss1tw(vcpu))
		return true;
	if (kvm_vcpu_abt_iss1tw(vcpu)) {
		/*
		 * Only a permission fault on a S1PTW should be
		 * considered as a write. Otherwise, page tables baked
		 * in a read-only memslot will result in an exception
		 * being delivered in the guest.
		 *
		 * The drawback is that we end-up faulting twice if the
		 * guest is using any of HW AF/DB: a translation fault
		 * to map the page containing the PT (read only at
		 * first), then a permission fault to allow the flags
		 * to be set.
		 */
		switch (kvm_vcpu_trap_get_fault_type(vcpu)) {
		case ESR_ELx_FSC_PERM:
			return true;
		default:
			return false;
		}
	}

	if (kvm_vcpu_trap_is_iabt(vcpu))
		return false;
@@ -681,7 +681,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
#define pud_leaf(pud)		(pud_present(pud) && !pud_table(pud))
#define pud_valid(pud)		pte_valid(pud_pte(pud))
#define pud_user(pud)		pte_user(pud_pte(pud))
#define pud_user_exec(pud)	pte_user_exec(pud_pte(pud))

static inline void set_pud(pud_t *pudp, pud_t pud)
{

@@ -730,6 +730,7 @@ static inline pmd_t *pud_pgtable(pud_t pud)
#else

#define pud_page_paddr(pud)	({ BUILD_BUG(); 0; })
#define pud_user_exec(pud)	pud_user(pud) /* Always 0 with folding */

/* Match pmd_offset folding in <asm/generic/pgtable-nopmd.h> */
#define pmd_set_fixmap(addr)	NULL

@@ -862,12 +863,12 @@ static inline bool pte_user_accessible_page(pte_t pte)

static inline bool pmd_user_accessible_page(pmd_t pmd)
{
	return pmd_leaf(pmd) && (pmd_user(pmd) || pmd_user_exec(pmd));
	return pmd_leaf(pmd) && !pmd_present_invalid(pmd) && (pmd_user(pmd) || pmd_user_exec(pmd));
}

static inline bool pud_user_accessible_page(pud_t pud)
{
	return pud_leaf(pud) && pud_user(pud);
	return pud_leaf(pud) && (pud_user(pud) || pud_user_exec(pud));
}
#endif

@@ -1093,6 +1094,15 @@ static inline bool pud_sect_supported(void)
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
#define ptep_modify_prot_start ptep_modify_prot_start
extern pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep);

#define ptep_modify_prot_commit ptep_modify_prot_commit
extern void ptep_modify_prot_commit(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep,
				    pte_t old_pte, pte_t new_pte);
#endif /* !__ASSEMBLY__ */

#endif /* __ASM_PGTABLE_H */
@@ -16,7 +16,7 @@
#define UPROBE_SWBP_INSN_SIZE	AARCH64_INSN_SIZE
#define UPROBE_XOL_SLOT_BYTES	MAX_UINSN_BYTES

typedef u32 uprobe_opcode_t;
typedef __le32 uprobe_opcode_t;

struct arch_uprobe_task {
};
@@ -661,6 +661,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
		CAP_MIDR_RANGE_LIST(trbe_write_out_of_range_cpus),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2645198
	{
		.desc = "ARM erratum 2645198",
		.capability = ARM64_WORKAROUND_2645198,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A715)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2077057
	{
		.desc = "ARM erratum 2077057",
|
|||
*/
|
||||
|
||||
#include <linux/linkage.h>
|
||||
#include <asm/assembler.h>
|
||||
|
||||
SYM_FUNC_START(__efi_rt_asm_wrapper)
|
||||
stp x29, x30, [sp, #-112]!
|
||||
|
|
|
@@ -8,28 +8,27 @@
#include <asm/cpufeature.h>
#include <asm/mte.h>

#define for_each_mte_vma(vmi, vma)				\
#define for_each_mte_vma(cprm, i, m)				\
	if (system_supports_mte())				\
		for_each_vma(vmi, vma)				\
			if (vma->vm_flags & VM_MTE)
		for (i = 0, m = cprm->vma_meta;			\
		     i < cprm->vma_count;			\
		     i++, m = cprm->vma_meta + i)		\
			if (m->flags & VM_MTE)

static unsigned long mte_vma_tag_dump_size(struct vm_area_struct *vma)
static unsigned long mte_vma_tag_dump_size(struct core_vma_metadata *m)
{
	if (vma->vm_flags & VM_DONTDUMP)
		return 0;

	return vma_pages(vma) * MTE_PAGE_TAG_STORAGE;
	return (m->dump_size >> PAGE_SHIFT) * MTE_PAGE_TAG_STORAGE;
}

/* Derived from dump_user_range(); start/end must be page-aligned */
static int mte_dump_tag_range(struct coredump_params *cprm,
			      unsigned long start, unsigned long end)
			      unsigned long start, unsigned long len)
{
	int ret = 1;
	unsigned long addr;
	void *tags = NULL;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
	for (addr = start; addr < start + len; addr += PAGE_SIZE) {
		struct page *page = get_dump_page(addr);

		/*

@@ -65,7 +64,6 @@ static int mte_dump_tag_range(struct coredump_params *cprm,
		mte_save_page_tags(page_address(page), tags);
		put_page(page);
		if (!dump_emit(cprm, tags, MTE_PAGE_TAG_STORAGE)) {
			mte_free_tag_storage(tags);
			ret = 0;
			break;
		}

@@ -77,13 +75,13 @@ static int mte_dump_tag_range(struct coredump_params *cprm,
	return ret;
}

Elf_Half elf_core_extra_phdrs(void)
Elf_Half elf_core_extra_phdrs(struct coredump_params *cprm)
{
	struct vm_area_struct *vma;
	int i;
	struct core_vma_metadata *m;
	int vma_count = 0;
	VMA_ITERATOR(vmi, current->mm, 0);

	for_each_mte_vma(vmi, vma)
	for_each_mte_vma(cprm, i, m)
		vma_count++;

	return vma_count;

@@ -91,18 +89,18 @@ Elf_Half elf_core_extra_phdrs(void)

int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, current->mm, 0);
	int i;
	struct core_vma_metadata *m;

	for_each_mte_vma(vmi, vma) {
	for_each_mte_vma(cprm, i, m) {
		struct elf_phdr phdr;

		phdr.p_type = PT_AARCH64_MEMTAG_MTE;
		phdr.p_offset = offset;
		phdr.p_vaddr = vma->vm_start;
		phdr.p_vaddr = m->start;
		phdr.p_paddr = 0;
		phdr.p_filesz = mte_vma_tag_dump_size(vma);
		phdr.p_memsz = vma->vm_end - vma->vm_start;
		phdr.p_filesz = mte_vma_tag_dump_size(m);
		phdr.p_memsz = m->end - m->start;
		offset += phdr.p_filesz;
		phdr.p_flags = 0;
		phdr.p_align = 0;

@@ -114,28 +112,25 @@ int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
	return 1;
}

size_t elf_core_extra_data_size(void)
size_t elf_core_extra_data_size(struct coredump_params *cprm)
{
	struct vm_area_struct *vma;
	int i;
	struct core_vma_metadata *m;
	size_t data_size = 0;
	VMA_ITERATOR(vmi, current->mm, 0);

	for_each_mte_vma(vmi, vma)
		data_size += mte_vma_tag_dump_size(vma);
	for_each_mte_vma(cprm, i, m)
		data_size += mte_vma_tag_dump_size(m);

	return data_size;
}

int elf_core_write_extra_data(struct coredump_params *cprm)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, current->mm, 0);
	int i;
	struct core_vma_metadata *m;

	for_each_mte_vma(vmi, vma) {
		if (vma->vm_flags & VM_DONTDUMP)
			continue;

		if (!mte_dump_tag_range(cprm, vma->vm_start, vma->vm_end))
	for_each_mte_vma(cprm, i, m) {
		if (!mte_dump_tag_range(cprm, m->start, m->dump_size))
			return 0;
	}
@@ -385,7 +385,7 @@ static void task_fpsimd_load(void)
	WARN_ON(!system_supports_fpsimd());
	WARN_ON(!have_cpu_fpsimd_context());

	if (system_supports_sve()) {
	if (system_supports_sve() || system_supports_sme()) {
		switch (current->thread.fp_type) {
		case FP_STATE_FPSIMD:
			/* Stop tracking SVE for this task until next use. */
@@ -1357,7 +1357,7 @@ enum aarch64_regset {
#ifdef CONFIG_ARM64_SVE
	REGSET_SVE,
#endif
#ifdef CONFIG_ARM64_SVE
#ifdef CONFIG_ARM64_SME
	REGSET_SSVE,
	REGSET_ZA,
#endif
@@ -281,7 +281,12 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user)

		vl = task_get_sme_vl(current);
	} else {
		if (!system_supports_sve())
		/*
		 * A SME only system use SVE for streaming mode so can
		 * have a SVE formatted context with a zero VL and no
		 * payload data.
		 */
		if (!system_supports_sve() && !system_supports_sme())
			return -EINVAL;

		vl = task_get_sve_vl(current);

@@ -732,7 +737,7 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
		return err;
	}

	if (system_supports_sve()) {
	if (system_supports_sve() || system_supports_sme()) {
		unsigned int vq = 0;

		if (add_all || test_thread_flag(TIF_SVE) ||
@@ -60,7 +60,7 @@ static inline bool __get_fault_info(u64 esr, struct kvm_vcpu_fault_info *fault)
	 */
	if (!(esr & ESR_ELx_S1PTW) &&
	    (cpus_have_final_cap(ARM64_WORKAROUND_834220) ||
	     (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
	     (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_PERM)) {
		if (!__translate_far_to_hpfar(far, &hpfar))
			return false;
	} else {
@@ -367,7 +367,7 @@ static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
	if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
		bool valid;

		valid = kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
		valid = kvm_vcpu_trap_get_fault_type(vcpu) == ESR_ELx_FSC_FAULT &&
			kvm_vcpu_dabt_isvalid(vcpu) &&
			!kvm_vcpu_abt_issea(vcpu) &&
			!kvm_vcpu_abt_iss1tw(vcpu);
@@ -1212,7 +1212,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
	exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
	VM_BUG_ON(write_fault && exec_fault);

	if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
	if (fault_status == ESR_ELx_FSC_PERM && !write_fault && !exec_fault) {
		kvm_err("Unexpected L2 read permission error\n");
		return -EFAULT;
	}

@@ -1277,7 +1277,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
	 * only exception to this is when dirty logging is enabled at runtime
	 * and a write fault needs to collapse a block entry into a table.
	 */
	if (fault_status != FSC_PERM || (logging_active && write_fault)) {
	if (fault_status != ESR_ELx_FSC_PERM ||
	    (logging_active && write_fault)) {
		ret = kvm_mmu_topup_memory_cache(memcache,
						 kvm_mmu_cache_min_pages(kvm));
		if (ret)

@@ -1342,7 +1343,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
	 * backed by a THP and thus use block mapping if possible.
	 */
	if (vma_pagesize == PAGE_SIZE && !(force_pte || device)) {
		if (fault_status == FSC_PERM && fault_granule > PAGE_SIZE)
		if (fault_status == ESR_ELx_FSC_PERM &&
		    fault_granule > PAGE_SIZE)
			vma_pagesize = fault_granule;
		else
			vma_pagesize = transparent_hugepage_adjust(kvm, memslot,

@@ -1350,7 +1352,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
								   &fault_ipa);
	}

	if (fault_status != FSC_PERM && !device && kvm_has_mte(kvm)) {
	if (fault_status != ESR_ELx_FSC_PERM && !device && kvm_has_mte(kvm)) {
		/* Check the VMM hasn't introduced a new disallowed VMA */
		if (kvm_vma_mte_allowed(vma)) {
			sanitise_mte_tags(kvm, pfn, vma_pagesize);

@@ -1376,7 +1378,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
	 * permissions only if vma_pagesize equals fault_granule. Otherwise,
	 * kvm_pgtable_stage2_map() should be called to change block size.
	 */
	if (fault_status == FSC_PERM && vma_pagesize == fault_granule)
	if (fault_status == ESR_ELx_FSC_PERM && vma_pagesize == fault_granule)
		ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot);
	else
		ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,

@@ -1441,7 +1443,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
	is_iabt = kvm_vcpu_trap_is_iabt(vcpu);

	if (fault_status == FSC_FAULT) {
	if (fault_status == ESR_ELx_FSC_FAULT) {
		/* Beyond sanitised PARange (which is the IPA limit) */
		if (fault_ipa >= BIT_ULL(get_kvm_ipa_limit())) {
			kvm_inject_size_fault(vcpu);

@@ -1476,8 +1478,9 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
			      kvm_vcpu_get_hfar(vcpu), fault_ipa);

	/* Check the stage-2 fault is trans. fault or write fault */
	if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
	    fault_status != FSC_ACCESS) {
	if (fault_status != ESR_ELx_FSC_FAULT &&
	    fault_status != ESR_ELx_FSC_PERM &&
	    fault_status != ESR_ELx_FSC_ACCESS) {
		kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
			kvm_vcpu_trap_get_class(vcpu),
			(unsigned long)kvm_vcpu_trap_get_fault(vcpu),

@@ -1539,7 +1542,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
	/* Userspace should not be able to register out-of-bounds IPAs */
	VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->kvm));

	if (fault_status == FSC_ACCESS) {
	if (fault_status == ESR_ELx_FSC_ACCESS) {
		handle_access_fault(vcpu, fault_ipa);
		ret = 1;
		goto out_unlock;
@@ -646,7 +646,7 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
		return;

	/* Only preserve PMCR_EL0.N, and reset the rest to 0 */
	pmcr = read_sysreg(pmcr_el0) & ARMV8_PMU_PMCR_N_MASK;
	pmcr = read_sysreg(pmcr_el0) & (ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
	if (!kvm_supports_32bit_el0())
		pmcr |= ARMV8_PMU_PMCR_LC;
|
|||
MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_PRO),
|
||||
MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM_MAX),
|
||||
MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_MAX),
|
||||
MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD),
|
||||
MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE),
|
||||
{},
|
||||
};
|
||||
|
||||
|
|
|
@@ -559,3 +559,24 @@ bool __init arch_hugetlb_valid_size(unsigned long size)
{
	return __hugetlb_valid_size(size);
}

pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	if (IS_ENABLED(CONFIG_ARM64_ERRATUM_2645198) &&
	    cpus_have_const_cap(ARM64_WORKAROUND_2645198)) {
		/*
		 * Break-before-make (BBM) is required for all user space mappings
		 * when the permission changes from executable to non-executable
		 * in cases where cpu is affected with errata #2645198.
		 */
		if (pte_user_exec(READ_ONCE(*ptep)))
			return huge_ptep_clear_flush(vma, addr, ptep);
	}
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}

void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
				  pte_t old_pte, pte_t pte)
{
	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
@@ -1630,3 +1630,24 @@ static int __init prevent_bootmem_remove_init(void)
}
early_initcall(prevent_bootmem_remove_init);
#endif

pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	if (IS_ENABLED(CONFIG_ARM64_ERRATUM_2645198) &&
	    cpus_have_const_cap(ARM64_WORKAROUND_2645198)) {
		/*
		 * Break-before-make (BBM) is required for all user space mappings
		 * when the permission changes from executable to non-executable
		 * in cases where cpu is affected with errata #2645198.
		 */
		if (pte_user_exec(READ_ONCE(*ptep)))
			return ptep_clear_flush(vma, addr, ptep);
	}
	return ptep_get_and_clear(vma->vm_mm, addr, ptep);
}

void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
			     pte_t old_pte, pte_t pte)
{
	set_pte_at(vma->vm_mm, addr, ptep, pte);
}
@@ -71,6 +71,7 @@ WORKAROUND_2038923
WORKAROUND_2064142
WORKAROUND_2077057
WORKAROUND_2457168
WORKAROUND_2645198
WORKAROUND_2658417
WORKAROUND_TRBE_OVERWRITE_FILL_MODE
WORKAROUND_TSB_FLUSH_FAILURE
@@ -7,7 +7,7 @@
#include <asm/elf.h>


Elf64_Half elf_core_extra_phdrs(void)
Elf64_Half elf_core_extra_phdrs(struct coredump_params *cprm)
{
	return GATE_EHDR->e_phnum;
}

@@ -60,7 +60,7 @@ int elf_core_write_extra_data(struct coredump_params *cprm)
	return 1;
}

size_t elf_core_extra_data_size(void)
size_t elf_core_extra_data_size(struct coredump_params *cprm)
{
	const struct elf_phdr *const gate_phdrs =
		(const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff);
@@ -64,7 +64,7 @@ void __init plat_mem_setup(void)
	dtb = get_fdt();
	__dt_setup_arch(dtb);

	if (!early_init_dt_scan_memory())
	if (early_init_dt_scan_memory())
		return;

	if (soc_info.mem_detect)
@@ -659,3 +659,19 @@
		interrupts = <16 2 1 9>;
	};
};

&fman0_rx_0x08 {
	/delete-property/ fsl,fman-10g-port;
};

&fman0_tx_0x28 {
	/delete-property/ fsl,fman-10g-port;
};

&fman0_rx_0x09 {
	/delete-property/ fsl,fman-10g-port;
};

&fman0_tx_0x29 {
	/delete-property/ fsl,fman-10g-port;
};
@@ -210,6 +210,10 @@ ld_version()
	gsub(".*version ", "");
	gsub("-.*", "");
	split($1,a, ".");
	if( length(a[3]) == "8" )
		# a[3] is probably a date of format yyyymmdd used for release snapshots. We
		# can assume it to be zero as it does not signify a new version as such.
		a[3] = 0;
	print a[1]*100000000 + a[2]*1000000 + a[3]*10000;
	exit
	}'
@@ -137,7 +137,7 @@ struct imc_pmu {
 * are inited.
 */
struct imc_pmu_ref {
	struct mutex lock;
	spinlock_t lock;
	unsigned int id;
	int refc;
};
@@ -8,6 +8,7 @@
#define BSS_FIRST_SECTIONS *(.bss.prominit)
#define EMITS_PT_NOTE
#define RO_EXCEPTION_TABLE_ALIGN	0
#define RUNTIME_DISCARD_EXIT

#define SOFT_MASK_TABLE(align)		\
	. = ALIGN(align);		\

@@ -410,9 +411,12 @@ SECTIONS
	DISCARDS
	/DISCARD/ : {
		*(*.EMB.apuinfo)
		*(.glink .iplt .plt .rela* .comment)
		*(.glink .iplt .plt)
		*(.gnu.version*)
		*(.gnu.attributes)
		*(.eh_frame)
#ifndef CONFIG_RELOCATABLE
		*(.rela*)
#endif
	}
}
@@ -1012,7 +1012,7 @@ static void __init hash_init_partition_table(phys_addr_t hash_table,

void hpt_clear_stress(void);
static struct timer_list stress_hpt_timer;
void stress_hpt_timer_fn(struct timer_list *timer)
static void stress_hpt_timer_fn(struct timer_list *timer)
{
	int next_cpu;
|
|||
#include <asm/cputhreads.h>
|
||||
#include <asm/smp.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/spinlock.h>
|
||||
|
||||
/* Nest IMC data structures and variables */
|
||||
|
||||
|
@ -21,7 +22,7 @@
|
|||
* Used to avoid races in counting the nest-pmu units during hotplug
|
||||
* register and unregister
|
||||
*/
|
||||
static DEFINE_MUTEX(nest_init_lock);
|
||||
static DEFINE_SPINLOCK(nest_init_lock);
|
||||
static DEFINE_PER_CPU(struct imc_pmu_ref *, local_nest_imc_refc);
|
||||
static struct imc_pmu **per_nest_pmu_arr;
|
||||
static cpumask_t nest_imc_cpumask;
|
||||
|
@ -50,7 +51,7 @@ static int trace_imc_mem_size;
|
|||
* core and trace-imc
|
||||
*/
|
||||
static struct imc_pmu_ref imc_global_refc = {
|
||||
.lock = __MUTEX_INITIALIZER(imc_global_refc.lock),
|
||||
.lock = __SPIN_LOCK_INITIALIZER(imc_global_refc.lock),
|
||||
.id = 0,
|
||||
.refc = 0,
|
||||
};
|
||||
|
@ -400,7 +401,7 @@ static int ppc_nest_imc_cpu_offline(unsigned int cpu)
|
|||
get_hard_smp_processor_id(cpu));
|
||||
/*
|
||||
* If this is the last cpu in this chip then, skip the reference
|
||||
* count mutex lock and make the reference count on this chip zero.
|
||||
* count lock and make the reference count on this chip zero.
|
||||
*/
|
||||
ref = get_nest_pmu_ref(cpu);
|
||||
if (!ref)
|
||||
|
@ -462,15 +463,15 @@ static void nest_imc_counters_release(struct perf_event *event)
|
|||
/*
|
||||
* See if we need to disable the nest PMU.
|
||||
* If no events are currently in use, then we have to take a
|
||||
* mutex to ensure that we don't race with another task doing
|
||||
* lock to ensure that we don't race with another task doing
|
||||
* enable or disable the nest counters.
|
||||
*/
|
||||
ref = get_nest_pmu_ref(event->cpu);
|
||||
if (!ref)
|
||||
return;
|
||||
|
||||
/* Take the mutex lock for this node and then decrement the reference count */
|
||||
mutex_lock(&ref->lock);
|
||||
/* Take the lock for this node and then decrement the reference count */
|
||||
spin_lock(&ref->lock);
|
||||
if (ref->refc == 0) {
|
||||
/*
|
||||
* The scenario where this is true is, when perf session is
|
||||
|
@ -482,7 +483,7 @@ static void nest_imc_counters_release(struct perf_event *event)
|
|||
* an OPAL call to disable the engine in that node.
|
||||
*
|
||||
*/
|
||||
mutex_unlock(&ref->lock);
|
||||
spin_unlock(&ref->lock);
|
||||
return;
|
||||
}
|
||||
ref->refc--;
|
||||
|
@ -490,7 +491,7 @@ static void nest_imc_counters_release(struct perf_event *event)
|
|||
rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
|
||||
get_hard_smp_processor_id(event->cpu));
|
||||
if (rc) {
|
||||
mutex_unlock(&ref->lock);
|
||||
spin_unlock(&ref->lock);
|
||||
pr_err("nest-imc: Unable to stop the counters for core %d\n", node_id);
|
||||
return;
|
||||
}
|
||||
|
@ -498,7 +499,7 @@ static void nest_imc_counters_release(struct perf_event *event)
|
|||
WARN(1, "nest-imc: Invalid event reference count\n");
|
||||
ref->refc = 0;
|
||||
}
|
||||
mutex_unlock(&ref->lock);
|
||||
spin_unlock(&ref->lock);
|
||||
}
|
||||
|
||||
static int nest_imc_event_init(struct perf_event *event)
|
||||
|
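Every conversion in this file follows the same first-user-enables, last-user-disables idiom; here is a condensed sketch of it, with hypothetical start/stop helpers standing in for the OPAL calls. The point of the whole patch is that these paths run from atomic PMU callbacks, where a spinlock is legal and a mutex is not:

    static void imc_ref_get(struct imc_pmu_ref *ref)
    {
            spin_lock(&ref->lock);
            if (ref->refc == 0)
                    start_hw_counters();    /* e.g. opal_imc_counters_start() */
            ++ref->refc;
            spin_unlock(&ref->lock);
    }

    static void imc_ref_put(struct imc_pmu_ref *ref)
    {
            spin_lock(&ref->lock);
            if (--ref->refc == 0)
                    stop_hw_counters();     /* e.g. opal_imc_counters_stop() */
            if (ref->refc < 0) {
                    WARN_ONCE(1, "imc: invalid event reference count\n");
                    ref->refc = 0;
            }
            spin_unlock(&ref->lock);
    }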
@@ -557,26 +558,25 @@ static int nest_imc_event_init(struct perf_event *event)

	/*
	 * Get the imc_pmu_ref struct for this node.
	 * Take the mutex lock and then increment the count of nest pmu events
	 * inited.
	 * Take the lock and then increment the count of nest pmu events inited.
	 */
	ref = get_nest_pmu_ref(event->cpu);
	if (!ref)
		return -EINVAL;

	mutex_lock(&ref->lock);
	spin_lock(&ref->lock);
	if (ref->refc == 0) {
		rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_NEST,
					     get_hard_smp_processor_id(event->cpu));
		if (rc) {
			mutex_unlock(&ref->lock);
			spin_unlock(&ref->lock);
			pr_err("nest-imc: Unable to start the counters for node %d\n",
			       node_id);
			return rc;
		}
	}
	++ref->refc;
	mutex_unlock(&ref->lock);
	spin_unlock(&ref->lock);

	event->destroy = nest_imc_counters_release;
	return 0;

@@ -612,9 +612,8 @@ static int core_imc_mem_init(int cpu, int size)
		return -ENOMEM;
	mem_info->vbase = page_address(page);

	/* Init the mutex */
	core_imc_refc[core_id].id = core_id;
	mutex_init(&core_imc_refc[core_id].lock);
	spin_lock_init(&core_imc_refc[core_id].lock);

	rc = opal_imc_counters_init(OPAL_IMC_COUNTERS_CORE,
				    __pa((void *)mem_info->vbase),

@@ -703,9 +702,8 @@ static int ppc_core_imc_cpu_offline(unsigned int cpu)
		perf_pmu_migrate_context(&core_imc_pmu->pmu, cpu, ncpu);
	} else {
		/*
		 * If this is the last cpu in this core then, skip taking refernce
		 * count mutex lock for this core and directly zero "refc" for
		 * this core.
		 * If this is the last cpu in this core then skip taking reference
		 * count lock for this core and directly zero "refc" for this core.
		 */
		opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
				       get_hard_smp_processor_id(cpu));

@@ -720,11 +718,11 @@ static int ppc_core_imc_cpu_offline(unsigned int cpu)
		 * last cpu in this core and core-imc event running
		 * in this cpu.
		 */
		mutex_lock(&imc_global_refc.lock);
		spin_lock(&imc_global_refc.lock);
		if (imc_global_refc.id == IMC_DOMAIN_CORE)
			imc_global_refc.refc--;

		mutex_unlock(&imc_global_refc.lock);
		spin_unlock(&imc_global_refc.lock);
	}
	return 0;
}

@@ -739,7 +737,7 @@ static int core_imc_pmu_cpumask_init(void)

static void reset_global_refc(struct perf_event *event)
{
	mutex_lock(&imc_global_refc.lock);
	spin_lock(&imc_global_refc.lock);
	imc_global_refc.refc--;

	/*

@@ -751,7 +749,7 @@ static void reset_global_refc(struct perf_event *event)
		imc_global_refc.refc = 0;
		imc_global_refc.id = 0;
	}
	mutex_unlock(&imc_global_refc.lock);
	spin_unlock(&imc_global_refc.lock);
}

static void core_imc_counters_release(struct perf_event *event)

@@ -764,17 +762,17 @@ static void core_imc_counters_release(struct perf_event *event)
	/*
	 * See if we need to disable the IMC PMU.
	 * If no events are currently in use, then we have to take a
	 * mutex to ensure that we don't race with another task doing
	 * lock to ensure that we don't race with another task doing
	 * enable or disable the core counters.
	 */
	core_id = event->cpu / threads_per_core;

	/* Take the mutex lock and decrement the refernce count for this core */
	/* Take the lock and decrement the refernce count for this core */
	ref = &core_imc_refc[core_id];
	if (!ref)
		return;

	mutex_lock(&ref->lock);
	spin_lock(&ref->lock);
	if (ref->refc == 0) {
		/*
		 * The scenario where this is true is, when perf session is

@@ -786,7 +784,7 @@ static void core_imc_counters_release(struct perf_event *event)
		 * an OPAL call to disable the engine in that core.
		 *
		 */
		mutex_unlock(&ref->lock);
		spin_unlock(&ref->lock);
		return;
	}
	ref->refc--;

@@ -794,7 +792,7 @@ static void core_imc_counters_release(struct perf_event *event)
	rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
				    get_hard_smp_processor_id(event->cpu));
	if (rc) {
		mutex_unlock(&ref->lock);
		spin_unlock(&ref->lock);
		pr_err("IMC: Unable to stop the counters for core %d\n", core_id);
		return;
	}

@@ -802,7 +800,7 @@ static void core_imc_counters_release(struct perf_event *event)
		WARN(1, "core-imc: Invalid event reference count\n");
		ref->refc = 0;
	}
	mutex_unlock(&ref->lock);
	spin_unlock(&ref->lock);

	reset_global_refc(event);
}

@@ -840,7 +838,6 @@ static int core_imc_event_init(struct perf_event *event)
	if ((!pcmi->vbase))
		return -ENODEV;

	/* Get the core_imc mutex for this core */
	ref = &core_imc_refc[core_id];
	if (!ref)
		return -EINVAL;

@@ -848,22 +845,22 @@ static int core_imc_event_init(struct perf_event *event)
	/*
	 * Core pmu units are enabled only when it is used.
	 * See if this is triggered for the first time.
	 * If yes, take the mutex lock and enable the core counters.
	 * If yes, take the lock and enable the core counters.
	 * If not, just increment the count in core_imc_refc struct.
	 */
	mutex_lock(&ref->lock);
	spin_lock(&ref->lock);
	if (ref->refc == 0) {
		rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
					     get_hard_smp_processor_id(event->cpu));
		if (rc) {
			mutex_unlock(&ref->lock);
			spin_unlock(&ref->lock);
			pr_err("core-imc: Unable to start the counters for core %d\n",
			       core_id);
			return rc;
		}
	}
	++ref->refc;
	mutex_unlock(&ref->lock);
	spin_unlock(&ref->lock);

	/*
	 * Since the system can run either in accumulation or trace-mode

@@ -874,7 +871,7 @@ static int core_imc_event_init(struct perf_event *event)
	 * to know whether any other trace/thread imc
	 * events are running.
	 */
	mutex_lock(&imc_global_refc.lock);
	spin_lock(&imc_global_refc.lock);
	if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_CORE) {
		/*
		 * No other trace/thread imc events are running in

@@ -883,10 +880,10 @@ static int core_imc_event_init(struct perf_event *event)
		imc_global_refc.id = IMC_DOMAIN_CORE;
		imc_global_refc.refc++;
	} else {
		mutex_unlock(&imc_global_refc.lock);
		spin_unlock(&imc_global_refc.lock);
		return -EBUSY;
	}
	mutex_unlock(&imc_global_refc.lock);
	spin_unlock(&imc_global_refc.lock);

	event->hw.event_base = (u64)pcmi->vbase + (config & IMC_EVENT_OFFSET_MASK);
	event->destroy = core_imc_counters_release;

@@ -958,10 +955,10 @@ static int ppc_thread_imc_cpu_offline(unsigned int cpu)
	mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));

	/* Reduce the refc if thread-imc event running on this cpu */
	mutex_lock(&imc_global_refc.lock);
	spin_lock(&imc_global_refc.lock);
	if (imc_global_refc.id == IMC_DOMAIN_THREAD)
		imc_global_refc.refc--;
	mutex_unlock(&imc_global_refc.lock);
	spin_unlock(&imc_global_refc.lock);

	return 0;
}

@@ -1001,7 +998,7 @@ static int thread_imc_event_init(struct perf_event *event)
	if (!target)
		return -EINVAL;

	mutex_lock(&imc_global_refc.lock);
	spin_lock(&imc_global_refc.lock);
	/*
	 * Check if any other trace/core imc events are running in the
	 * system, if not set the global id to thread-imc.

@@ -1010,10 +1007,10 @@ static int thread_imc_event_init(struct perf_event *event)
		imc_global_refc.id = IMC_DOMAIN_THREAD;
		imc_global_refc.refc++;
	} else {
		mutex_unlock(&imc_global_refc.lock);
		spin_unlock(&imc_global_refc.lock);
		return -EBUSY;
	}
	mutex_unlock(&imc_global_refc.lock);
	spin_unlock(&imc_global_refc.lock);

	event->pmu->task_ctx_nr = perf_sw_context;
	event->destroy = reset_global_refc;

@@ -1135,25 +1132,25 @@ static int thread_imc_event_add(struct perf_event *event, int flags)
	/*
	 * imc pmus are enabled only when it is used.
	 * See if this is triggered for the first time.
	 * If yes, take the mutex lock and enable the counters.
	 * If yes, take the lock and enable the counters.
	 * If not, just increment the count in ref count struct.
	 */
	ref = &core_imc_refc[core_id];
	if (!ref)
		return -EINVAL;

	mutex_lock(&ref->lock);
	spin_lock(&ref->lock);
	if (ref->refc == 0) {
		if (opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
		    get_hard_smp_processor_id(smp_processor_id()))) {
			mutex_unlock(&ref->lock);
			spin_unlock(&ref->lock);
			pr_err("thread-imc: Unable to start the counter\
				for core %d\n", core_id);
			return -EINVAL;
		}
	}
	++ref->refc;
	mutex_unlock(&ref->lock);
	spin_unlock(&ref->lock);
	return 0;
}

@@ -1170,12 +1167,12 @@ static void thread_imc_event_del(struct perf_event *event, int flags)
		return;
	}

	mutex_lock(&ref->lock);
	spin_lock(&ref->lock);
	ref->refc--;
	if (ref->refc == 0) {
		if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
		    get_hard_smp_processor_id(smp_processor_id()))) {
			mutex_unlock(&ref->lock);
			spin_unlock(&ref->lock);
			pr_err("thread-imc: Unable to stop the counters\
				for core %d\n", core_id);
			return;

@@ -1183,7 +1180,7 @@ static void thread_imc_event_del(struct perf_event *event, int flags)
	} else if (ref->refc < 0) {
		ref->refc = 0;
	}
	mutex_unlock(&ref->lock);
	spin_unlock(&ref->lock);

	/* Set bit 0 of LDBAR to zero, to stop posting updates to memory */
	mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));

@@ -1224,9 +1221,8 @@ static int trace_imc_mem_alloc(int cpu_id, int size)
		}
	}

	/* Init the mutex, if not already */
	trace_imc_refc[core_id].id = core_id;
	mutex_init(&trace_imc_refc[core_id].lock);
	spin_lock_init(&trace_imc_refc[core_id].lock);

	mtspr(SPRN_LDBAR, 0);
	return 0;

@@ -1246,10 +1242,10 @@ static int ppc_trace_imc_cpu_offline(unsigned int cpu)
	 * Reduce the refc if any trace-imc event running
	 * on this cpu.
	 */
	mutex_lock(&imc_global_refc.lock);
	spin_lock(&imc_global_refc.lock);
	if (imc_global_refc.id == IMC_DOMAIN_TRACE)
		imc_global_refc.refc--;
	mutex_unlock(&imc_global_refc.lock);
	spin_unlock(&imc_global_refc.lock);

	return 0;
}

@@ -1371,17 +1367,17 @@ static int trace_imc_event_add(struct perf_event *event, int flags)
	}

	mtspr(SPRN_LDBAR, ldbar_value);
	mutex_lock(&ref->lock);
	spin_lock(&ref->lock);
	if (ref->refc == 0) {
		if (opal_imc_counters_start(OPAL_IMC_COUNTERS_TRACE,
		    get_hard_smp_processor_id(smp_processor_id()))) {
			mutex_unlock(&ref->lock);
			spin_unlock(&ref->lock);
			pr_err("trace-imc: Unable to start the counters for core %d\n", core_id);
			return -EINVAL;
		}
	}
	++ref->refc;
	mutex_unlock(&ref->lock);
	spin_unlock(&ref->lock);
	return 0;
}

@@ -1414,19 +1410,19 @@ static void trace_imc_event_del(struct perf_event *event, int flags)
		return;
	}

	mutex_lock(&ref->lock);
	spin_lock(&ref->lock);
	ref->refc--;
	if (ref->refc == 0) {
		if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_TRACE,
		    get_hard_smp_processor_id(smp_processor_id()))) {
			mutex_unlock(&ref->lock);
			spin_unlock(&ref->lock);
			pr_err("trace-imc: Unable to stop the counters for core %d\n", core_id);
			return;
		}
	} else if (ref->refc < 0) {
		ref->refc = 0;
	}
	mutex_unlock(&ref->lock);
	spin_unlock(&ref->lock);

	trace_imc_event_stop(event, flags);
}

@@ -1448,7 +1444,7 @@ static int trace_imc_event_init(struct perf_event *event)
	 * no other thread is running any core/thread imc
	 * events
	 */
	mutex_lock(&imc_global_refc.lock);
	spin_lock(&imc_global_refc.lock);
	if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_TRACE) {
		/*
		 * No core/thread imc events are running in the

@@ -1457,10 +1453,10 @@ static int trace_imc_event_init(struct perf_event *event)
		imc_global_refc.id = IMC_DOMAIN_TRACE;
		imc_global_refc.refc++;
	} else {
		mutex_unlock(&imc_global_refc.lock);
		spin_unlock(&imc_global_refc.lock);
		return -EBUSY;
	}
	mutex_unlock(&imc_global_refc.lock);
	spin_unlock(&imc_global_refc.lock);

	event->hw.idx = -1;

@@ -1533,10 +1529,10 @@ static int init_nest_pmu_ref(void)
	i = 0;
	for_each_node(nid) {
		/*
		 * Mutex lock to avoid races while tracking the number of
		 * Take the lock to avoid races while tracking the number of
		 * sessions using the chip's nest pmu units.
		 */
		mutex_init(&nest_imc_refc[i].lock);
		spin_lock_init(&nest_imc_refc[i].lock);

		/*
		 * Loop to init the "id" with the node_id. Variable "i" initialized to

@@ -1633,7 +1629,7 @@ static void imc_common_mem_free(struct imc_pmu *pmu_ptr)
static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
{
	if (pmu_ptr->domain == IMC_DOMAIN_NEST) {
		mutex_lock(&nest_init_lock);
		spin_lock(&nest_init_lock);
		if (nest_pmus == 1) {
			cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE);
			kfree(nest_imc_refc);

@@ -1643,7 +1639,7 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)

	if (nest_pmus > 0)
		nest_pmus--;
	mutex_unlock(&nest_init_lock);
	spin_unlock(&nest_init_lock);
}

/* Free core_imc memory */

@@ -1800,11 +1796,11 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
	 * rest. To handle the cpuhotplug callback unregister, we track
	 * the number of nest pmus in "nest_pmus".
	 */
	mutex_lock(&nest_init_lock);
	spin_lock(&nest_init_lock);
	if (nest_pmus == 0) {
		ret = init_nest_pmu_ref();
		if (ret) {
			mutex_unlock(&nest_init_lock);
			spin_unlock(&nest_init_lock);
			kfree(per_nest_pmu_arr);
			per_nest_pmu_arr = NULL;
			goto err_free_mem;

@@ -1812,7 +1808,7 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
		/* Register for cpu hotplug notification. */
		ret = nest_pmu_cpumask_init();
		if (ret) {
			mutex_unlock(&nest_init_lock);
			spin_unlock(&nest_init_lock);
			kfree(nest_imc_refc);
			kfree(per_nest_pmu_arr);
			per_nest_pmu_arr = NULL;

@@ -1820,7 +1816,7 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
		}
	}
	nest_pmus++;
	mutex_unlock(&nest_init_lock);
	spin_unlock(&nest_init_lock);
	break;
case IMC_DOMAIN_CORE:
	ret = core_imc_pmu_cpumask_init();
@@ -165,7 +165,7 @@ do {						\
	might_fault();				\
	access_ok(__p, sizeof(*__p)) ?		\
		__get_user((x), __p) :		\
		((x) = 0, -EFAULT);		\
		((x) = (__force __typeof__(x))0, -EFAULT); \
})

#define __put_user_asm(insn, x, ptr, err)	\
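The cast matters because x may be a pointer, including a __user pointer, and on the error path a bare 0 assigned to such a type makes sparse complain about mixing address spaces. In miniature, as a hypothetical macro rather than the kernel's:

    /*
     * Zeroes x on fault regardless of its type: integer, plain
     * pointer, or __user pointer all accept this assignment
     * without address-space warnings.
     */
    #define zero_on_fault(x)				\
    ({							\
    	(x) = (__force __typeof__(x))0;			\
    	-EFAULT;					\
    })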
@@ -31,9 +31,9 @@ __RISCV_INSN_FUNCS(fence, 0x7f, 0x0f);
} while (0)

__RISCV_INSN_FUNCS(c_j, 0xe003, 0xa001);
__RISCV_INSN_FUNCS(c_jr, 0xf007, 0x8002);
__RISCV_INSN_FUNCS(c_jr, 0xf07f, 0x8002);
__RISCV_INSN_FUNCS(c_jal, 0xe003, 0x2001);
__RISCV_INSN_FUNCS(c_jalr, 0xf007, 0x9002);
__RISCV_INSN_FUNCS(c_jalr, 0xf07f, 0x9002);
__RISCV_INSN_FUNCS(c_beqz, 0xe003, 0xc001);
__RISCV_INSN_FUNCS(c_bnez, 0xe003, 0xe001);
__RISCV_INSN_FUNCS(c_ebreak, 0xffff, 0x9002);
@@ -23,9 +23,9 @@
#define memmove memmove
#define memzero(s, n) memset((s), 0, (n))

#ifdef CONFIG_KERNEL_BZIP2
#if defined(CONFIG_KERNEL_BZIP2)
#define BOOT_HEAP_SIZE	0x400000
#elif CONFIG_KERNEL_ZSTD
#elif defined(CONFIG_KERNEL_ZSTD)
#define BOOT_HEAP_SIZE	0x30000
#else
#define BOOT_HEAP_SIZE	0x10000
@@ -190,7 +190,6 @@ CONFIG_NFT_CT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_NAT=m
CONFIG_NFT_OBJREF=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m

@@ -569,6 +568,7 @@ CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
CONFIG_LEGACY_PTY_COUNT=0
# CONFIG_LEGACY_TIOCSTI is not set
CONFIG_VIRTIO_CONSOLE=m
CONFIG_HW_RANDOM_VIRTIO=m
CONFIG_HANGCHECK_TIMER=m

@@ -660,6 +660,7 @@ CONFIG_CONFIGFS_FS=m
CONFIG_ECRYPT_FS=m
CONFIG_CRAMFS=m
CONFIG_SQUASHFS=m
CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT=y
CONFIG_SQUASHFS_XATTR=y
CONFIG_SQUASHFS_LZ4=y
CONFIG_SQUASHFS_LZO=y

@@ -705,6 +706,7 @@ CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y
CONFIG_SECURITY_LANDLOCK=y
CONFIG_INTEGRITY_SIGNATURE=y
CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
CONFIG_INTEGRITY_PLATFORM_KEYRING=y
CONFIG_IMA=y
CONFIG_IMA_DEFAULT_HASH_SHA256=y
CONFIG_IMA_WRITE_POLICY=y

@@ -781,6 +783,7 @@ CONFIG_ZCRYPT=m
CONFIG_PKEY=m
CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_DEV_VIRTIO=m
CONFIG_SYSTEM_BLACKLIST_KEYRING=y
CONFIG_CORDIC=m
CONFIG_CRYPTO_LIB_CURVE25519=m
CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m

@@ -848,7 +851,6 @@ CONFIG_PREEMPT_TRACER=y
CONFIG_SCHED_TRACER=y
CONFIG_FTRACE_SYSCALLS=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_BPF_KPROBE_OVERRIDE=y
CONFIG_HIST_TRIGGERS=y
CONFIG_FTRACE_STARTUP_TEST=y
# CONFIG_EVENT_TRACE_STARTUP_TEST is not set

@@ -870,7 +872,6 @@ CONFIG_FAIL_MAKE_REQUEST=y
CONFIG_FAIL_IO_TIMEOUT=y
CONFIG_FAIL_FUTEX=y
CONFIG_FAULT_INJECTION_DEBUG_FS=y
CONFIG_FAIL_FUNCTION=y
CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
CONFIG_LKDTM=m
CONFIG_TEST_MIN_HEAP=y
@ -181,7 +181,6 @@ CONFIG_NFT_CT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_NAT=m
CONFIG_NFT_OBJREF=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
@ -559,6 +558,7 @@ CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
CONFIG_LEGACY_PTY_COUNT=0
# CONFIG_LEGACY_TIOCSTI is not set
CONFIG_VIRTIO_CONSOLE=m
CONFIG_HW_RANDOM_VIRTIO=m
CONFIG_HANGCHECK_TIMER=m
@ -645,6 +645,7 @@ CONFIG_CONFIGFS_FS=m
CONFIG_ECRYPT_FS=m
CONFIG_CRAMFS=m
CONFIG_SQUASHFS=m
CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT=y
CONFIG_SQUASHFS_XATTR=y
CONFIG_SQUASHFS_LZ4=y
CONFIG_SQUASHFS_LZO=y
@ -688,6 +689,7 @@ CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y
CONFIG_SECURITY_LANDLOCK=y
CONFIG_INTEGRITY_SIGNATURE=y
CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
CONFIG_INTEGRITY_PLATFORM_KEYRING=y
CONFIG_IMA=y
CONFIG_IMA_DEFAULT_HASH_SHA256=y
CONFIG_IMA_WRITE_POLICY=y
@ -766,6 +768,7 @@ CONFIG_ZCRYPT=m
CONFIG_PKEY=m
CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_DEV_VIRTIO=m
CONFIG_SYSTEM_BLACKLIST_KEYRING=y
CONFIG_CORDIC=m
CONFIG_PRIME_NUMBERS=m
CONFIG_CRYPTO_LIB_CURVE25519=m
@ -798,7 +801,6 @@ CONFIG_STACK_TRACER=y
CONFIG_SCHED_TRACER=y
CONFIG_FTRACE_SYSCALLS=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_BPF_KPROBE_OVERRIDE=y
CONFIG_HIST_TRIGGERS=y
CONFIG_SAMPLES=y
CONFIG_SAMPLE_TRACE_PRINTK=m

@ -13,7 +13,6 @@ CONFIG_TUNE_ZEC12=y
# CONFIG_COMPAT is not set
CONFIG_NR_CPUS=2
CONFIG_HZ_100=y
# CONFIG_RELOCATABLE is not set
# CONFIG_CHSC_SCH is not set
# CONFIG_SCM_BUS is not set
CONFIG_CRASH_DUMP=y
@ -50,6 +49,7 @@ CONFIG_ZFCP=y
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
# CONFIG_LEGACY_TIOCSTI is not set
# CONFIG_HVC_IUCV is not set
# CONFIG_HW_RANDOM_S390 is not set
# CONFIG_HMC_DRV is not set

@ -131,19 +131,21 @@ struct hws_combined_entry {
    struct hws_diag_entry diag; /* Diagnostic-sampling data entry */
} __packed;

struct hws_trailer_entry {
    union {
        struct {
            unsigned int f:1;       /* 0 - Block Full Indicator */
            unsigned int a:1;       /* 1 - Alert request control */
            unsigned int t:1;       /* 2 - Timestamp format */
            unsigned int :29;       /* 3 - 31: Reserved */
            unsigned int bsdes:16;  /* 32-47: size of basic SDE */
            unsigned int dsdes:16;  /* 48-63: size of diagnostic SDE */
        };
        unsigned long long flags;   /* 0 - 63: All indicators */
union hws_trailer_header {
    struct {
        unsigned int f:1;           /* 0 - Block Full Indicator */
        unsigned int a:1;           /* 1 - Alert request control */
        unsigned int t:1;           /* 2 - Timestamp format */
        unsigned int :29;           /* 3 - 31: Reserved */
        unsigned int bsdes:16;      /* 32-47: size of basic SDE */
        unsigned int dsdes:16;      /* 48-63: size of diagnostic SDE */
        unsigned long long overflow; /* 64 - Overflow Count */
    };
    unsigned long long overflow;    /* 64 - sample Overflow count */
    __uint128_t val;
};

struct hws_trailer_entry {
    union hws_trailer_header header; /* 0 - 15 Flags + Overflow Count */
    unsigned char timestamp[16];    /* 16 - 31 timestamp */
    unsigned long long reserved1;   /* 32 -Reserved */
    unsigned long long reserved2;   /* */
@ -290,14 +292,11 @@ static inline unsigned long sample_rate_to_freq(struct hws_qsi_info_block *qsi,
    return USEC_PER_SEC * qsi->cpu_speed / rate;
}

#define SDB_TE_ALERT_REQ_MASK 0x4000000000000000UL
#define SDB_TE_BUFFER_FULL_MASK 0x8000000000000000UL

/* Return TOD timestamp contained in an trailer entry */
static inline unsigned long long trailer_timestamp(struct hws_trailer_entry *te)
{
    /* TOD in STCKE format */
    if (te->t)
    if (te->header.t)
        return *((unsigned long long *) &te->timestamp[1]);

    /* TOD in STCK format */

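Worth noting about the hws_trailer_header rework above: pulling the bitfields and the overflow counter into one named union gives the same 16 bytes three views, namely the individual flag bits, plain 64-bit words, and a single __uint128_t that can be handed to a 16-byte compare-and-swap. A compilable sketch of the idea (hypothetical type name, GCC/Clang __uint128_t extension assumed; bitfield layout is implementation-defined, so this mirrors the shape, not the exact s390 bit positions):

#include <stdint.h>
#include <stdio.h>

union trailer_header {
    struct {
        unsigned int f:1;               /* block-full indicator */
        unsigned int a:1;               /* alert request */
        unsigned int t:1;               /* timestamp format */
        unsigned int :29;               /* reserved */
        unsigned int bsdes:16;
        unsigned int dsdes:16;
        unsigned long long overflow;    /* sample overflow count */
    };
    __uint128_t val;    /* the whole header as one 16-byte value */
};

int main(void)
{
    union trailer_header h = { .val = 0 };

    h.a = 1;            /* update via the bitfield view */
    h.overflow = 42;
    /* h.val now carries both updates and could be published with a
     * single 16-byte compare-and-swap. */
    printf("overflow=%llu a=%u\n", h.overflow, h.a);
    return 0;
}

Every flag update can then be made visible atomically through val instead of through two loosely coupled 8-byte fields.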
@ -4,8 +4,8 @@
 *
 *    Copyright IBM Corp. 1999, 2020
 */
#ifndef DEBUG_H
#define DEBUG_H
#ifndef _ASM_S390_DEBUG_H
#define _ASM_S390_DEBUG_H

#include <linux/string.h>
#include <linux/spinlock.h>
@ -487,4 +487,4 @@ void debug_register_static(debug_info_t *id, int pages_per_area, int nr_areas);

#endif /* MODULE */

#endif /* DEBUG_H */
#endif /* _ASM_S390_DEBUG_H */

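The guard rename above is the usual fix for generic include-guard names: DEBUG_H is far too common, and the first header to claim it silently suppresses any other debug.h. A small self-contained demonstration, with both "headers" inlined into one file for brevity:

#include <stdio.h>

/* --- first header, e.g. mylib/debug.h --- */
#ifndef DEBUG_H
#define DEBUG_H
#define MYLIB_DEBUG 1
#endif

/* --- second header with the same generic guard --- */
#ifndef DEBUG_H
#define DEBUG_H
#define DRIVER_DEBUG 1  /* never defined: the guard is already taken */
#endif

int main(void)
{
#ifdef DRIVER_DEBUG
    puts("driver debug available");
#else
    puts("driver debug silently missing");  /* this branch prints */
#endif
    return 0;
}

A namespaced guard such as _ASM_S390_DEBUG_H makes the collision impossible.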
@ -31,7 +31,7 @@
    pcp_op_T__ *ptr__; \
    preempt_disable_notrace(); \
    ptr__ = raw_cpu_ptr(&(pcp)); \
    prev__ = *ptr__; \
    prev__ = READ_ONCE(*ptr__); \
    do { \
        old__ = prev__; \
        new__ = old__ op (val); \

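The percpu hunk matters because a plain "prev__ = *ptr__" load may be torn or re-issued by the compiler, so the cmpxchg loop can compare against a value that was never read in one piece. READ_ONCE() forces a single access, and from then on the loop only works with values the cmpxchg itself returned. A user-space analogue with C11 atomics standing in for the kernel primitives (a sketch, not kernel code):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long counter;

static unsigned long add_return(unsigned long val)
{
    unsigned long prev, new;

    /* One explicit load up front, like READ_ONCE(*ptr__). */
    prev = atomic_load_explicit(&counter, memory_order_relaxed);
    do {
        new = prev + val;
        /* On failure the current value is written back into
         * prev, so nothing is ever re-read non-atomically. */
    } while (!atomic_compare_exchange_weak(&counter, &prev, new));
    return new;
}

int main(void)
{
    printf("%lu\n", add_return(5));
    return 0;
}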
@ -187,8 +187,6 @@ static int kexec_file_add_ipl_report(struct kimage *image,

    data->memsz = ALIGN(data->memsz, PAGE_SIZE);
    buf.mem = data->memsz;
    if (image->type == KEXEC_TYPE_CRASH)
        buf.mem += crashk_res.start;

    ptr = (void *)ipl_cert_list_addr;
    end = ptr + ipl_cert_list_size;
@ -225,6 +223,9 @@ static int kexec_file_add_ipl_report(struct kimage *image,
        data->kernel_buf + offsetof(struct lowcore, ipl_parmblock_ptr);
    *lc_ipl_parmblock_ptr = (__u32)buf.mem;

    if (image->type == KEXEC_TYPE_CRASH)
        buf.mem += crashk_res.start;

    ret = kexec_add_buffer(&buf);
out:
    return ret;

@ -163,14 +163,15 @@ static void free_sampling_buffer(struct sf_buffer *sfb)

static int alloc_sample_data_block(unsigned long *sdbt, gfp_t gfp_flags)
{
    unsigned long sdb, *trailer;
    struct hws_trailer_entry *te;
    unsigned long sdb;

    /* Allocate and initialize sample-data-block */
    sdb = get_zeroed_page(gfp_flags);
    if (!sdb)
        return -ENOMEM;
    trailer = trailer_entry_ptr(sdb);
    *trailer = SDB_TE_ALERT_REQ_MASK;
    te = (struct hws_trailer_entry *)trailer_entry_ptr(sdb);
    te->header.a = 1;

    /* Link SDB into the sample-data-block-table */
    *sdbt = sdb;

@ -1206,7 +1207,7 @@ static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt,
                    "%s: Found unknown"
                    " sampling data entry: te->f %i"
                    " basic.def %#4x (%p)\n", __func__,
                    te->f, sample->def, sample);
                    te->header.f, sample->def, sample);
            /* Sample slot is not yet written or other record.
             *
             * This condition can occur if the buffer was reused
@ -1217,7 +1218,7 @@ static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt,
             * that are not full. Stop processing if the first
             * invalid format was detected.
             */
            if (!te->f)
            if (!te->header.f)
                break;
        }

@ -1227,6 +1228,16 @@ static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt,
    }
}

static inline __uint128_t __cdsg(__uint128_t *ptr, __uint128_t old, __uint128_t new)
{
    asm volatile(
        "   cdsg    %[old],%[new],%[ptr]\n"
        : [old] "+d" (old), [ptr] "+QS" (*ptr)
        : [new] "d" (new)
        : "memory", "cc");
    return old;
}

/* hw_perf_event_update() - Process sampling buffer
 * @event:	The perf event
 * @flush_all:	Flag to also flush partially filled sample-data-blocks
@ -1243,10 +1254,11 @@ static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt,
 */
static void hw_perf_event_update(struct perf_event *event, int flush_all)
{
    unsigned long long event_overflow, sampl_overflow, num_sdb;
    union hws_trailer_header old, prev, new;
    struct hw_perf_event *hwc = &event->hw;
    struct hws_trailer_entry *te;
    unsigned long *sdbt;
    unsigned long long event_overflow, sampl_overflow, num_sdb, te_flags;
    int done;

    /*
@ -1266,25 +1278,25 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
        te = (struct hws_trailer_entry *) trailer_entry_ptr(*sdbt);

        /* Leave loop if no more work to do (block full indicator) */
        if (!te->f) {
        if (!te->header.f) {
            done = 1;
            if (!flush_all)
                break;
        }

        /* Check the sample overflow count */
        if (te->overflow)
        if (te->header.overflow)
            /* Account sample overflows and, if a particular limit
             * is reached, extend the sampling buffer.
             * For details, see sfb_account_overflows().
             */
            sampl_overflow += te->overflow;
            sampl_overflow += te->header.overflow;

        /* Timestamps are valid for full sample-data-blocks only */
        debug_sprintf_event(sfdbg, 6, "%s: sdbt %#lx "
                    "overflow %llu timestamp %#llx\n",
                    __func__, (unsigned long)sdbt, te->overflow,
                    (te->f) ? trailer_timestamp(te) : 0ULL);
                    __func__, (unsigned long)sdbt, te->header.overflow,
                    (te->header.f) ? trailer_timestamp(te) : 0ULL);

        /* Collect all samples from a single sample-data-block and
         * flag if an (perf) event overflow happened.  If so, the PMU
@ -1294,12 +1306,16 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
        num_sdb++;

        /* Reset trailer (using compare-double-and-swap) */
        /* READ_ONCE() 16 byte header */
        prev.val = __cdsg(&te->header.val, 0, 0);
        do {
            te_flags = te->flags & ~SDB_TE_BUFFER_FULL_MASK;
            te_flags |= SDB_TE_ALERT_REQ_MASK;
        } while (!cmpxchg_double(&te->flags, &te->overflow,
                     te->flags, te->overflow,
                     te_flags, 0ULL));
            old.val = prev.val;
            new.val = prev.val;
            new.f = 0;
            new.a = 1;
            new.overflow = 0;
            prev.val = __cdsg(&te->header.val, old.val, new.val);
        } while (prev.val != old.val);

        /* Advance to next sample-data-block */
        sdbt++;

@ -1384,7 +1400,7 @@ static void aux_output_end(struct perf_output_handle *handle)
    range_scan = AUX_SDB_NUM_ALERT(aux);
    for (i = 0, idx = aux->head; i < range_scan; i++, idx++) {
        te = aux_sdb_trailer(aux, idx);
        if (!(te->flags & SDB_TE_BUFFER_FULL_MASK))
        if (!te->header.f)
            break;
    }
    /* i is num of SDBs which are full */
@ -1392,7 +1408,7 @@ static void aux_output_end(struct perf_output_handle *handle)

    /* Remove alert indicators in the buffer */
    te = aux_sdb_trailer(aux, aux->alert_mark);
    te->flags &= ~SDB_TE_ALERT_REQ_MASK;
    te->header.a = 0;

    debug_sprintf_event(sfdbg, 6, "%s: SDBs %ld range %ld head %ld\n",
                __func__, i, range_scan, aux->head);
@ -1437,9 +1453,9 @@ static int aux_output_begin(struct perf_output_handle *handle,
        idx = aux->empty_mark + 1;
        for (i = 0; i < range_scan; i++, idx++) {
            te = aux_sdb_trailer(aux, idx);
            te->flags &= ~(SDB_TE_BUFFER_FULL_MASK |
                       SDB_TE_ALERT_REQ_MASK);
            te->overflow = 0;
            te->header.f = 0;
            te->header.a = 0;
            te->header.overflow = 0;
        }
        /* Save the position of empty SDBs */
        aux->empty_mark = aux->head + range - 1;
@ -1448,7 +1464,7 @@ static int aux_output_begin(struct perf_output_handle *handle,
    /* Set alert indicator */
    aux->alert_mark = aux->head + range/2 - 1;
    te = aux_sdb_trailer(aux, aux->alert_mark);
    te->flags = te->flags | SDB_TE_ALERT_REQ_MASK;
    te->header.a = 1;

    /* Reset hardware buffer head */
    head = AUX_SDB_INDEX(aux, aux->head);
@ -1475,14 +1491,17 @@ static int aux_output_begin(struct perf_output_handle *handle,
static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index,
              unsigned long long *overflow)
{
    unsigned long long orig_overflow, orig_flags, new_flags;
    union hws_trailer_header old, prev, new;
    struct hws_trailer_entry *te;

    te = aux_sdb_trailer(aux, alert_index);
    /* READ_ONCE() 16 byte header */
    prev.val = __cdsg(&te->header.val, 0, 0);
    do {
        orig_flags = te->flags;
        *overflow = orig_overflow = te->overflow;
        if (orig_flags & SDB_TE_BUFFER_FULL_MASK) {
        old.val = prev.val;
        new.val = prev.val;
        *overflow = old.overflow;
        if (old.f) {
            /*
             * SDB is already set by hardware.
             * Abort and try to set somewhere
@ -1490,10 +1509,10 @@ static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index,
             */
            return false;
        }
        new_flags = orig_flags | SDB_TE_ALERT_REQ_MASK;
    } while (!cmpxchg_double(&te->flags, &te->overflow,
                 orig_flags, orig_overflow,
                 new_flags, 0ULL));
        new.a = 1;
        new.overflow = 0;
        prev.val = __cdsg(&te->header.val, old.val, new.val);
    } while (prev.val != old.val);
    return true;
}

@ -1522,8 +1541,9 @@ static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index,
static bool aux_reset_buffer(struct aux_buffer *aux, unsigned long range,
                 unsigned long long *overflow)
{
    unsigned long long orig_overflow, orig_flags, new_flags;
    unsigned long i, range_scan, idx, idx_old;
    union hws_trailer_header old, prev, new;
    unsigned long long orig_overflow;
    struct hws_trailer_entry *te;

    debug_sprintf_event(sfdbg, 6, "%s: range %ld head %ld alert %ld "
@ -1554,17 +1574,20 @@ static bool aux_reset_buffer(struct aux_buffer *aux, unsigned long range,
    idx_old = idx = aux->empty_mark + 1;
    for (i = 0; i < range_scan; i++, idx++) {
        te = aux_sdb_trailer(aux, idx);
        /* READ_ONCE() 16 byte header */
        prev.val = __cdsg(&te->header.val, 0, 0);
        do {
            orig_flags = te->flags;
            orig_overflow = te->overflow;
            new_flags = orig_flags & ~SDB_TE_BUFFER_FULL_MASK;
            old.val = prev.val;
            new.val = prev.val;
            orig_overflow = old.overflow;
            new.f = 0;
            new.overflow = 0;
            if (idx == aux->alert_mark)
                new_flags |= SDB_TE_ALERT_REQ_MASK;
                new.a = 1;
            else
                new_flags &= ~SDB_TE_ALERT_REQ_MASK;
        } while (!cmpxchg_double(&te->flags, &te->overflow,
                     orig_flags, orig_overflow,
                     new_flags, 0ULL));
                new.a = 0;
            prev.val = __cdsg(&te->header.val, old.val, new.val);
        } while (prev.val != old.val);
        *overflow += orig_overflow;
    }

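A pattern worth pulling out of the perf_cpum_sf.c hunks above: there is no plain 128-bit atomic load, so the code snapshots the trailer header with __cdsg(&te->header.val, 0, 0), a compare-double-and-swap that only stores if the header happens to be all zero but always returns the current 16-byte value, and then runs an ordinary CAS loop on that snapshot. A user-space analogue built on the GCC/Clang __atomic builtins (may need -latomic on some targets; a sketch of the pattern, not the s390 implementation):

#include <stdio.h>

typedef unsigned __int128 u128;

static u128 header;     /* stands in for te->header.val */

static void reset_trailer(void)
{
    u128 old, new;

    /* Atomic 16-byte snapshot, like prev.val = __cdsg(&val, 0, 0). */
    old = __atomic_load_n(&header, __ATOMIC_RELAXED);
    do {
        new = old;
        new &= ~(u128)1;        /* e.g. clear the "full" bit */
        new |= (u128)1 << 1;    /* e.g. set the "alert" bit */
    } while (!__atomic_compare_exchange_n(&header, &old, new, 0,
                                          __ATOMIC_RELAXED,
                                          __ATOMIC_RELAXED));
}

int main(void)
{
    reset_trailer();
    printf("header low bits: %llu\n", (unsigned long long)header);
    return 0;
}

The loop never dereferences the shared header directly; a failed exchange hands the fresh value back through old, exactly like prev.val = __cdsg(...) in the diff.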
@ -17,6 +17,8 @@
/* Handle ro_after_init data on our own. */
#define RO_AFTER_INIT_DATA

#define RUNTIME_DISCARD_EXIT

#define EMITS_PT_NOTE

#include <asm-generic/vmlinux.lds.h>
@ -79,6 +81,7 @@ SECTIONS
        _end_amode31_refs = .;
    }

    . = ALIGN(PAGE_SIZE);
    _edata = .;     /* End of data section */

    /* will be freed after init */
@ -193,6 +196,7 @@ SECTIONS

    BSS_SECTION(PAGE_SIZE, 4 * PAGE_SIZE, PAGE_SIZE)

    . = ALIGN(PAGE_SIZE);
    _end = . ;

    /*

@ -83,8 +83,9 @@ static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
        struct esca_block *sca = vcpu->kvm->arch.sca;
        union esca_sigp_ctrl *sigp_ctrl =
            &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
        union esca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;
        union esca_sigp_ctrl new_val = {0}, old_val;

        old_val = READ_ONCE(*sigp_ctrl);
        new_val.scn = src_id;
        new_val.c = 1;
        old_val.c = 0;
@ -95,8 +96,9 @@ static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
        struct bsca_block *sca = vcpu->kvm->arch.sca;
        union bsca_sigp_ctrl *sigp_ctrl =
            &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
        union bsca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;
        union bsca_sigp_ctrl new_val = {0}, old_val;

        old_val = READ_ONCE(*sigp_ctrl);
        new_val.scn = src_id;
        new_val.c = 1;
        old_val.c = 0;
@ -126,16 +128,18 @@ static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
        struct esca_block *sca = vcpu->kvm->arch.sca;
        union esca_sigp_ctrl *sigp_ctrl =
            &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
        union esca_sigp_ctrl old = *sigp_ctrl;
        union esca_sigp_ctrl old;

        old = READ_ONCE(*sigp_ctrl);
        expect = old.value;
        rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
    } else {
        struct bsca_block *sca = vcpu->kvm->arch.sca;
        union bsca_sigp_ctrl *sigp_ctrl =
            &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
        union bsca_sigp_ctrl old = *sigp_ctrl;
        union bsca_sigp_ctrl old;

        old = READ_ONCE(*sigp_ctrl);
        expect = old.value;
        rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
    }

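The KVM hunks fix the same class of bug: "old_val = *sigp_ctrl" is an ordinary copy that the compiler may perform piecemeal while another CPU updates the SIGP control word, so the value later fed to cmpxchg() can mix two states. Reading through READ_ONCE() yields one coherent snapshot. A rough user-space model (hypothetical layout modeled on the diff; C11 atomics stand in for READ_ONCE() and cmpxchg()):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

union sigp_ctrl {
    struct {
        uint16_t c:1;       /* "call pending" bit */
        uint16_t scn:15;    /* source CPU number */
    };
    uint16_t value;
};

static _Atomic uint16_t ctrl_word;

static int inject(uint16_t src_id)
{
    union sigp_ctrl old, new_val = { .value = 0 };

    /* Single atomic read: one coherent snapshot, not a field-by-field
     * copy that another CPU could update half-way through. */
    old.value = atomic_load(&ctrl_word);
    new_val.scn = src_id;
    new_val.c = 1;
    old.c = 0;  /* only succeed if no call was pending */

    return atomic_compare_exchange_strong(&ctrl_word, &old.value,
                                          new_val.value) ? 0 : -16;
}

int main(void)
{
    printf("rc=%d\n", inject(3));   /* -16 models -EBUSY */
    return 0;
}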
@ -28,7 +28,7 @@
#define pmd_ERROR(e) \
    printk("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))

typedef struct {
typedef union {
    struct {
        unsigned long pmd_low;
        unsigned long pmd_high;

@ -32,7 +32,7 @@ intcall:
    movw    %dx, %si
    movw    %sp, %di
    movw    $11, %cx
    rep; movsd
    rep; movsl

    /* Pop full state from the stack */
    popal
@ -67,7 +67,7 @@ intcall:
    jz      4f
    movw    %sp, %si
    movw    $11, %cx
    rep; movsd
    rep; movsl
4:  addw    $44, %sp

    /* Restore state and return */

@ -386,8 +386,8 @@ static int handle_mmio(struct pt_regs *regs, struct ve_info *ve)
{
    unsigned long *reg, val, vaddr;
    char buffer[MAX_INSN_SIZE];
    enum insn_mmio_type mmio;
    struct insn insn = {};
    enum mmio_type mmio;
    int size, extend_size;
    u8 extend_val = 0;

@ -402,10 +402,10 @@ static int handle_mmio(struct pt_regs *regs, struct ve_info *ve)
        return -EINVAL;

    mmio = insn_decode_mmio(&insn, &size);
    if (WARN_ON_ONCE(mmio == MMIO_DECODE_FAILED))
    if (WARN_ON_ONCE(mmio == INSN_MMIO_DECODE_FAILED))
        return -EINVAL;

    if (mmio != MMIO_WRITE_IMM && mmio != MMIO_MOVS) {
    if (mmio != INSN_MMIO_WRITE_IMM && mmio != INSN_MMIO_MOVS) {
        reg = insn_get_modrm_reg_ptr(&insn, regs);
        if (!reg)
            return -EINVAL;
@ -426,23 +426,23 @@ static int handle_mmio(struct pt_regs *regs, struct ve_info *ve)

    /* Handle writes first */
    switch (mmio) {
    case MMIO_WRITE:
    case INSN_MMIO_WRITE:
        memcpy(&val, reg, size);
        if (!mmio_write(size, ve->gpa, val))
            return -EIO;
        return insn.length;
    case MMIO_WRITE_IMM:
    case INSN_MMIO_WRITE_IMM:
        val = insn.immediate.value;
        if (!mmio_write(size, ve->gpa, val))
            return -EIO;
        return insn.length;
    case MMIO_READ:
    case MMIO_READ_ZERO_EXTEND:
    case MMIO_READ_SIGN_EXTEND:
    case INSN_MMIO_READ:
    case INSN_MMIO_READ_ZERO_EXTEND:
    case INSN_MMIO_READ_SIGN_EXTEND:
        /* Reads are handled below */
        break;
    case MMIO_MOVS:
    case MMIO_DECODE_FAILED:
    case INSN_MMIO_MOVS:
    case INSN_MMIO_DECODE_FAILED:
        /*
         * MMIO was accessed with an instruction that could not be
         * decoded or handled properly. It was likely not using io.h
@ -459,15 +459,15 @@ static int handle_mmio(struct pt_regs *regs, struct ve_info *ve)
        return -EIO;

    switch (mmio) {
    case MMIO_READ:
    case INSN_MMIO_READ:
        /* Zero-extend for 32-bit operation */
        extend_size = size == 4 ? sizeof(*reg) : 0;
        break;
    case MMIO_READ_ZERO_EXTEND:
    case INSN_MMIO_READ_ZERO_EXTEND:
        /* Zero extend based on operand size */
        extend_size = insn.opnd_bytes;
        break;
    case MMIO_READ_SIGN_EXTEND:
    case INSN_MMIO_READ_SIGN_EXTEND:
        /* Sign extend based on operand size */
        extend_size = insn.opnd_bytes;
        if (size == 1 && val & BIT(7))

@ -1387,7 +1387,7 @@ static int __init amd_core_pmu_init(void)
     * numbered counter following it.
     */
    for (i = 0; i < x86_pmu.num_counters - 1; i += 2)
        even_ctr_mask |= 1 << i;
        even_ctr_mask |= BIT_ULL(i);

    pair_constraint = (struct event_constraint)
        __EVENT_CONSTRAINT(0, even_ctr_mask, 0,

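The one-character-looking fix above is about 64-bit correctness: "1 << i" is computed in int, so from bit 31 onward it overflows and sign-extends when widened into the 64-bit mask, while BIT_ULL(i) keeps the shift in unsigned long long. A compilable demonstration (the BIT_ULL() macro is redefined locally to match the kernel's):

#include <stdio.h>

#define BIT_ULL(n) (1ULL << (n))

int main(void)
{
    unsigned long long bad = 0, good = 0;
    int i = 31;

    bad |= 1 << i;      /* int overflow: sign-extends to
                         * 0xffffffff80000000 on common ABIs */
    good |= BIT_ULL(i); /* 0x0000000080000000 as intended */

    printf("bad=%#llx good=%#llx\n", bad, good);
    return 0;
}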
@ -41,6 +41,7 @@
 * MSR_CORE_C1_RES: CORE C1 Residency Counter
 *                  perf code: 0x00
 *                  Available model: SLM,AMT,GLM,CNL,ICX,TNT,ADL,RPL
 *                                   MTL
 *                  Scope: Core (each processor core has a MSR)
 * MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
 *                  perf code: 0x01
@ -51,50 +52,50 @@
 *                  perf code: 0x02
 *                  Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
 *                                   SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX,
 *                                   TGL,TNT,RKL,ADL,RPL,SPR
 *                                   TGL,TNT,RKL,ADL,RPL,SPR,MTL
 *                  Scope: Core
 * MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
 *                  perf code: 0x03
 *                  Available model: SNB,IVB,HSW,BDW,SKL,CNL,KBL,CML,
 *                                   ICL,TGL,RKL,ADL,RPL
 *                                   ICL,TGL,RKL,ADL,RPL,MTL
 *                  Scope: Core
 * MSR_PKG_C2_RESIDENCY: Package C2 Residency Counter.
 *                  perf code: 0x00
 *                  Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL,
 *                                   KBL,CML,ICL,ICX,TGL,TNT,RKL,ADL,
 *                                   RPL,SPR
 *                                   RPL,SPR,MTL
 *                  Scope: Package (physical package)
 * MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter.
 *                  perf code: 0x01
 *                  Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL,
 *                                   GLM,CNL,KBL,CML,ICL,TGL,TNT,RKL,
 *                                   ADL,RPL
 *                                   ADL,RPL,MTL
 *                  Scope: Package (physical package)
 * MSR_PKG_C6_RESIDENCY: Package C6 Residency Counter.
 *                  perf code: 0x02
 *                  Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
 *                                   SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX,
 *                                   TGL,TNT,RKL,ADL,RPL,SPR
 *                                   TGL,TNT,RKL,ADL,RPL,SPR,MTL
 *                  Scope: Package (physical package)
 * MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter.
 *                  perf code: 0x03
 *                  Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,CNL,
 *                                   KBL,CML,ICL,TGL,RKL,ADL,RPL
 *                                   KBL,CML,ICL,TGL,RKL,ADL,RPL,MTL
 *                  Scope: Package (physical package)
 * MSR_PKG_C8_RESIDENCY: Package C8 Residency Counter.
 *                  perf code: 0x04
 *                  Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL,
 *                                   ADL,RPL
 *                                   ADL,RPL,MTL
 *                  Scope: Package (physical package)
 * MSR_PKG_C9_RESIDENCY: Package C9 Residency Counter.
 *                  perf code: 0x05
 *                  Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL,
 *                                   ADL,RPL
 *                                   ADL,RPL,MTL
 *                  Scope: Package (physical package)
 * MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
 *                  perf code: 0x06
 *                  Available model: HSW ULT,KBL,GLM,CNL,CML,ICL,TGL,
 *                                   TNT,RKL,ADL,RPL
 *                                   TNT,RKL,ADL,RPL,MTL
 *                  Scope: Package (physical package)
 *
 */
@ -686,6 +687,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
    X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, &adl_cstates),
    X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, &adl_cstates),
    X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, &adl_cstates),
    X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE, &adl_cstates),
    X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, &adl_cstates),
    { },
};
MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);

@ -1833,6 +1833,7 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = {
    X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, &adl_uncore_init),
    X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, &adl_uncore_init),
    X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &spr_uncore_init),
    X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &spr_uncore_init),
    X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &snr_uncore_init),
    {},
};

@ -69,6 +69,7 @@ static bool test_intel(int idx, void *data)
    case INTEL_FAM6_BROADWELL_G:
    case INTEL_FAM6_BROADWELL_X:
    case INTEL_FAM6_SAPPHIRERAPIDS_X:
    case INTEL_FAM6_EMERALDRAPIDS_X:

    case INTEL_FAM6_ATOM_SILVERMONT:
    case INTEL_FAM6_ATOM_SILVERMONT_D:
@ -107,6 +108,8 @@ static bool test_intel(int idx, void *data)
    case INTEL_FAM6_RAPTORLAKE:
    case INTEL_FAM6_RAPTORLAKE_P:
    case INTEL_FAM6_RAPTORLAKE_S:
    case INTEL_FAM6_METEORLAKE:
    case INTEL_FAM6_METEORLAKE_L:
        if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
            return true;
        break;

@ -800,13 +800,18 @@ static const struct x86_cpu_id rapl_model_match[] __initconst = {
    X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &model_hsx),
    X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, &model_skl),
    X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, &model_skl),
    X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &model_skl),
    X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, &model_skl),
    X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &model_skl),
    X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &model_skl),
    X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N, &model_skl),
    X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &model_spr),
    X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &model_spr),
    X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, &model_skl),
    X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, &model_skl),
    X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, &model_skl),
    X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE, &model_skl),
    X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, &model_skl),
    {},
};
MODULE_DEVICE_TABLE(x86cpu, rapl_model_match);

@ -32,16 +32,16 @@ int insn_fetch_from_user_inatomic(struct pt_regs *regs,
bool insn_decode_from_regs(struct insn *insn, struct pt_regs *regs,
               unsigned char buf[MAX_INSN_SIZE], int buf_size);

enum mmio_type {
    MMIO_DECODE_FAILED,
    MMIO_WRITE,
    MMIO_WRITE_IMM,
    MMIO_READ,
    MMIO_READ_ZERO_EXTEND,
    MMIO_READ_SIGN_EXTEND,
    MMIO_MOVS,
enum insn_mmio_type {
    INSN_MMIO_DECODE_FAILED,
    INSN_MMIO_WRITE,
    INSN_MMIO_WRITE_IMM,
    INSN_MMIO_READ,
    INSN_MMIO_READ_ZERO_EXTEND,
    INSN_MMIO_READ_SIGN_EXTEND,
    INSN_MMIO_MOVS,
};

enum mmio_type insn_decode_mmio(struct insn *insn, int *bytes);
enum insn_mmio_type insn_decode_mmio(struct insn *insn, int *bytes);

#endif /* _ASM_X86_INSN_EVAL_H */

@ -1111,6 +1111,7 @@ struct msr_bitmap_range {

/* Xen emulation context */
struct kvm_xen {
    struct mutex xen_lock;
    u32 xen_version;
    bool long_mode;
    bool runstate_update_flag;

@ -119,7 +119,7 @@ static bool is_coretext(const struct core_text *ct, void *addr)
    return within_module_coretext(addr);
}

static __init_or_module bool skip_addr(void *dest)
static bool skip_addr(void *dest)
{
    if (dest == error_entry)
        return true;
@ -181,7 +181,7 @@ static const u8 nops[] = {
    0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
};

static __init_or_module void *patch_dest(void *dest, bool direct)
static void *patch_dest(void *dest, bool direct)
{
    unsigned int tsize = SKL_TMPL_SIZE;
    u8 *pad = dest - tsize;

@ -1981,6 +1981,8 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
        if (ctrl == PR_SPEC_FORCE_DISABLE)
            task_set_spec_ib_force_disable(task);
        task_update_spec_tif(task);
        if (task == current)
            indirect_branch_prediction_barrier();
        break;
    default:
        return -ERANGE;

@ -146,6 +146,30 @@ static inline struct rmid_entry *__rmid_entry(u32 rmid)
    return entry;
}

static int __rmid_read(u32 rmid, enum resctrl_event_id eventid, u64 *val)
{
    u64 msr_val;

    /*
     * As per the SDM, when IA32_QM_EVTSEL.EvtID (bits 7:0) is configured
     * with a valid event code for supported resource type and the bits
     * IA32_QM_EVTSEL.RMID (bits 41:32) are configured with valid RMID,
     * IA32_QM_CTR.data (bits 61:0) reports the monitored data.
     * IA32_QM_CTR.Error (bit 63) and IA32_QM_CTR.Unavailable (bit 62)
     * are error bits.
     */
    wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid);
    rdmsrl(MSR_IA32_QM_CTR, msr_val);

    if (msr_val & RMID_VAL_ERROR)
        return -EIO;
    if (msr_val & RMID_VAL_UNAVAIL)
        return -EINVAL;

    *val = msr_val;
    return 0;
}

static struct arch_mbm_state *get_arch_mbm_state(struct rdt_hw_domain *hw_dom,
                         u32 rmid,
                         enum resctrl_event_id eventid)
@ -172,8 +196,12 @@ void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d,
    struct arch_mbm_state *am;

    am = get_arch_mbm_state(hw_dom, rmid, eventid);
    if (am)
    if (am) {
        memset(am, 0, sizeof(*am));

        /* Record any initial, non-zero count value. */
        __rmid_read(rmid, eventid, &am->prev_msr);
    }
}

static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr, unsigned int width)
@ -191,25 +219,14 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d,
    struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
    struct arch_mbm_state *am;
    u64 msr_val, chunks;
    int ret;

    if (!cpumask_test_cpu(smp_processor_id(), &d->cpu_mask))
        return -EINVAL;

    /*
     * As per the SDM, when IA32_QM_EVTSEL.EvtID (bits 7:0) is configured
     * with a valid event code for supported resource type and the bits
     * IA32_QM_EVTSEL.RMID (bits 41:32) are configured with valid RMID,
     * IA32_QM_CTR.data (bits 61:0) reports the monitored data.
     * IA32_QM_CTR.Error (bit 63) and IA32_QM_CTR.Unavailable (bit 62)
     * are error bits.
     */
    wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid);
    rdmsrl(MSR_IA32_QM_CTR, msr_val);

    if (msr_val & RMID_VAL_ERROR)
        return -EIO;
    if (msr_val & RMID_VAL_UNAVAIL)
        return -EINVAL;
    ret = __rmid_read(rmid, eventid, &msr_val);
    if (ret)
        return ret;

    am = get_arch_mbm_state(hw_dom, rmid, eventid);
    if (am) {

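The refactor above funnels every IA32_QM_EVTSEL/IA32_QM_CTR access through __rmid_read(), so the reset path can snapshot a counter's initial value with exactly the same error handling as the read path. A user-space model of just the QM_CTR decoding step, with the bit layout taken from the comment in the hunk (plain ints stand in for -EIO/-EINVAL):

#include <stdint.h>
#include <stdio.h>

#define RMID_VAL_ERROR   (1ULL << 63)   /* bad event/RMID programming */
#define RMID_VAL_UNAVAIL (1ULL << 62)   /* no data for this RMID yet */

static int qm_ctr_decode(uint64_t msr_val, uint64_t *val)
{
    if (msr_val & RMID_VAL_ERROR)
        return -5;      /* -EIO */
    if (msr_val & RMID_VAL_UNAVAIL)
        return -22;     /* -EINVAL */

    *val = msr_val & ((1ULL << 62) - 1);    /* data in bits 61:0 */
    return 0;
}

int main(void)
{
    uint64_t v = 0;

    printf("rc=%d\n", qm_ctr_decode(RMID_VAL_UNAVAIL, &v));
    printf("rc=%d v=%llu\n", qm_ctr_decode(1234, &v),
           (unsigned long long)v);
    return 0;
}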
@ -580,8 +580,10 @@ static int __rdtgroup_move_task(struct task_struct *tsk,
    /*
     * Ensure the task's closid and rmid are written before determining if
     * the task is current that will decide if it will be interrupted.
     * This pairs with the full barrier between the rq->curr update and
     * resctrl_sched_in() during context switch.
     */
    barrier();
    smp_mb();

    /*
     * By now, the task's closid and rmid are set. If the task is current
@ -2401,6 +2403,14 @@ static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
            WRITE_ONCE(t->closid, to->closid);
            WRITE_ONCE(t->rmid, to->mon.rmid);

            /*
             * Order the closid/rmid stores above before the loads
             * in task_curr(). This pairs with the full barrier
             * between the rq->curr update and resctrl_sched_in()
             * during context switch.
             */
            smp_mb();

            /*
             * If the task is on a CPU, set the CPU in the mask.
             * The detection is inaccurate as tasks might move or

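The barrier() to smp_mb() change is the crux of these resctrl hunks: barrier() only stops compiler reordering on the local CPU, while the closid/rmid stores must be globally visible before the loads in task_curr(), pairing with the full barrier after the rq->curr update in the scheduler. A user-space analogue of the store-fence-load shape using C11 fences (a sketch; smp_mb() is modeled as a seq_cst fence):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic int closid;
static _Atomic int task_is_current;

/* Writer side: move a task to a new control group. */
static int move_task(int new_closid)
{
    atomic_store_explicit(&closid, new_closid, memory_order_relaxed);

    /* Full barrier: the store above must be visible before we test
     * whether the task is running, or we may skip a needed IPI. */
    atomic_thread_fence(memory_order_seq_cst);

    return atomic_load_explicit(&task_is_current, memory_order_relaxed);
}

int main(void)
{
    printf("needs IPI: %d\n", move_task(7));
    return 0;
}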
@ -401,10 +401,8 @@ int crash_load_segments(struct kimage *image)
    kbuf.buf_align = ELF_CORE_HEADER_ALIGN;
    kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
    ret = kexec_add_buffer(&kbuf);
    if (ret) {
        vfree((void *)image->elf_headers);
    if (ret)
        return ret;
    }
    image->elf_load_addr = kbuf.mem;
    pr_debug("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
         image->elf_load_addr, kbuf.bufsz, kbuf.memsz);

@ -37,6 +37,7 @@
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/kgdb.h>
#include <linux/ftrace.h>
#include <linux/kasan.h>
#include <linux/moduleloader.h>
@ -281,12 +282,15 @@ static int can_probe(unsigned long paddr)
        if (ret < 0)
            return 0;

#ifdef CONFIG_KGDB
        /*
         * Another debugging subsystem might insert this breakpoint.
         * In that case, we can't recover it.
         * If there is a dynamically installed kgdb sw breakpoint,
         * this function should not be probed.
         */
        if (insn.opcode.bytes[0] == INT3_INSN_OPCODE)
        if (insn.opcode.bytes[0] == INT3_INSN_OPCODE &&
            kgdb_has_hit_break(addr))
            return 0;
#endif
        addr += insn.length;
    }

@ -15,6 +15,7 @@
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/kgdb.h>
#include <linux/ftrace.h>
#include <linux/objtool.h>
#include <linux/pgtable.h>
@ -279,19 +280,6 @@ static int insn_is_indirect_jump(struct insn *insn)
    return ret;
}

static bool is_padding_int3(unsigned long addr, unsigned long eaddr)
{
    unsigned char ops;

    for (; addr < eaddr; addr++) {
        if (get_kernel_nofault(ops, (void *)addr) < 0 ||
            ops != INT3_INSN_OPCODE)
            return false;
    }

    return true;
}

/* Decode whole function to ensure any instructions don't jump into target */
static int can_optimize(unsigned long paddr)
{
@ -334,15 +322,15 @@ static int can_optimize(unsigned long paddr)
        ret = insn_decode_kernel(&insn, (void *)recovered_insn);
        if (ret < 0)
            return 0;

#ifdef CONFIG_KGDB
        /*
         * In the case of detecting unknown breakpoint, this could be
         * a padding INT3 between functions. Let's check that all the
         * rest of the bytes are also INT3.
         * If there is a dynamically installed kgdb sw breakpoint,
         * this function should not be probed.
         */
        if (insn.opcode.bytes[0] == INT3_INSN_OPCODE)
            return is_padding_int3(addr, paddr - offset + size) ? 1 : 0;

        if (insn.opcode.bytes[0] == INT3_INSN_OPCODE &&
            kgdb_has_hit_break(addr))
            return 0;
#endif
        /* Recover address */
        insn.kaddr = (void *)addr;
        insn.next_byte = (void *)(addr + insn.length);

@ -1536,32 +1536,32 @@ static enum es_result vc_handle_mmio_movs(struct es_em_ctxt *ctxt,
static enum es_result vc_handle_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{
    struct insn *insn = &ctxt->insn;
    enum insn_mmio_type mmio;
    unsigned int bytes = 0;
    enum mmio_type mmio;
    enum es_result ret;
    u8 sign_byte;
    long *reg_data;

    mmio = insn_decode_mmio(insn, &bytes);
    if (mmio == MMIO_DECODE_FAILED)
    if (mmio == INSN_MMIO_DECODE_FAILED)
        return ES_DECODE_FAILED;

    if (mmio != MMIO_WRITE_IMM && mmio != MMIO_MOVS) {
    if (mmio != INSN_MMIO_WRITE_IMM && mmio != INSN_MMIO_MOVS) {
        reg_data = insn_get_modrm_reg_ptr(insn, ctxt->regs);
        if (!reg_data)
            return ES_DECODE_FAILED;
    }

    switch (mmio) {
    case MMIO_WRITE:
    case INSN_MMIO_WRITE:
        memcpy(ghcb->shared_buffer, reg_data, bytes);
        ret = vc_do_mmio(ghcb, ctxt, bytes, false);
        break;
    case MMIO_WRITE_IMM:
    case INSN_MMIO_WRITE_IMM:
        memcpy(ghcb->shared_buffer, insn->immediate1.bytes, bytes);
        ret = vc_do_mmio(ghcb, ctxt, bytes, false);
        break;
    case MMIO_READ:
    case INSN_MMIO_READ:
        ret = vc_do_mmio(ghcb, ctxt, bytes, true);
        if (ret)
            break;
@ -1572,7 +1572,7 @@ static enum es_result vc_handle_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)

        memcpy(reg_data, ghcb->shared_buffer, bytes);
        break;
    case MMIO_READ_ZERO_EXTEND:
    case INSN_MMIO_READ_ZERO_EXTEND:
        ret = vc_do_mmio(ghcb, ctxt, bytes, true);
        if (ret)
            break;
@ -1581,7 +1581,7 @@ static enum es_result vc_handle_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
        memset(reg_data, 0, insn->opnd_bytes);
        memcpy(reg_data, ghcb->shared_buffer, bytes);
        break;
    case MMIO_READ_SIGN_EXTEND:
    case INSN_MMIO_READ_SIGN_EXTEND:
        ret = vc_do_mmio(ghcb, ctxt, bytes, true);
        if (ret)
            break;
@ -1600,7 +1600,7 @@ static enum es_result vc_handle_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
        memset(reg_data, sign_byte, insn->opnd_bytes);
        memcpy(reg_data, ghcb->shared_buffer, bytes);
        break;
    case MMIO_MOVS:
    case INSN_MMIO_MOVS:
        ret = vc_handle_mmio_movs(ctxt, bytes);
        break;
    default:

Some files were not shown because too many files have changed in this diff.