Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Author: David S. Miller
Date:   2020-08-23 11:48:27 -07:00
Commit: 7611cbb900

299 changed files with 3448 additions and 1807 deletions


@ -32,6 +32,7 @@ Alex Shi <alex.shi@linux.alibaba.com> <alex.shi@intel.com>
Alex Shi <alex.shi@linux.alibaba.com> <alex.shi@linaro.org>
Al Viro <viro@ftp.linux.org.uk>
Al Viro <viro@zenIV.linux.org.uk>
Andi Kleen <ak@linux.intel.com> <ak@suse.de>
Andi Shyti <andi@etezian.org> <andi.shyti@samsung.com>
Andreas Herrmann <aherrman@de.ibm.com>
Andrew Morton <akpm@linux-foundation.org>
@ -132,6 +133,7 @@ Jan Glauber <jan.glauber@gmail.com> <jang@de.ibm.com>
Jan Glauber <jan.glauber@gmail.com> <jang@linux.vnet.ibm.com>
Jan Glauber <jan.glauber@gmail.com> <jglauber@cavium.com>
Jason Gunthorpe <jgg@ziepe.ca> <jgg@mellanox.com>
Jason Gunthorpe <jgg@ziepe.ca> <jgg@nvidia.com>
Jason Gunthorpe <jgg@ziepe.ca> <jgunthorpe@obsidianresearch.com>
<javier@osg.samsung.com> <javier.martinez@collabora.co.uk>
Javi Merino <javi.merino@kernel.org> <javi.merino@arm.com>
@ -178,6 +180,7 @@ Leonardo Bras <leobras.c@gmail.com> <leonardo@linux.ibm.com>
Leonid I Ananiev <leonid.i.ananiev@intel.com>
Leon Romanovsky <leon@kernel.org> <leon@leon.nu>
Leon Romanovsky <leon@kernel.org> <leonro@mellanox.com>
Leon Romanovsky <leon@kernel.org> <leonro@nvidia.com>
Linas Vepstas <linas@austin.ibm.com>
Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch>
Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@web.de>


@ -43,7 +43,7 @@ Description: read only
This sysfs interface exposes the number of cores per chip
present in the system.
What: /sys/devices/hv_24x7/interface/cpumask
What: /sys/devices/hv_24x7/cpumask
Date: July 2020
Contact: Linux on PowerPC Developer List <linuxppc-dev@lists.ozlabs.org>
Description: read only
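(Editorial aside, not part of the patch: reading the relocated attribute from
userspace is an ordinary sysfs file read. A minimal C sketch, assuming the
post-move path documented above and an hv_24x7-capable PowerPC system:)

#include <stdio.h>

int main(void)
{
    char buf[4096];
    /* Path taken from the new ABI entry above. */
    FILE *f = fopen("/sys/devices/hv_24x7/cpumask", "r");

    if (!f)
        return 1;
    if (fgets(buf, sizeof(buf), f))
        printf("hv_24x7 cpumask: %s", buf);
    fclose(f);
    return 0;
}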


@ -489,6 +489,9 @@ Files in /sys/fs/ext4/<devname>:
multiple of this tuning parameter if the stripe size is not set in the
ext4 superblock
mb_max_inode_prealloc
The maximum length of per-inode ext4_prealloc_space list.
mb_max_to_scan
The maximum number of extents the multiblock allocator will search to
find the best extent.
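(Editorial sketch, not from the patch: the new tunable is adjusted like any
other ext4 sysfs knob. The device name "sda1" and the value 64 below are
hypothetical placeholders:)

#include <stdio.h>

int main(void)
{
    /* Substitute the block device backing your ext4 mount for "sda1". */
    FILE *f = fopen("/sys/fs/ext4/sda1/mb_max_inode_prealloc", "w");

    if (!f)
        return 1;
    fprintf(f, "64\n");  /* cap the per-inode preallocation list at 64 entries */
    fclose(f);
    return 0;
}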
@ -529,21 +532,21 @@ Files in /sys/fs/ext4/<devname>:
Ioctls
======
There is some Ext4 specific functionality which can be accessed by applications
through the system call interfaces. The list of all Ext4 specific ioctls are
shown in the table below.
Ext4 implements various ioctls which can be used by applications to access
ext4-specific functionality. An incomplete list of these ioctls is shown in the
table below. This list includes truly ext4-specific ioctls (``EXT4_IOC_*``) as
well as ioctls that may have been ext4-specific originally but are now supported
by some other filesystem(s) too (``FS_IOC_*``).
Table of Ext4 specific ioctls
Table of Ext4 ioctls
EXT4_IOC_GETFLAGS
FS_IOC_GETFLAGS
Get additional attributes associated with inode. The ioctl argument is
an integer bitfield, with bit values described in ext4.h. This ioctl is
an alias for FS_IOC_GETFLAGS.
an integer bitfield, with bit values described in ext4.h.
EXT4_IOC_SETFLAGS
FS_IOC_SETFLAGS
Set additional attributes associated with inode. The ioctl argument is
an integer bitfield, with bit values described in ext4.h. This ioctl is
an alias for FS_IOC_SETFLAGS.
an integer bitfield, with bit values described in ext4.h.
EXT4_IOC_GETVERSION, EXT4_IOC_GETVERSION_OLD
Get the inode i_generation number stored for each inode. The

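(Editorial sketch of the FS_IOC_GETFLAGS/FS_IOC_SETFLAGS usage described
above; not part of the patch. The file path is hypothetical and error
handling is abbreviated:)

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>   /* FS_IOC_GETFLAGS, FS_IOC_SETFLAGS, FS_APPEND_FL */

int main(void)
{
    int fd = open("/mnt/ext4/testfile", O_RDONLY);  /* hypothetical path */
    int flags;

    if (fd < 0 || ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0)
        return 1;

    flags |= FS_APPEND_FL;  /* set the append-only attribute bit */
    if (ioctl(fd, FS_IOC_SETFLAGS, &flags) < 0)
        return 1;

    close(fd);
    return 0;
}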

@ -1233,8 +1233,7 @@
efi= [EFI]
Format: { "debug", "disable_early_pci_dma",
"nochunk", "noruntime", "nosoftreserve",
"novamap", "no_disable_early_pci_dma",
"old_map" }
"novamap", "no_disable_early_pci_dma" }
debug: enable misc debug output.
disable_early_pci_dma: disable the busmaster bit on all
PCI bridges while in the EFI boot stub.
@ -1251,8 +1250,6 @@
novamap: do not call SetVirtualAddressMap().
no_disable_early_pci_dma: Leave the busmaster bit set
on all PCI bridges while in the EFI boot stub
old_map [X86-64]: switch to the old ioremap-based EFI
runtime services mapping. [Needs CONFIG_X86_UV=y]
efi_no_storage_paranoia [EFI; X86]
Using this parameter you can use more than 50% of


@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Clock bindings for Freescale i.MX23
maintainers:
- Shawn Guo <shawn.guo@linaro.org>
- Shawn Guo <shawnguo@kernel.org>
description: |
The clock consumer should specify the desired clock by having the clock


@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Clock bindings for Freescale i.MX28
maintainers:
- Shawn Guo <shawn.guo@linaro.org>
- Shawn Guo <shawnguo@kernel.org>
description: |
The clock consumer should specify the desired clock by having the clock


@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Freescale MXS GPIO controller
maintainers:
- Shawn Guo <shawn.guo@linaro.org>
- Shawn Guo <shawnguo@kernel.org>
- Anson Huang <Anson.Huang@nxp.com>
description: |


@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Freescale MXS Inter IC (I2C) Controller
maintainers:
- Shawn Guo <shawn.guo@linaro.org>
- Shawn Guo <shawnguo@kernel.org>
properties:
compatible:


@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Freescale Enhanced Secure Digital Host Controller (eSDHC) for i.MX
maintainers:
- Shawn Guo <shawn.guo@linaro.org>
- Shawn Guo <shawnguo@kernel.org>
allOf:
- $ref: "mmc-controller.yaml"


@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Freescale MXS MMC controller
maintainers:
- Shawn Guo <shawn.guo@linaro.org>
- Shawn Guo <shawnguo@kernel.org>
description: |
The Freescale MXS Synchronous Serial Ports (SSP) can act as a MMC controller


@ -54,7 +54,8 @@ properties:
phy-connection-type:
description:
Operation mode of the PHY interface
Specifies interface type between the Ethernet device and a physical
layer (PHY) device.
enum:
# There is not a standard bus between the MAC and the PHY,
# something proprietary is being used to embed the PHY in the


@ -59,9 +59,15 @@ properties:
clocks:
maxItems: 1
pinctrl-0: true
power-domains:
maxItems: 1
pinctrl-names: true
resets:
maxItems: 1
phy-mode: true
phy-handle: true
renesas,no-ether-link:
type: boolean
@ -74,6 +80,11 @@ properties:
specify when the Ether LINK signal is active-low instead of normal
active-high
patternProperties:
"^ethernet-phy@[0-9a-f]$":
type: object
$ref: ethernet-phy.yaml#
required:
- compatible
- reg
@ -83,7 +94,8 @@ required:
- '#address-cells'
- '#size-cells'
- clocks
- pinctrl-0
additionalProperties: false
examples:
# Lager board
@ -99,8 +111,6 @@ examples:
clocks = <&mstp8_clks R8A7790_CLK_ETHER>;
phy-mode = "rmii";
phy-handle = <&phy1>;
pinctrl-0 = <&ether_pins>;
pinctrl-names = "default";
renesas,ether-link-active-low;
#address-cells = <1>;
#size-cells = <0>;
@ -109,7 +119,5 @@ examples:
reg = <1>;
interrupt-parent = <&irqc0>;
interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
pinctrl-0 = <&phy1_pins>;
pinctrl-names = "default";
};
};


@ -9,6 +9,14 @@ title: PCIe RC controller on Intel Gateway SoCs
maintainers:
- Dilip Kota <eswara.kota@linux.intel.com>
select:
properties:
compatible:
contains:
const: intel,lgm-pcie
required:
- compatible
properties:
compatible:
items:


@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Freescale MXS PWM controller
maintainers:
- Shawn Guo <shawn.guo@linaro.org>
- Shawn Guo <shawnguo@kernel.org>
- Anson Huang <anson.huang@nxp.com>
properties:


@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Freescale (Enhanced) Configurable Serial Peripheral Interface (CSPI/eCSPI) for i.MX
maintainers:
- Shawn Guo <shawn.guo@linaro.org>
- Shawn Guo <shawnguo@kernel.org>
allOf:
- $ref: "/schemas/spi/spi-controller.yaml#"


@ -39,6 +39,7 @@ properties:
spi common code does not support use of CS signals discontinuously.
i.MX8DXL-EVK board only uses CS1 without using CS0. Therefore, add
this property to re-config the chipselect value in the LPSPI driver.
type: boolean
required:
- compatible


@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: NXP i.MX Thermal Binding
maintainers:
- Shawn Guo <shawn.guo@linaro.org>
- Shawn Guo <shawnguo@kernel.org>
- Anson Huang <Anson.Huang@nxp.com>
properties:


@ -0,0 +1,60 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/timer/sifive,clint.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: SiFive Core Local Interruptor
maintainers:
- Palmer Dabbelt <palmer@dabbelt.com>
- Anup Patel <anup.patel@wdc.com>
description:
SiFive (and other RISC-V) SOCs include an implementation of the SiFive
Core Local Interruptor (CLINT) for M-mode timer and M-mode inter-processor
interrupts. It directly connects to the timer and inter-processor interrupt
lines of various HARTs (or CPUs), so the RISC-V per-HART (or per-CPU) local
interrupt controller is the parent interrupt controller for the CLINT device.
The clock frequency of the CLINT is specified via the "timebase-frequency" DT
property of the "/cpus" DT node. The "timebase-frequency" DT property is
described in Documentation/devicetree/bindings/riscv/cpus.yaml.
properties:
compatible:
items:
- const: sifive,fu540-c000-clint
- const: sifive,clint0
description:
Should be "sifive,<chip>-clint" and "sifive,clint<version>".
Supported compatible strings are -
"sifive,fu540-c000-clint" for the SiFive CLINT v0 as integrated
onto the SiFive FU540 chip, and "sifive,clint0" for the SiFive
CLINT v0 IP block with no chip integration tweaks.
Please refer to sifive-blocks-ip-versioning.txt for details
reg:
maxItems: 1
interrupts-extended:
minItems: 1
additionalProperties: false
required:
- compatible
- reg
- interrupts-extended
examples:
- |
timer@2000000 {
compatible = "sifive,fu540-c000-clint", "sifive,clint0";
interrupts-extended = <&cpu1intc 3 &cpu1intc 7
&cpu2intc 3 &cpu2intc 7
&cpu3intc 3 &cpu3intc 7
&cpu4intc 3 &cpu4intc 7>;
reg = <0x2000000 0x10000>;
};
...


@ -993,7 +993,7 @@ patternProperties:
"^sst,.*":
description: Silicon Storage Technology, Inc.
"^sstar,.*":
description: Xiamen Xingchen(SigmaStar) Technology Co., Ltd.
(formerly part of MStar Semiconductor, Inc.)
"^st,.*":
description: STMicroelectronics


@ -5,7 +5,7 @@ Writing DeviceTree Bindings in json-schema
Devicetree bindings are written using json-schema vocabulary. Schema files are
written in a JSON compatible subset of YAML. YAML is used instead of JSON as it
considered more human readable and has some advantages such as allowing
is considered more human readable and has some advantages such as allowing
comments (Prefixed with '#').
Schema Contents
@ -19,7 +19,7 @@ $id
A json-schema unique identifier string. The string must be a valid
URI typically containing the binding's filename and path. For DT schema, it must
begin with "http://devicetree.org/schemas/". The URL is used in constructing
references to other files specified in schema "$ref" properties. A $ref values
references to other files specified in schema "$ref" properties. A $ref value
with a leading '/' will have the hostname prepended. A $ref value with a relative
path or filename only will be prepended with the hostname and path components
of the current schema file's '$id' value. A URL is used even for local files,


@ -39,6 +39,6 @@ entry.
Other References
----------------
Also see http://www.nongnu.org/ext2-doc/ for quite a collection of
Also see https://www.nongnu.org/ext2-doc/ for quite a collection of
information about ext2/3. Here's another old reference:
http://wiki.osdev.org/Ext2


@ -5059,7 +5059,7 @@ F: include/linux/dm-*.h
F: include/uapi/linux/dm-*.h
DEVLINK
M: Jiri Pirko <jiri@mellanox.com>
M: Jiri Pirko <jiri@nvidia.com>
L: netdev@vger.kernel.org
S: Supported
F: Documentation/networking/devlink
@ -6090,7 +6090,7 @@ F: include/linux/dynamic_debug.h
F: lib/dynamic_debug.c
DYNAMIC INTERRUPT MODERATION
M: Tal Gilboa <talgi@mellanox.com>
M: Tal Gilboa <talgi@nvidia.com>
S: Maintained
F: Documentation/networking/net_dim.rst
F: include/linux/dim.h
@ -6170,7 +6170,7 @@ F: Documentation/devicetree/bindings/edac/aspeed-sdram-edac.txt
F: drivers/edac/aspeed_edac.c
EDAC-BLUEFIELD
M: Shravan Kumar Ramani <sramani@mellanox.com>
M: Shravan Kumar Ramani <sramani@nvidia.com>
S: Supported
F: drivers/edac/bluefield_edac.c
@ -6492,8 +6492,8 @@ S: Odd Fixes
F: drivers/net/ethernet/agere/
ETHERNET BRIDGE
M: Roopa Prabhu <roopa@cumulusnetworks.com>
M: Nikolay Aleksandrov <nikolay@cumulusnetworks.com>
M: Roopa Prabhu <roopa@nvidia.com>
M: Nikolay Aleksandrov <nikolay@nvidia.com>
L: bridge@lists.linux-foundation.org (moderated for non-subscribers)
L: netdev@vger.kernel.org
S: Maintained
@ -6608,7 +6608,7 @@ F: drivers/iommu/exynos-iommu.c
EZchip NPS platform support
M: Vineet Gupta <vgupta@synopsys.com>
M: Ofer Levi <oferle@mellanox.com>
M: Ofer Levi <oferle@nvidia.com>
S: Supported
F: arch/arc/boot/dts/eznps.dts
F: arch/arc/plat-eznps
@ -8572,7 +8572,7 @@ F: drivers/iio/pressure/dps310.c
INFINIBAND SUBSYSTEM
M: Doug Ledford <dledford@redhat.com>
M: Jason Gunthorpe <jgg@mellanox.com>
M: Jason Gunthorpe <jgg@nvidia.com>
L: linux-rdma@vger.kernel.org
S: Supported
W: https://github.com/linux-rdma/rdma-core
@ -9235,7 +9235,7 @@ F: drivers/firmware/iscsi_ibft*
ISCSI EXTENSIONS FOR RDMA (ISER) INITIATOR
M: Sagi Grimberg <sagi@grimberg.me>
M: Max Gurtovoy <maxg@mellanox.com>
M: Max Gurtovoy <maxg@nvidia.com>
L: linux-rdma@vger.kernel.org
S: Supported
W: http://www.openfabrics.org
@ -11081,7 +11081,7 @@ F: Documentation/devicetree/bindings/input/touchscreen/melfas_mip4.txt
F: drivers/input/touchscreen/melfas_mip4.c
MELLANOX ETHERNET DRIVER (mlx4_en)
M: Tariq Toukan <tariqt@mellanox.com>
M: Tariq Toukan <tariqt@nvidia.com>
L: netdev@vger.kernel.org
S: Supported
W: http://www.mellanox.com
@ -11089,7 +11089,7 @@ Q: http://patchwork.ozlabs.org/project/netdev/list/
F: drivers/net/ethernet/mellanox/mlx4/en_*
MELLANOX ETHERNET DRIVER (mlx5e)
M: Saeed Mahameed <saeedm@mellanox.com>
M: Saeed Mahameed <saeedm@nvidia.com>
L: netdev@vger.kernel.org
S: Supported
W: http://www.mellanox.com
@ -11097,7 +11097,7 @@ Q: http://patchwork.ozlabs.org/project/netdev/list/
F: drivers/net/ethernet/mellanox/mlx5/core/en_*
MELLANOX ETHERNET INNOVA DRIVERS
R: Boris Pismenny <borisp@mellanox.com>
R: Boris Pismenny <borisp@nvidia.com>
L: netdev@vger.kernel.org
S: Supported
W: http://www.mellanox.com
@ -11108,8 +11108,8 @@ F: drivers/net/ethernet/mellanox/mlx5/core/fpga/*
F: include/linux/mlx5/mlx5_ifc_fpga.h
MELLANOX ETHERNET SWITCH DRIVERS
M: Jiri Pirko <jiri@mellanox.com>
M: Ido Schimmel <idosch@mellanox.com>
M: Jiri Pirko <jiri@nvidia.com>
M: Ido Schimmel <idosch@nvidia.com>
L: netdev@vger.kernel.org
S: Supported
W: http://www.mellanox.com
@ -11118,7 +11118,7 @@ F: drivers/net/ethernet/mellanox/mlxsw/
F: tools/testing/selftests/drivers/net/mlxsw/
MELLANOX FIRMWARE FLASH LIBRARY (mlxfw)
M: mlxsw@mellanox.com
M: mlxsw@nvidia.com
L: netdev@vger.kernel.org
S: Supported
W: http://www.mellanox.com
@ -11128,7 +11128,7 @@ F: drivers/net/ethernet/mellanox/mlxfw/
MELLANOX HARDWARE PLATFORM SUPPORT
M: Andy Shevchenko <andy@infradead.org>
M: Darren Hart <dvhart@infradead.org>
M: Vadim Pasternak <vadimp@mellanox.com>
M: Vadim Pasternak <vadimp@nvidia.com>
L: platform-driver-x86@vger.kernel.org
S: Supported
F: Documentation/ABI/testing/sysfs-platform-mellanox-bootctl
@ -11136,7 +11136,7 @@ F: drivers/platform/mellanox/
F: include/linux/platform_data/mlxreg.h
MELLANOX MLX4 core VPI driver
M: Tariq Toukan <tariqt@mellanox.com>
M: Tariq Toukan <tariqt@nvidia.com>
L: netdev@vger.kernel.org
L: linux-rdma@vger.kernel.org
S: Supported
@ -11146,7 +11146,7 @@ F: drivers/net/ethernet/mellanox/mlx4/
F: include/linux/mlx4/
MELLANOX MLX4 IB driver
M: Yishai Hadas <yishaih@mellanox.com>
M: Yishai Hadas <yishaih@nvidia.com>
L: linux-rdma@vger.kernel.org
S: Supported
W: http://www.mellanox.com
@ -11156,8 +11156,8 @@ F: include/linux/mlx4/
F: include/uapi/rdma/mlx4-abi.h
MELLANOX MLX5 core VPI driver
M: Saeed Mahameed <saeedm@mellanox.com>
M: Leon Romanovsky <leonro@mellanox.com>
M: Saeed Mahameed <saeedm@nvidia.com>
M: Leon Romanovsky <leonro@nvidia.com>
L: netdev@vger.kernel.org
L: linux-rdma@vger.kernel.org
S: Supported
@ -11168,7 +11168,7 @@ F: drivers/net/ethernet/mellanox/mlx5/core/
F: include/linux/mlx5/
MELLANOX MLX5 IB driver
M: Leon Romanovsky <leonro@mellanox.com>
M: Leon Romanovsky <leonro@nvidia.com>
L: linux-rdma@vger.kernel.org
S: Supported
W: http://www.mellanox.com
@ -11178,8 +11178,8 @@ F: include/linux/mlx5/
F: include/uapi/rdma/mlx5-abi.h
MELLANOX MLXCPLD I2C AND MUX DRIVER
M: Vadim Pasternak <vadimp@mellanox.com>
M: Michael Shych <michaelsh@mellanox.com>
M: Vadim Pasternak <vadimp@nvidia.com>
M: Michael Shych <michaelsh@nvidia.com>
L: linux-i2c@vger.kernel.org
S: Supported
F: Documentation/i2c/busses/i2c-mlxcpld.rst
@ -11187,7 +11187,7 @@ F: drivers/i2c/busses/i2c-mlxcpld.c
F: drivers/i2c/muxes/i2c-mux-mlxcpld.c
MELLANOX MLXCPLD LED DRIVER
M: Vadim Pasternak <vadimp@mellanox.com>
M: Vadim Pasternak <vadimp@nvidia.com>
L: linux-leds@vger.kernel.org
S: Supported
F: Documentation/leds/leds-mlxcpld.rst
@ -11195,7 +11195,7 @@ F: drivers/leds/leds-mlxcpld.c
F: drivers/leds/leds-mlxreg.c
MELLANOX PLATFORM DRIVER
M: Vadim Pasternak <vadimp@mellanox.com>
M: Vadim Pasternak <vadimp@nvidia.com>
L: platform-driver-x86@vger.kernel.org
S: Supported
F: drivers/platform/x86/mlx-platform.c
@ -12176,8 +12176,8 @@ F: net/ipv6/syncookies.c
F: net/ipv6/tcp*.c
NETWORKING [TLS]
M: Boris Pismenny <borisp@mellanox.com>
M: Aviad Yehezkel <aviadye@mellanox.com>
M: Boris Pismenny <borisp@nvidia.com>
M: Aviad Yehezkel <aviadye@nvidia.com>
M: John Fastabend <john.fastabend@gmail.com>
M: Daniel Borkmann <daniel@iogearbox.net>
M: Jakub Kicinski <kuba@kernel.org>
@ -12477,7 +12477,7 @@ S: Supported
F: drivers/nfc/nxp-nci
OBJAGG
M: Jiri Pirko <jiri@mellanox.com>
M: Jiri Pirko <jiri@nvidia.com>
L: netdev@vger.kernel.org
S: Supported
F: include/linux/objagg.h
@ -13119,7 +13119,7 @@ F: drivers/video/logo/logo_parisc*
F: include/linux/hp_sdc.h
PARMAN
M: Jiri Pirko <jiri@mellanox.com>
M: Jiri Pirko <jiri@nvidia.com>
L: netdev@vger.kernel.org
S: Supported
F: include/linux/parman.h
@ -16043,7 +16043,7 @@ F: drivers/infiniband/sw/siw/
F: include/uapi/rdma/siw-abi.h
SOFT-ROCE DRIVER (rxe)
M: Zhu Yanjun <yanjunz@mellanox.com>
M: Zhu Yanjun <yanjunz@nvidia.com>
L: linux-rdma@vger.kernel.org
S: Supported
F: drivers/infiniband/sw/rxe/


@ -265,8 +265,7 @@ no-dot-config-targets := $(clean-targets) \
$(version_h) headers headers_% archheaders archscripts \
%asm-generic kernelversion %src-pkg dt_binding_check \
outputmakefile
no-sync-config-targets := $(no-dot-config-targets) install %install \
kernelrelease
no-sync-config-targets := $(no-dot-config-targets) %install kernelrelease
single-targets := %.a %.i %.ko %.lds %.ll %.lst %.mod %.o %.s %.symtypes %/
config-build :=
@ -292,7 +291,7 @@ ifneq ($(KBUILD_EXTMOD),)
endif
ifeq ($(KBUILD_EXTMOD),)
ifneq ($(filter config %config,$(MAKECMDGOALS)),)
ifneq ($(filter %config,$(MAKECMDGOALS)),)
config-build := 1
ifneq ($(words $(MAKECMDGOALS)),1)
mixed-build := 1


@ -165,6 +165,7 @@ zinstall install:
PHONY += vdso_install
vdso_install:
$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso $@
$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso32 $@
# We use MRPROPER_FILES and CLEAN_FILES now
archclean:


@ -473,7 +473,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm,
unsigned long start, unsigned long end);
unsigned long start, unsigned long end, unsigned flags);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);


@ -910,6 +910,8 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
.desc = "ARM erratum 1418040",
.capability = ARM64_WORKAROUND_1418040,
ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
.type = (ARM64_CPUCAP_SCOPE_LOCAL_CPU |
ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU),
},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT


@ -170,19 +170,6 @@ alternative_cb_end
stp x28, x29, [sp, #16 * 14]
.if \el == 0
.if \regsize == 32
/*
* If we're returning from a 32-bit task on a system affected by
* 1418040 then re-enable userspace access to the virtual counter.
*/
#ifdef CONFIG_ARM64_ERRATUM_1418040
alternative_if ARM64_WORKAROUND_1418040
mrs x0, cntkctl_el1
orr x0, x0, #2 // ARCH_TIMER_USR_VCT_ACCESS_EN
msr cntkctl_el1, x0
alternative_else_nop_endif
#endif
.endif
clear_gp_regs
mrs x21, sp_el0
ldr_this_cpu tsk, __entry_task, x20
@ -294,14 +281,6 @@ alternative_else_nop_endif
tst x22, #PSR_MODE32_BIT // native task?
b.eq 3f
#ifdef CONFIG_ARM64_ERRATUM_1418040
alternative_if ARM64_WORKAROUND_1418040
mrs x0, cntkctl_el1
bic x0, x0, #2 // ARCH_TIMER_USR_VCT_ACCESS_EN
msr cntkctl_el1, x0
alternative_else_nop_endif
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
#ifdef CONFIG_PID_IN_CONTEXTIDR


@ -515,6 +515,39 @@ static void entry_task_switch(struct task_struct *next)
__this_cpu_write(__entry_task, next);
}
/*
* ARM erratum 1418040 handling, affecting the 32bit view of CNTVCT.
* Assuming the virtual counter is enabled at the beginning of times:
*
* - disable access when switching from a 64bit task to a 32bit task
* - enable access when switching from a 32bit task to a 64bit task
*/
static void erratum_1418040_thread_switch(struct task_struct *prev,
struct task_struct *next)
{
bool prev32, next32;
u64 val;
if (!(IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) &&
cpus_have_const_cap(ARM64_WORKAROUND_1418040)))
return;
prev32 = is_compat_thread(task_thread_info(prev));
next32 = is_compat_thread(task_thread_info(next));
if (prev32 == next32)
return;
val = read_sysreg(cntkctl_el1);
if (!next32)
val |= ARCH_TIMER_USR_VCT_ACCESS_EN;
else
val &= ~ARCH_TIMER_USR_VCT_ACCESS_EN;
write_sysreg(val, cntkctl_el1);
}
/*
* Thread switching.
*/
@ -530,6 +563,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
entry_task_switch(next);
uao_thread_switch(next);
ssbs_thread_switch(next);
erratum_1418040_thread_switch(prev, next);
/*
* Complete any pending TLB or cache maintenance on this CPU in case


@ -208,7 +208,7 @@ quiet_cmd_vdsosym = VDSOSYM $@
cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
# Install commands for the unstripped file
quiet_cmd_vdso_install = INSTALL $@
quiet_cmd_vdso_install = INSTALL32 $@
cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/vdso32.so
vdso.so: $(obj)/vdso.so.dbg


@ -1640,6 +1640,10 @@ int kvm_arch_init(void *opaque)
return -ENODEV;
}
if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE))
kvm_info("Guests without required CPU erratum workarounds can deadlock system!\n" \
"Only trusted guests should be used on this system.\n");
for_each_online_cpu(cpu) {
smp_call_function_single(cpu, check_kvm_target_cpu, &ret, 1);
if (ret < 0) {


@ -343,7 +343,8 @@ static void unmap_stage2_p4ds(struct kvm_s2_mmu *mmu, pgd_t *pgd,
* destroying the VM), otherwise another faulting VCPU may come in and mess
* with things behind our backs.
*/
static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size,
bool may_block)
{
struct kvm *kvm = mmu->kvm;
pgd_t *pgd;
@ -369,11 +370,16 @@ static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 si
* If the range is too large, release the kvm->mmu_lock
* to prevent starvation and lockup detector warnings.
*/
if (next != end)
if (may_block && next != end)
cond_resched_lock(&kvm->mmu_lock);
} while (pgd++, addr = next, addr != end);
}
static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
{
__unmap_stage2_range(mmu, start, size, true);
}
static void stage2_flush_ptes(struct kvm_s2_mmu *mmu, pmd_t *pmd,
phys_addr_t addr, phys_addr_t end)
{
@ -2208,18 +2214,21 @@ static int handle_hva_to_gpa(struct kvm *kvm,
static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
{
unmap_stage2_range(&kvm->arch.mmu, gpa, size);
unsigned flags = *(unsigned *)data;
bool may_block = flags & MMU_NOTIFIER_RANGE_BLOCKABLE;
__unmap_stage2_range(&kvm->arch.mmu, gpa, size, may_block);
return 0;
}
int kvm_unmap_hva_range(struct kvm *kvm,
unsigned long start, unsigned long end)
unsigned long start, unsigned long end, unsigned flags)
{
if (!kvm->arch.mmu.pgd)
return 0;
trace_kvm_unmap_hva_range(start, end);
handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, &flags);
return 0;
}


@ -366,6 +366,15 @@ pgd_index (unsigned long address)
}
#define pgd_index pgd_index
/*
* In the kernel's mapped region we know everything is in region number 5, so
* as an optimisation its PGD already points to the area for that region.
* However, this also means that we cannot use pgd_index() and we must
* never add the region here.
*/
#define pgd_offset_k(addr) \
(init_mm.pgd + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)))
/* Look up a pgd entry in the gate area. On IA-64, the gate-area
resides in the kernel-mapped segment, hence we use pgd_offset_k()
here. */


@ -969,7 +969,7 @@ enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm,
unsigned long start, unsigned long end);
unsigned long start, unsigned long end, unsigned flags);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);


@ -486,7 +486,8 @@ static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
return 1;
}
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
unsigned flags)
{
handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);


@ -9,6 +9,11 @@
#ifndef __ASSEMBLY__
/*
* Added to include __machine_check_early_realmode_* functions
*/
#include <asm/mce.h>
/* This structure can grow, it's real size is used by head.S code
* via the mkdefs mechanism.
*/


@ -52,7 +52,7 @@ enum fixed_addresses {
FIX_HOLE,
/* reserve the top 128K for early debugging purposes */
FIX_EARLY_DEBUG_TOP = FIX_HOLE,
FIX_EARLY_DEBUG_BASE = FIX_EARLY_DEBUG_TOP+(ALIGN(SZ_128, PAGE_SIZE)/PAGE_SIZE)-1,
FIX_EARLY_DEBUG_BASE = FIX_EARLY_DEBUG_TOP+(ALIGN(SZ_128K, PAGE_SIZE)/PAGE_SIZE)-1,
#ifdef CONFIG_HIGHMEM
FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,


@ -15,11 +15,18 @@
#ifndef __ASSEMBLY__
#include <asm/page.h>
#include <linux/sizes.h>
#define KASAN_SHADOW_SCALE_SHIFT 3
#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_MODULES) && defined(CONFIG_STRICT_KERNEL_RWX)
#define KASAN_KERN_START ALIGN_DOWN(PAGE_OFFSET - SZ_256M, SZ_256M)
#else
#define KASAN_KERN_START PAGE_OFFSET
#endif
#define KASAN_SHADOW_START (KASAN_SHADOW_OFFSET + \
(PAGE_OFFSET >> KASAN_SHADOW_SCALE_SHIFT))
(KASAN_KERN_START >> KASAN_SHADOW_SCALE_SHIFT))
#define KASAN_SHADOW_OFFSET ASM_CONST(CONFIG_KASAN_SHADOW_OFFSET)


@ -58,7 +58,8 @@
#define KVM_ARCH_WANT_MMU_NOTIFIER
extern int kvm_unmap_hva_range(struct kvm *kvm,
unsigned long start, unsigned long end);
unsigned long start, unsigned long end,
unsigned flags);
extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
extern int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);


@ -210,6 +210,9 @@ struct mce_error_info {
#define MCE_EVENT_RELEASE true
#define MCE_EVENT_DONTRELEASE false
struct pt_regs;
struct notifier_block;
extern void save_mce_event(struct pt_regs *regs, long handled,
struct mce_error_info *mce_err, uint64_t nip,
uint64_t addr, uint64_t phys_addr);
@ -225,5 +228,9 @@ int mce_register_notifier(struct notifier_block *nb);
int mce_unregister_notifier(struct notifier_block *nb);
#ifdef CONFIG_PPC_BOOK3S_64
void flush_and_reload_slb(void);
long __machine_check_early_realmode_p7(struct pt_regs *regs);
long __machine_check_early_realmode_p8(struct pt_regs *regs);
long __machine_check_early_realmode_p9(struct pt_regs *regs);
long __machine_check_early_realmode_p10(struct pt_regs *regs);
#endif /* CONFIG_PPC_BOOK3S_64 */
#endif /* __ASM_PPC64_MCE_H__ */


@ -40,4 +40,7 @@ static inline bool is_sier_available(void) { return false; }
/* To support perf_regs sier update */
extern bool is_sier_available(void);
/* To define perf extended regs mask value */
extern u64 PERF_REG_EXTENDED_MASK;
#define PERF_REG_EXTENDED_MASK PERF_REG_EXTENDED_MASK
#endif


@ -62,6 +62,11 @@ struct power_pmu {
int *blacklist_ev;
/* BHRB entries in the PMU */
int bhrb_nr;
/*
* set this flag with `PERF_PMU_CAP_EXTENDED_REGS` if
* the pmu supports extended perf regs capability
*/
int capabilities;
};
/*


@ -48,6 +48,24 @@ enum perf_event_powerpc_regs {
PERF_REG_POWERPC_DSISR,
PERF_REG_POWERPC_SIER,
PERF_REG_POWERPC_MMCRA,
PERF_REG_POWERPC_MAX,
/* Extended registers */
PERF_REG_POWERPC_MMCR0,
PERF_REG_POWERPC_MMCR1,
PERF_REG_POWERPC_MMCR2,
PERF_REG_POWERPC_MMCR3,
PERF_REG_POWERPC_SIER2,
PERF_REG_POWERPC_SIER3,
/* Max regs without the extended regs */
PERF_REG_POWERPC_MAX = PERF_REG_POWERPC_MMCRA + 1,
};
#define PERF_REG_PMU_MASK ((1ULL << PERF_REG_POWERPC_MAX) - 1)
/* PERF_REG_EXTENDED_MASK value for CPU_FTR_ARCH_300 */
#define PERF_REG_PMU_MASK_300 (((1ULL << (PERF_REG_POWERPC_MMCR2 + 1)) - 1) - PERF_REG_PMU_MASK)
/* PERF_REG_EXTENDED_MASK value for CPU_FTR_ARCH_31 */
#define PERF_REG_PMU_MASK_31 (((1ULL << (PERF_REG_POWERPC_SIER3 + 1)) - 1) - PERF_REG_PMU_MASK)
#define PERF_REG_MAX_ISA_300 (PERF_REG_POWERPC_MMCR2 + 1)
#define PERF_REG_MAX_ISA_31 (PERF_REG_POWERPC_SIER3 + 1)
#endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */


@ -72,9 +72,6 @@ extern void __setup_cpu_power9(unsigned long offset, struct cpu_spec* spec);
extern void __restore_cpu_power9(void);
extern void __setup_cpu_power10(unsigned long offset, struct cpu_spec* spec);
extern void __restore_cpu_power10(void);
extern long __machine_check_early_realmode_p7(struct pt_regs *regs);
extern long __machine_check_early_realmode_p8(struct pt_regs *regs);
extern long __machine_check_early_realmode_p9(struct pt_regs *regs);
#endif /* CONFIG_PPC64 */
#if defined(CONFIG_E500)
extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec);
@ -542,6 +539,25 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check_early = __machine_check_early_realmode_p9,
.platform = "power9",
},
{ /* Power10 */
.pvr_mask = 0xffff0000,
.pvr_value = 0x00800000,
.cpu_name = "POWER10 (raw)",
.cpu_features = CPU_FTRS_POWER10,
.cpu_user_features = COMMON_USER_POWER10,
.cpu_user_features2 = COMMON_USER2_POWER10,
.mmu_features = MMU_FTRS_POWER10,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/power10",
.oprofile_type = PPC_OPROFILE_INVALID,
.cpu_setup = __setup_cpu_power10,
.cpu_restore = __restore_cpu_power10,
.machine_check_early = __machine_check_early_realmode_p10,
.platform = "power10",
},
{ /* Cell Broadband Engine */
.pvr_mask = 0xffff0000,
.pvr_value = 0x00700000,


@ -64,10 +64,6 @@ struct dt_cpu_feature {
* Set up the base CPU
*/
extern long __machine_check_early_realmode_p8(struct pt_regs *regs);
extern long __machine_check_early_realmode_p9(struct pt_regs *regs);
extern long __machine_check_early_realmode_p10(struct pt_regs *regs);
static int hv_mode;
static struct {


@ -311,6 +311,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
min = pvr & 0xFF;
break;
case 0x004e: /* POWER9 bits 12-15 give chip type */
case 0x0080: /* POWER10 bit 12 gives SMT8/4 */
maj = (pvr >> 8) & 0x0F;
min = pvr & 0xFF;
break;


@ -834,7 +834,8 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm,
kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new, change);
}
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
unsigned flags)
{
return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
}


@ -734,7 +734,8 @@ static int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
return 0;
}
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
unsigned flags)
{
/* kvm_unmap_hva flushes everything anyways */
kvm_unmap_hva(kvm, start);


@ -191,10 +191,17 @@ static bool is_module_segment(unsigned long addr)
{
if (!IS_ENABLED(CONFIG_MODULES))
return false;
#ifdef MODULES_VADDR
if (addr < ALIGN_DOWN(MODULES_VADDR, SZ_256M))
return false;
if (addr > ALIGN(MODULES_END, SZ_256M) - 1)
return false;
#else
if (addr < ALIGN_DOWN(VMALLOC_START, SZ_256M))
return false;
if (addr >= ALIGN(VMALLOC_END, SZ_256M))
if (addr > ALIGN(VMALLOC_END, SZ_256M) - 1)
return false;
#endif
return true;
}


@ -1115,8 +1115,10 @@ void hash__early_init_mmu_secondary(void)
&& cpu_has_feature(CPU_FTR_HVMODE))
tlbiel_all();
if (IS_ENABLED(CONFIG_PPC_MEM_KEYS) && mmu_has_feature(MMU_FTR_PKEY))
#ifdef CONFIG_PPC_MEM_KEYS
if (mmu_has_feature(MMU_FTR_PKEY))
mtspr(SPRN_UAMOR, default_uamor);
#endif
}
#endif /* CONFIG_SMP */


@ -2141,6 +2141,10 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
if (perf_event_overflow(event, &data, regs))
power_pmu_stop(event, 0);
} else if (period) {
/* Account for interrupt in case of invalid SIAR */
if (perf_event_account_interrupt(event))
power_pmu_stop(event, 0);
}
}
@ -2323,6 +2327,7 @@ int register_power_pmu(struct power_pmu *pmu)
pmu->name);
power_pmu.attr_groups = ppmu->attr_groups;
power_pmu.capabilities |= (ppmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS);
#ifdef MSR_HV
/*


@ -1128,6 +1128,15 @@ static struct bin_attribute *if_bin_attrs[] = {
NULL,
};
static struct attribute *cpumask_attrs[] = {
&dev_attr_cpumask.attr,
NULL,
};
static struct attribute_group cpumask_attr_group = {
.attrs = cpumask_attrs,
};
static struct attribute *if_attrs[] = {
&dev_attr_catalog_len.attr,
&dev_attr_catalog_version.attr,
@ -1135,7 +1144,6 @@ static struct attribute *if_attrs[] = {
&dev_attr_sockets.attr,
&dev_attr_chipspersocket.attr,
&dev_attr_coresperchip.attr,
&dev_attr_cpumask.attr,
NULL,
};
@ -1151,6 +1159,7 @@ static const struct attribute_group *attr_groups[] = {
&event_desc_group,
&event_long_desc_group,
&if_group,
&cpumask_attr_group,
NULL,
};


@ -13,9 +13,11 @@
#include <asm/ptrace.h>
#include <asm/perf_regs.h>
u64 PERF_REG_EXTENDED_MASK;
#define PT_REGS_OFFSET(id, r) [id] = offsetof(struct pt_regs, r)
#define REG_RESERVED (~((1ULL << PERF_REG_POWERPC_MAX) - 1))
#define REG_RESERVED (~(PERF_REG_EXTENDED_MASK | PERF_REG_PMU_MASK))
static unsigned int pt_regs_offset[PERF_REG_POWERPC_MAX] = {
PT_REGS_OFFSET(PERF_REG_POWERPC_R0, gpr[0]),
@ -69,10 +71,36 @@ static unsigned int pt_regs_offset[PERF_REG_POWERPC_MAX] = {
PT_REGS_OFFSET(PERF_REG_POWERPC_MMCRA, dsisr),
};
/* Function to return the extended register values */
static u64 get_ext_regs_value(int idx)
{
switch (idx) {
case PERF_REG_POWERPC_MMCR0:
return mfspr(SPRN_MMCR0);
case PERF_REG_POWERPC_MMCR1:
return mfspr(SPRN_MMCR1);
case PERF_REG_POWERPC_MMCR2:
return mfspr(SPRN_MMCR2);
#ifdef CONFIG_PPC64
case PERF_REG_POWERPC_MMCR3:
return mfspr(SPRN_MMCR3);
case PERF_REG_POWERPC_SIER2:
return mfspr(SPRN_SIER2);
case PERF_REG_POWERPC_SIER3:
return mfspr(SPRN_SIER3);
#endif
default: return 0;
}
}
u64 perf_reg_value(struct pt_regs *regs, int idx)
{
if (WARN_ON_ONCE(idx >= PERF_REG_POWERPC_MAX))
return 0;
u64 perf_reg_extended_max = PERF_REG_POWERPC_MAX;
if (cpu_has_feature(CPU_FTR_ARCH_31))
perf_reg_extended_max = PERF_REG_MAX_ISA_31;
else if (cpu_has_feature(CPU_FTR_ARCH_300))
perf_reg_extended_max = PERF_REG_MAX_ISA_300;
if (idx == PERF_REG_POWERPC_SIER &&
(IS_ENABLED(CONFIG_FSL_EMB_PERF_EVENT) ||
@ -85,6 +113,16 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
IS_ENABLED(CONFIG_PPC32)))
return 0;
if (idx >= PERF_REG_POWERPC_MAX && idx < perf_reg_extended_max)
return get_ext_regs_value(idx);
/*
* If the idx is referring to value beyond the
* supported registers, return 0 with a warning
*/
if (WARN_ON_ONCE(idx >= perf_reg_extended_max))
return 0;
return regs_get_register(regs, pt_regs_offset[idx]);
}


@ -87,6 +87,8 @@
#define POWER10_MMCRA_IFM3 0x00000000C0000000UL
#define POWER10_MMCRA_BHRB_MASK 0x00000000C0000000UL
extern u64 PERF_REG_EXTENDED_MASK;
/* Table of alternatives, sorted by column 0 */
static const unsigned int power10_event_alternatives[][MAX_ALT] = {
{ PM_RUN_CYC_ALT, PM_RUN_CYC },
@ -397,6 +399,7 @@ static struct power_pmu power10_pmu = {
.cache_events = &power10_cache_events,
.attr_groups = power10_pmu_attr_groups,
.bhrb_nr = 32,
.capabilities = PERF_PMU_CAP_EXTENDED_REGS,
};
int init_power10_pmu(void)
@ -408,6 +411,9 @@ int init_power10_pmu(void)
strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power10"))
return -ENODEV;
/* Set the PERF_REG_EXTENDED_MASK here */
PERF_REG_EXTENDED_MASK = PERF_REG_PMU_MASK_31;
rc = register_power_pmu(&power10_pmu);
if (rc)
return rc;


@ -90,6 +90,8 @@ enum {
#define POWER9_MMCRA_IFM3 0x00000000C0000000UL
#define POWER9_MMCRA_BHRB_MASK 0x00000000C0000000UL
extern u64 PERF_REG_EXTENDED_MASK;
/* Nasty Power9 specific hack */
#define PVR_POWER9_CUMULUS 0x00002000
@ -434,6 +436,7 @@ static struct power_pmu power9_pmu = {
.cache_events = &power9_cache_events,
.attr_groups = power9_pmu_attr_groups,
.bhrb_nr = 32,
.capabilities = PERF_PMU_CAP_EXTENDED_REGS,
};
int init_power9_pmu(void)
@ -457,6 +460,9 @@ int init_power9_pmu(void)
}
}
/* Set the PERF_REG_EXTENDED_MASK here */
PERF_REG_EXTENDED_MASK = PERF_REG_PMU_MASK_300;
rc = register_power_pmu(&power9_pmu);
if (rc)
return rc;


@ -2705,7 +2705,7 @@ void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe)
struct iommu_table *tbl = pe->table_group.tables[0];
int64_t rc;
if (pe->dma_setup_done)
if (!pe->dma_setup_done)
return;
rc = pnv_pci_ioda2_unset_window(&pe->table_group, 0);


@ -107,22 +107,28 @@ static int pseries_cpu_disable(void)
*/
static void pseries_cpu_die(unsigned int cpu)
{
int tries;
int cpu_status = 1;
unsigned int pcpu = get_hard_smp_processor_id(cpu);
unsigned long timeout = jiffies + msecs_to_jiffies(120000);
for (tries = 0; tries < 25; tries++) {
while (true) {
cpu_status = smp_query_cpu_stopped(pcpu);
if (cpu_status == QCSS_STOPPED ||
cpu_status == QCSS_HARDWARE_ERROR)
break;
cpu_relax();
if (time_after(jiffies, timeout)) {
pr_warn("CPU %i (hwid %i) didn't die after 120 seconds\n",
cpu, pcpu);
timeout = jiffies + msecs_to_jiffies(120000);
}
cond_resched();
}
if (cpu_status != 0) {
printk("Querying DEAD? cpu %i (%i) shows %i\n",
cpu, pcpu, cpu_status);
if (cpu_status == QCSS_HARDWARE_ERROR) {
pr_warn("CPU %i (hwid %i) reported error while dying\n",
cpu, pcpu);
}
/* Isolation and deallocation are definitely done by


@ -184,7 +184,6 @@ static void handle_system_shutdown(char event_modifier)
case EPOW_SHUTDOWN_ON_UPS:
pr_emerg("Loss of system power detected. System is running on"
" UPS/battery. Check RTAS error log for details\n");
orderly_poweroff(true);
break;
case EPOW_SHUTDOWN_LOSS_OF_CRITICAL_FUNCTIONS:


@ -81,7 +81,7 @@ config RISCV
select PCI_DOMAINS_GENERIC if PCI
select PCI_MSI if PCI
select RISCV_INTC
select RISCV_TIMER
select RISCV_TIMER if RISCV_SBI
select SPARSEMEM_STATIC if 32BIT
select SPARSE_IRQ
select SYSCTL_EXCEPTION_TRACE


@ -12,6 +12,7 @@ config SOC_SIFIVE
config SOC_VIRT
bool "QEMU Virt Machine"
select CLINT_TIMER if RISCV_M_MODE
select POWER_RESET
select POWER_RESET_SYSCON
select POWER_RESET_SYSCON_POWEROFF
@ -24,6 +25,7 @@ config SOC_VIRT
config SOC_KENDRYTE
bool "Kendryte K210 SoC"
depends on !MMU
select CLINT_TIMER if RISCV_M_MODE
select SERIAL_SIFIVE if TTY
select SERIAL_SIFIVE_CONSOLE if TTY
select SIFIVE_PLIC


@ -26,6 +26,7 @@ CONFIG_EXPERT=y
CONFIG_SLOB=y
# CONFIG_SLAB_MERGE_DEFAULT is not set
# CONFIG_MMU is not set
CONFIG_SOC_VIRT=y
CONFIG_MAXPHYSMEM_2GB=y
CONFIG_SMP=y
CONFIG_CMDLINE="root=/dev/vda rw earlycon=uart8250,mmio,0x10000000,115200n8 console=ttyS0"
@ -49,7 +50,6 @@ CONFIG_VIRTIO_BLK=y
# CONFIG_SERIO is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_LDISC_AUTOLOAD is not set
# CONFIG_DEVMEM is not set
CONFIG_SERIAL_8250=y
# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
CONFIG_SERIAL_8250_CONSOLE=y
@ -57,16 +57,13 @@ CONFIG_SERIAL_8250_NR_UARTS=1
CONFIG_SERIAL_8250_RUNTIME_UARTS=1
CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HW_RANDOM is not set
# CONFIG_DEVMEM is not set
# CONFIG_HWMON is not set
# CONFIG_LCD_CLASS_DEVICE is not set
# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
# CONFIG_VGA_CONSOLE is not set
# CONFIG_HID is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_VIRTIO_MMIO=y
CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
CONFIG_SIFIVE_PLIC=y
# CONFIG_VALIDATE_FS_PARSER is not set
CONFIG_EXT2_FS=y
# CONFIG_DNOTIFY is not set
# CONFIG_INOTIFY_USER is not set


@ -14,6 +14,7 @@ CONFIG_CHECKPOINT_RESTORE=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
CONFIG_BPF_SYSCALL=y
CONFIG_SOC_SIFIVE=y
CONFIG_SOC_VIRT=y
CONFIG_ARCH_RV32I=y
CONFIG_SMP=y
@ -62,6 +63,8 @@ CONFIG_HVC_RISCV_SBI=y
CONFIG_VIRTIO_CONSOLE=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_VIRTIO=y
CONFIG_SPI=y
CONFIG_SPI_SIFIVE=y
# CONFIG_PTP_1588_CLOCK is not set
CONFIG_POWER_RESET=y
CONFIG_DRM=y
@ -77,6 +80,8 @@ CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_HCD_PLATFORM=y
CONFIG_USB_STORAGE=y
CONFIG_USB_UAS=y
CONFIG_MMC=y
CONFIG_MMC_SPI=y
CONFIG_RTC_CLASS=y
CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_BALLOON=y


@ -1,39 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_RISCV_CLINT_H
#define _ASM_RISCV_CLINT_H 1
#include <linux/io.h>
#include <linux/smp.h>
#ifdef CONFIG_RISCV_M_MODE
extern u32 __iomem *clint_ipi_base;
void clint_init_boot_cpu(void);
static inline void clint_send_ipi_single(unsigned long hartid)
{
writel(1, clint_ipi_base + hartid);
}
static inline void clint_send_ipi_mask(const struct cpumask *mask)
{
int cpu;
for_each_cpu(cpu, mask)
clint_send_ipi_single(cpuid_to_hartid_map(cpu));
}
static inline void clint_clear_ipi(unsigned long hartid)
{
writel(0, clint_ipi_base + hartid);
}
#else /* CONFIG_RISCV_M_MODE */
#define clint_init_boot_cpu() do { } while (0)
/* stubs to for code is only reachable under IS_ENABLED(CONFIG_RISCV_M_MODE): */
void clint_send_ipi_single(unsigned long hartid);
void clint_send_ipi_mask(const struct cpumask *hartid_mask);
void clint_clear_ipi(unsigned long hartid);
#endif /* CONFIG_RISCV_M_MODE */
#endif /* _ASM_RISCV_CLINT_H */


@ -15,6 +15,11 @@
struct seq_file;
extern unsigned long boot_cpu_hartid;
struct riscv_ipi_ops {
void (*ipi_inject)(const struct cpumask *target);
void (*ipi_clear)(void);
};
#ifdef CONFIG_SMP
/*
* Mapping between linux logical cpu index and hartid.
@ -40,6 +45,12 @@ void arch_send_call_function_single_ipi(int cpu);
int riscv_hartid_to_cpuid(int hartid);
void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out);
/* Set custom IPI operations */
void riscv_set_ipi_ops(struct riscv_ipi_ops *ops);
/* Clear IPI for current CPU */
void riscv_clear_ipi(void);
/* Secondary hart entry */
asmlinkage void smp_callin(void);
@ -81,6 +92,14 @@ static inline void riscv_cpuid_to_hartid_mask(const struct cpumask *in,
cpumask_set_cpu(boot_cpu_hartid, out);
}
static inline void riscv_set_ipi_ops(struct riscv_ipi_ops *ops)
{
}
static inline void riscv_clear_ipi(void)
{
}
#endif /* CONFIG_SMP */
#if defined(CONFIG_HOTPLUG_CPU) && (CONFIG_SMP)


@ -7,41 +7,27 @@
#define _ASM_RISCV_TIMEX_H
#include <asm/csr.h>
#include <asm/mmio.h>
typedef unsigned long cycles_t;
extern u64 __iomem *riscv_time_val;
extern u64 __iomem *riscv_time_cmp;
#ifdef CONFIG_64BIT
#define mmio_get_cycles() readq_relaxed(riscv_time_val)
#else
#define mmio_get_cycles() readl_relaxed(riscv_time_val)
#define mmio_get_cycles_hi() readl_relaxed(((u32 *)riscv_time_val) + 1)
#endif
static inline cycles_t get_cycles(void)
{
if (IS_ENABLED(CONFIG_RISCV_SBI))
return csr_read(CSR_TIME);
return mmio_get_cycles();
return csr_read(CSR_TIME);
}
#define get_cycles get_cycles
static inline u32 get_cycles_hi(void)
{
return csr_read(CSR_TIMEH);
}
#define get_cycles_hi get_cycles_hi
#ifdef CONFIG_64BIT
static inline u64 get_cycles64(void)
{
return get_cycles();
}
#else /* CONFIG_64BIT */
static inline u32 get_cycles_hi(void)
{
if (IS_ENABLED(CONFIG_RISCV_SBI))
return csr_read(CSR_TIMEH);
return mmio_get_cycles_hi();
}
static inline u64 get_cycles64(void)
{
u32 hi, lo;


@ -31,7 +31,7 @@ obj-y += cacheinfo.o
obj-y += patch.o
obj-$(CONFIG_MMU) += vdso.o vdso/
obj-$(CONFIG_RISCV_M_MODE) += clint.o traps_misaligned.o
obj-$(CONFIG_RISCV_M_MODE) += traps_misaligned.o
obj-$(CONFIG_FPU) += fpu.o
obj-$(CONFIG_SMP) += smpboot.o
obj-$(CONFIG_SMP) += smp.o


@ -1,44 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2019 Christoph Hellwig.
*/
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/types.h>
#include <asm/clint.h>
#include <asm/csr.h>
#include <asm/timex.h>
#include <asm/smp.h>
/*
* This is the layout used by the SiFive clint, which is also shared by the qemu
* virt platform, and the Kendryte KD210 at least.
*/
#define CLINT_IPI_OFF 0
#define CLINT_TIME_CMP_OFF 0x4000
#define CLINT_TIME_VAL_OFF 0xbff8
u32 __iomem *clint_ipi_base;
void clint_init_boot_cpu(void)
{
struct device_node *np;
void __iomem *base;
np = of_find_compatible_node(NULL, NULL, "riscv,clint0");
if (!np) {
panic("clint not found");
return;
}
base = of_iomap(np, 0);
if (!base)
panic("could not map CLINT");
clint_ipi_base = base + CLINT_IPI_OFF;
riscv_time_cmp = base + CLINT_TIME_CMP_OFF;
riscv_time_val = base + CLINT_TIME_VAL_OFF;
clint_clear_ipi(boot_cpu_hartid);
}


@ -547,6 +547,18 @@ static inline long sbi_get_firmware_version(void)
return __sbi_base_ecall(SBI_EXT_BASE_GET_IMP_VERSION);
}
static void sbi_send_cpumask_ipi(const struct cpumask *target)
{
struct cpumask hartid_mask;
riscv_cpuid_to_hartid_mask(target, &hartid_mask);
sbi_send_ipi(cpumask_bits(&hartid_mask));
}
static struct riscv_ipi_ops sbi_ipi_ops = {
.ipi_inject = sbi_send_cpumask_ipi
};
int __init sbi_init(void)
{
@ -587,5 +599,7 @@ int __init sbi_init(void)
__sbi_rfence = __sbi_rfence_v01;
}
riscv_set_ipi_ops(&sbi_ipi_ops);
return 0;
}


@ -18,7 +18,6 @@
#include <linux/swiotlb.h>
#include <linux/smp.h>
#include <asm/clint.h>
#include <asm/cpu_ops.h>
#include <asm/setup.h>
#include <asm/sections.h>
@ -79,7 +78,6 @@ void __init setup_arch(char **cmdline_p)
#else
unflatten_device_tree();
#endif
clint_init_boot_cpu();
#ifdef CONFIG_SWIOTLB
swiotlb_init(1);


@ -18,7 +18,6 @@
#include <linux/delay.h>
#include <linux/irq_work.h>
#include <asm/clint.h>
#include <asm/sbi.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
@ -86,9 +85,25 @@ static void ipi_stop(void)
wait_for_interrupt();
}
static struct riscv_ipi_ops *ipi_ops;
void riscv_set_ipi_ops(struct riscv_ipi_ops *ops)
{
ipi_ops = ops;
}
EXPORT_SYMBOL_GPL(riscv_set_ipi_ops);
void riscv_clear_ipi(void)
{
if (ipi_ops && ipi_ops->ipi_clear)
ipi_ops->ipi_clear();
csr_clear(CSR_IP, IE_SIE);
}
EXPORT_SYMBOL_GPL(riscv_clear_ipi);
static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op)
{
struct cpumask hartid_mask;
int cpu;
smp_mb__before_atomic();
@ -96,33 +111,22 @@ static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op)
set_bit(op, &ipi_data[cpu].bits);
smp_mb__after_atomic();
riscv_cpuid_to_hartid_mask(mask, &hartid_mask);
if (IS_ENABLED(CONFIG_RISCV_SBI))
sbi_send_ipi(cpumask_bits(&hartid_mask));
if (ipi_ops && ipi_ops->ipi_inject)
ipi_ops->ipi_inject(mask);
else
clint_send_ipi_mask(mask);
pr_warn("SMP: IPI inject method not available\n");
}
static void send_ipi_single(int cpu, enum ipi_message_type op)
{
int hartid = cpuid_to_hartid_map(cpu);
smp_mb__before_atomic();
set_bit(op, &ipi_data[cpu].bits);
smp_mb__after_atomic();
if (IS_ENABLED(CONFIG_RISCV_SBI))
sbi_send_ipi(cpumask_bits(cpumask_of(hartid)));
if (ipi_ops && ipi_ops->ipi_inject)
ipi_ops->ipi_inject(cpumask_of(cpu));
else
clint_send_ipi_single(hartid);
}
static inline void clear_ipi(void)
{
if (IS_ENABLED(CONFIG_RISCV_SBI))
csr_clear(CSR_IP, IE_SIE);
else
clint_clear_ipi(cpuid_to_hartid_map(smp_processor_id()));
pr_warn("SMP: IPI inject method not available\n");
}
#ifdef CONFIG_IRQ_WORK
@ -140,7 +144,7 @@ void handle_IPI(struct pt_regs *regs)
irq_enter();
clear_ipi();
riscv_clear_ipi();
while (true) {
unsigned long ops;

View File

@ -24,7 +24,6 @@
#include <linux/of.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/mm.h>
#include <asm/clint.h>
#include <asm/cpu_ops.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
@ -147,8 +146,7 @@ asmlinkage __visible void smp_callin(void)
struct mm_struct *mm = &init_mm;
unsigned int curr_cpuid = smp_processor_id();
if (!IS_ENABLED(CONFIG_RISCV_SBI))
clint_clear_ipi(cpuid_to_hartid_map(smp_processor_id()));
riscv_clear_ipi();
/* All kernel threads share the same mm context. */
mmgrab(mm);


@ -1268,7 +1268,6 @@ static bool is_ri_cb_valid(struct runtime_instr_cb *cb)
cb->pc == 1 &&
cb->qc == 0 &&
cb->reserved2 == 0 &&
cb->key == PAGE_DEFAULT_KEY &&
cb->reserved3 == 0 &&
cb->reserved4 == 0 &&
cb->reserved5 == 0 &&
@ -1330,7 +1329,11 @@ static int s390_runtime_instr_set(struct task_struct *target,
kfree(data);
return -EINVAL;
}
/*
* Override access key in any case, since user space should
* not be able to set it, nor should it care about it.
*/
ri_cb.key = PAGE_DEFAULT_KEY >> 4;
preempt_disable();
if (!target->thread.ri_cb)
target->thread.ri_cb = data;


@ -57,7 +57,7 @@ static void init_runtime_instr_cb(struct runtime_instr_cb *cb)
cb->k = 1;
cb->ps = 1;
cb->pc = 1;
cb->key = PAGE_DEFAULT_KEY;
cb->key = PAGE_DEFAULT_KEY >> 4;
cb->v = 1;
}


@ -672,6 +672,19 @@ int zpci_disable_device(struct zpci_dev *zdev)
}
EXPORT_SYMBOL_GPL(zpci_disable_device);
void zpci_remove_device(struct zpci_dev *zdev)
{
struct zpci_bus *zbus = zdev->zbus;
struct pci_dev *pdev;
pdev = pci_get_slot(zbus->bus, zdev->devfn);
if (pdev) {
if (pdev->is_virtfn)
return zpci_remove_virtfn(pdev, zdev->vfn);
pci_stop_and_remove_bus_device_locked(pdev);
}
}
int zpci_create_device(struct zpci_dev *zdev)
{
int rc;
@ -716,13 +729,8 @@ void zpci_release_device(struct kref *kref)
{
struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);
if (zdev->zbus->bus) {
struct pci_dev *pdev;
pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn);
if (pdev)
pci_stop_and_remove_bus_device_locked(pdev);
}
if (zdev->zbus->bus)
zpci_remove_device(zdev);
switch (zdev->state) {
case ZPCI_FN_STATE_ONLINE:


@ -132,13 +132,14 @@ static int zpci_bus_link_virtfn(struct pci_dev *pdev,
{
int rc;
virtfn->physfn = pci_dev_get(pdev);
rc = pci_iov_sysfs_link(pdev, virtfn, vfid);
if (rc) {
pci_dev_put(pdev);
virtfn->physfn = NULL;
if (rc)
return rc;
}
virtfn->is_virtfn = 1;
virtfn->multifunction = 0;
virtfn->physfn = pci_dev_get(pdev);
return 0;
}
@ -151,9 +152,9 @@ static int zpci_bus_setup_virtfn(struct zpci_bus *zbus,
int vfid = vfn - 1; /* Linux' vfid's start at 0 vfn at 1*/
int rc = 0;
virtfn->is_virtfn = 1;
virtfn->multifunction = 0;
WARN_ON(vfid < 0);
if (!zbus->multifunction)
return 0;
/* If the parent PF for the given VF is also configured in the
* instance, it must be on the same zbus.
* We can then identify the parent PF by checking what
@ -165,11 +166,17 @@ static int zpci_bus_setup_virtfn(struct zpci_bus *zbus,
zdev = zbus->function[i];
if (zdev && zdev->is_physfn) {
pdev = pci_get_slot(zbus->bus, zdev->devfn);
if (!pdev)
continue;
cand_devfn = pci_iov_virtfn_devfn(pdev, vfid);
if (cand_devfn == virtfn->devfn) {
rc = zpci_bus_link_virtfn(pdev, virtfn, vfid);
/* balance pci_get_slot() */
pci_dev_put(pdev);
break;
}
/* balance pci_get_slot() */
pci_dev_put(pdev);
}
}
return rc;
@ -178,12 +185,23 @@ static int zpci_bus_setup_virtfn(struct zpci_bus *zbus,
static inline int zpci_bus_setup_virtfn(struct zpci_bus *zbus,
struct pci_dev *virtfn, int vfn)
{
virtfn->is_virtfn = 1;
virtfn->multifunction = 0;
return 0;
}
#endif
void pcibios_bus_add_device(struct pci_dev *pdev)
{
struct zpci_dev *zdev = to_zpci(pdev);
/*
* With pdev->no_vf_scan the common PCI probing code does not
* perform PF/VF linking.
*/
if (zdev->vfn)
zpci_bus_setup_virtfn(zdev->zbus, pdev, zdev->vfn);
}
static int zpci_bus_add_device(struct zpci_bus *zbus, struct zpci_dev *zdev)
{
struct pci_bus *bus;
@ -214,20 +232,10 @@ static int zpci_bus_add_device(struct zpci_bus *zbus, struct zpci_dev *zdev)
}
pdev = pci_scan_single_device(bus, zdev->devfn);
if (pdev) {
if (!zdev->is_physfn) {
rc = zpci_bus_setup_virtfn(zbus, pdev, zdev->vfn);
if (rc)
goto failed_with_pdev;
}
if (pdev)
pci_bus_add_device(pdev);
}
return 0;
failed_with_pdev:
pci_stop_and_remove_bus_device(pdev);
pci_dev_put(pdev);
return rc;
return 0;
}
static void zpci_bus_add_devices(struct zpci_bus *zbus)


@ -29,3 +29,16 @@ static inline struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus,
return (devfn >= ZPCI_FUNCTIONS_PER_BUS) ? NULL : zbus->function[devfn];
}
#ifdef CONFIG_PCI_IOV
static inline void zpci_remove_virtfn(struct pci_dev *pdev, int vfn)
{
pci_lock_rescan_remove();
/* Linux' vfid's start at 0 vfn at 1 */
pci_iov_remove_virtfn(pdev->physfn, vfn - 1);
pci_unlock_rescan_remove();
}
#else /* CONFIG_PCI_IOV */
static inline void zpci_remove_virtfn(struct pci_dev *pdev, int vfn) {}
#endif /* CONFIG_PCI_IOV */


@ -92,6 +92,9 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
ret = clp_add_pci_device(ccdf->fid, ccdf->fh, 1);
break;
}
/* the configuration request may be stale */
if (zdev->state != ZPCI_FN_STATE_STANDBY)
break;
zdev->fh = ccdf->fh;
zdev->state = ZPCI_FN_STATE_CONFIGURED;
ret = zpci_enable_device(zdev);
@ -118,7 +121,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
if (!zdev)
break;
if (pdev)
pci_stop_and_remove_bus_device_locked(pdev);
zpci_remove_device(zdev);
ret = zpci_disable_device(zdev);
if (ret)
@ -137,7 +140,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
/* Give the driver a hint that the function is
* already unusable. */
pdev->error_state = pci_channel_io_perm_failure;
pci_stop_and_remove_bus_device_locked(pdev);
zpci_remove_device(zdev);
}
zdev->state = ZPCI_FN_STATE_STANDBY;


@ -30,12 +30,9 @@
#define STATIC static
/*
* Use normal definitions of mem*() from string.c. There are already
* included header files which expect a definition of memset() and by
* the time we define memset macro, it is too late.
* Provide definitions of memzero and memmove as some of the decompressors will
* try to define their own functions if these are not defined as macros.
*/
#undef memcpy
#undef memset
#define memzero(s, n) memset((s), 0, (n))
#define memmove memmove


@ -11,10 +11,7 @@ void *memcpy(void *dst, const void *src, size_t len);
void *memset(void *dst, int c, size_t len);
int memcmp(const void *s1, const void *s2, size_t len);
/*
* Access builtin version by default. If one needs to use optimized version,
* do "undef memcpy" in .c file and link against right string.c
*/
/* Access builtin version by default. */
#define memcpy(d,s,l) __builtin_memcpy(d,s,l)
#define memset(d,c,l) __builtin_memset(d,c,l)
#define memcmp __builtin_memcmp


@ -374,12 +374,14 @@ For 32-bit we have the following conventions - kernel is built with
* Fetch the per-CPU GSBASE value for this processor and put it in @reg.
* We normally use %gs for accessing per-CPU data, but we are setting up
* %gs here and obviously can not use %gs itself to access per-CPU data.
*
* Do not use RDPID, because KVM loads guest's TSC_AUX on vm-entry and
* may not restore the host's value until the CPU returns to userspace.
* Thus the kernel would consume a guest's TSC_AUX if an NMI arrives
* while running KVM's run loop.
*/
.macro GET_PERCPU_BASE reg:req
ALTERNATIVE \
"LOAD_CPU_AND_NODE_SEG_LIMIT \reg", \
"RDPID \reg", \
X86_FEATURE_RDPID
LOAD_CPU_AND_NODE_SEG_LIMIT \reg
andq $VDSO_CPUNODE_MASK, \reg
movq __per_cpu_offset(, \reg, 8), \reg
.endm
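For context on the comment above: IA32_TSC_AUX normally carries the same cpu/node encoding the vDSO uses, which is exactly why a stale guest value would mislead GET_PERCPU_BASE. A userspace sketch reading it via RDTSCP, assuming the usual x86 Linux encoding (CPU number in the low 12 bits, node above):

	#include <stdio.h>
	#include <x86intrin.h>

	int main(void)
	{
		unsigned int aux;

		(void)__rdtscp(&aux);	/* reads IA32_TSC_AUX as a side effect */
		printf("cpu=%u node=%u\n", aux & 0xfff, aux >> 12);
		return 0;
	}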


@ -390,6 +390,18 @@ static struct uncore_event_desc snb_uncore_imc_events[] = {
INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(gt_requests, "event=0x03"),
INTEL_UNCORE_EVENT_DESC(gt_requests.scale, "6.103515625e-5"),
INTEL_UNCORE_EVENT_DESC(gt_requests.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(ia_requests, "event=0x04"),
INTEL_UNCORE_EVENT_DESC(ia_requests.scale, "6.103515625e-5"),
INTEL_UNCORE_EVENT_DESC(ia_requests.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(io_requests, "event=0x05"),
INTEL_UNCORE_EVENT_DESC(io_requests.scale, "6.103515625e-5"),
INTEL_UNCORE_EVENT_DESC(io_requests.unit, "MiB"),
{ /* end: all zeroes */ },
};
@ -405,13 +417,35 @@ static struct uncore_event_desc snb_uncore_imc_events[] = {
#define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE 0x5054
#define SNB_UNCORE_PCI_IMC_CTR_BASE SNB_UNCORE_PCI_IMC_DATA_READS_BASE
/* BW break down- legacy counters */
#define SNB_UNCORE_PCI_IMC_GT_REQUESTS 0x3
#define SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE 0x5040
#define SNB_UNCORE_PCI_IMC_IA_REQUESTS 0x4
#define SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE 0x5044
#define SNB_UNCORE_PCI_IMC_IO_REQUESTS 0x5
#define SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE 0x5048
enum perf_snb_uncore_imc_freerunning_types {
SNB_PCI_UNCORE_IMC_DATA = 0,
SNB_PCI_UNCORE_IMC_DATA_READS = 0,
SNB_PCI_UNCORE_IMC_DATA_WRITES,
SNB_PCI_UNCORE_IMC_GT_REQUESTS,
SNB_PCI_UNCORE_IMC_IA_REQUESTS,
SNB_PCI_UNCORE_IMC_IO_REQUESTS,
SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
};
static struct freerunning_counters snb_uncore_imc_freerunning[] = {
[SNB_PCI_UNCORE_IMC_DATA] = { SNB_UNCORE_PCI_IMC_DATA_READS_BASE, 0x4, 0x0, 2, 32 },
[SNB_PCI_UNCORE_IMC_DATA_READS] = { SNB_UNCORE_PCI_IMC_DATA_READS_BASE,
0x0, 0x0, 1, 32 },
[SNB_PCI_UNCORE_IMC_DATA_WRITES] = { SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE,
0x0, 0x0, 1, 32 },
[SNB_PCI_UNCORE_IMC_GT_REQUESTS] = { SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE,
0x0, 0x0, 1, 32 },
[SNB_PCI_UNCORE_IMC_IA_REQUESTS] = { SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE,
0x0, 0x0, 1, 32 },
[SNB_PCI_UNCORE_IMC_IO_REQUESTS] = { SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE,
0x0, 0x0, 1, 32 },
};
static struct attribute *snb_uncore_imc_formats_attr[] = {
@ -525,6 +559,18 @@ static int snb_uncore_imc_event_init(struct perf_event *event)
base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
idx = UNCORE_PMC_IDX_FREERUNNING;
break;
case SNB_UNCORE_PCI_IMC_GT_REQUESTS:
base = SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE;
idx = UNCORE_PMC_IDX_FREERUNNING;
break;
case SNB_UNCORE_PCI_IMC_IA_REQUESTS:
base = SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE;
idx = UNCORE_PMC_IDX_FREERUNNING;
break;
case SNB_UNCORE_PCI_IMC_IO_REQUESTS:
base = SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE;
idx = UNCORE_PMC_IDX_FREERUNNING;
break;
default:
return -EINVAL;
}
@ -598,7 +644,7 @@ static struct intel_uncore_ops snb_uncore_imc_ops = {
static struct intel_uncore_type snb_uncore_imc = {
.name = "imc",
.num_counters = 2,
.num_counters = 5,
.num_boxes = 1,
.num_freerunning_types = SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
.mmio_map_size = SNB_UNCORE_PCI_IMC_MAP_SIZE,
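The 6.103515625e-5 scale attached to every new event above is not arbitrary: the free-running IMC counters count 64-byte transfers, and 64 bytes expressed in MiB is exactly 64 / 2^20. A one-line check:

	#include <stdio.h>

	int main(void)
	{
		/* 64-byte cachelines expressed in MiB */
		printf("%.12g\n", 64.0 / (1024.0 * 1024.0));	/* 6.103515625e-05 */
		return 0;
	}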


@ -81,11 +81,8 @@ extern unsigned long efi_fw_vendor, efi_config_table;
kernel_fpu_end(); \
})
#define arch_efi_call_virt(p, f, args...) p->f(args)
#define efi_ioremap(addr, size, type, attr) ioremap_cache(addr, size)
#else /* !CONFIG_X86_32 */
#define EFI_LOADER_SIGNATURE "EL64"
@ -125,9 +122,6 @@ struct efi_scratch {
kernel_fpu_end(); \
})
extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
u32 type, u64 attribute);
#ifdef CONFIG_KASAN
/*
* CONFIG_KASAN may redefine memset to __memset. __memset function is present
@ -143,17 +137,13 @@ extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
#endif /* CONFIG_X86_32 */
extern struct efi_scratch efi_scratch;
extern void __init efi_set_executable(efi_memory_desc_t *md, bool executable);
extern int __init efi_memblock_x86_reserve_range(void);
extern void __init efi_print_memmap(void);
extern void __init efi_memory_uc(u64 addr, unsigned long size);
extern void __init efi_map_region(efi_memory_desc_t *md);
extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
extern void efi_sync_low_kernel_mappings(void);
extern int __init efi_alloc_page_tables(void);
extern int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages);
extern void __init old_map_region(efi_memory_desc_t *md);
extern void __init runtime_code_page_mkexec(void);
extern void __init efi_runtime_update_mappings(void);
extern void __init efi_dump_pagetable(void);
extern void __init efi_apply_memmap_quirks(void);


@ -1596,7 +1596,8 @@ asmlinkage void kvm_spurious_fault(void);
_ASM_EXTABLE(666b, 667b)
#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
unsigned flags);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);


@ -1916,7 +1916,8 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
}
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
unsigned flags)
{
return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
}
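The extra flags argument threaded through each architecture above comes from the generic MMU notifier path, which now forwards the notifier range flags so arch code can tell whether the invalidation may block. Roughly, as a non-compilable fragment inferred from the signature change:

	/* in kvm_mmu_notifier_invalidate_range_start(), roughly: */
	ret = kvm_unmap_hva_range(kvm, range->start, range->end, range->flags);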


@ -975,7 +975,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
unsigned long old_cr4 = kvm_read_cr4(vcpu);
unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;
X86_CR4_SMEP;
if (kvm_valid_cr4(vcpu, cr4))
return 1;
@ -10751,9 +10751,11 @@ EXPORT_SYMBOL_GPL(kvm_spec_ctrl_test_value);
void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code)
{
struct x86_exception fault;
u32 access = error_code &
(PFERR_WRITE_MASK | PFERR_FETCH_MASK | PFERR_USER_MASK);
if (!(error_code & PFERR_PRESENT_MASK) ||
vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, error_code, &fault) != UNMAPPED_GVA) {
vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, &fault) != UNMAPPED_GVA) {
/*
* If vcpu->arch.walk_mmu->gva_to_gpa succeeded, the page
* tables probably do not match the TLB. Just proceed


@ -26,6 +26,7 @@
#include <asm/xen/pci.h>
#include <asm/xen/cpuid.h>
#include <asm/apic.h>
#include <asm/acpi.h>
#include <asm/i8259.h>
static int xen_pcifront_enable_irq(struct pci_dev *dev)


@ -49,7 +49,6 @@
#include <asm/efi.h>
#include <asm/e820/api.h>
#include <asm/time.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/uv/uv.h>
@ -496,74 +495,6 @@ void __init efi_init(void)
efi_print_memmap();
}
#if defined(CONFIG_X86_32)
void __init efi_set_executable(efi_memory_desc_t *md, bool executable)
{
u64 addr, npages;
addr = md->virt_addr;
npages = md->num_pages;
memrange_efi_to_native(&addr, &npages);
if (executable)
set_memory_x(addr, npages);
else
set_memory_nx(addr, npages);
}
void __init runtime_code_page_mkexec(void)
{
efi_memory_desc_t *md;
/* Make EFI runtime service code area executable */
for_each_efi_memory_desc(md) {
if (md->type != EFI_RUNTIME_SERVICES_CODE)
continue;
efi_set_executable(md, true);
}
}
void __init efi_memory_uc(u64 addr, unsigned long size)
{
unsigned long page_shift = 1UL << EFI_PAGE_SHIFT;
u64 npages;
npages = round_up(size, page_shift) / page_shift;
memrange_efi_to_native(&addr, &npages);
set_memory_uc(addr, npages);
}
void __init old_map_region(efi_memory_desc_t *md)
{
u64 start_pfn, end_pfn, end;
unsigned long size;
void *va;
start_pfn = PFN_DOWN(md->phys_addr);
size = md->num_pages << PAGE_SHIFT;
end = md->phys_addr + size;
end_pfn = PFN_UP(end);
if (pfn_range_is_mapped(start_pfn, end_pfn)) {
va = __va(md->phys_addr);
if (!(md->attribute & EFI_MEMORY_WB))
efi_memory_uc((u64)(unsigned long)va, size);
} else
va = efi_ioremap(md->phys_addr, size,
md->type, md->attribute);
md->virt_addr = (u64) (unsigned long) va;
if (!va)
pr_err("ioremap of 0x%llX failed!\n",
(unsigned long long)md->phys_addr);
}
#endif
/* Merge contiguous regions of the same type and attribute */
static void __init efi_merge_regions(void)
{


@ -29,9 +29,35 @@
#include <asm/io.h>
#include <asm/desc.h>
#include <asm/page.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>
#include <asm/efi.h>
void __init efi_map_region(efi_memory_desc_t *md)
{
u64 start_pfn, end_pfn, end;
unsigned long size;
void *va;
start_pfn = PFN_DOWN(md->phys_addr);
size = md->num_pages << PAGE_SHIFT;
end = md->phys_addr + size;
end_pfn = PFN_UP(end);
if (pfn_range_is_mapped(start_pfn, end_pfn)) {
va = __va(md->phys_addr);
if (!(md->attribute & EFI_MEMORY_WB))
set_memory_uc((unsigned long)va, md->num_pages);
} else {
va = ioremap_cache(md->phys_addr, size);
}
md->virt_addr = (unsigned long)va;
if (!va)
pr_err("ioremap of 0x%llX failed!\n", md->phys_addr);
}
/*
* To make EFI call EFI runtime service in physical addressing mode we need
* prolog/epilog before/after the invocation to claim the EFI runtime service
@ -58,11 +84,6 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
return 0;
}
void __init efi_map_region(efi_memory_desc_t *md)
{
old_map_region(md);
}
void __init efi_map_region_fixed(efi_memory_desc_t *md) {}
void __init parse_efi_setup(u64 phys_addr, u32 data_len) {}
@ -107,6 +128,15 @@ efi_status_t __init efi_set_virtual_address_map(unsigned long memory_map_size,
void __init efi_runtime_update_mappings(void)
{
if (__supported_pte_mask & _PAGE_NX)
runtime_code_page_mkexec();
if (__supported_pte_mask & _PAGE_NX) {
efi_memory_desc_t *md;
/* Make EFI runtime service code area executable */
for_each_efi_memory_desc(md) {
if (md->type != EFI_RUNTIME_SERVICES_CODE)
continue;
set_memory_x(md->virt_addr, md->num_pages);
}
}
}


@ -259,6 +259,8 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
npages = (__end_rodata - __start_rodata) >> PAGE_SHIFT;
rodata = __pa(__start_rodata);
pfn = rodata >> PAGE_SHIFT;
pf = _PAGE_NX | _PAGE_ENC;
if (kernel_map_pages_in_pgd(pgd, pfn, rodata, npages, pf)) {
pr_err("Failed to map kernel rodata 1:1\n");
return 1;


@ -653,9 +653,8 @@ config ATCPIT100_TIMER
This option enables support for the Andestech ATCPIT100 timers.
config RISCV_TIMER
bool "Timer for the RISC-V platform"
bool "Timer for the RISC-V platform" if COMPILE_TEST
depends on GENERIC_SCHED_CLOCK && RISCV
default y
select TIMER_PROBE
select TIMER_OF
help
@ -663,6 +662,15 @@ config RISCV_TIMER
is accessed via both the SBI and the rdcycle instruction. This is
required for all RISC-V systems.
config CLINT_TIMER
bool "CLINT Timer for the RISC-V platform" if COMPILE_TEST
depends on GENERIC_SCHED_CLOCK && RISCV
select TIMER_PROBE
select TIMER_OF
help
This option enables the CLINT timer for RISC-V systems. The CLINT
driver is usually used for NoMMU RISC-V systems.
config CSKY_MP_TIMER
bool "SMP Timer for the C-SKY platform" if COMPILE_TEST
depends on CSKY


@ -89,6 +89,7 @@ obj-$(CONFIG_CLKSRC_ST_LPC) += clksrc_st_lpc.o
obj-$(CONFIG_X86_NUMACHIP) += numachip.o
obj-$(CONFIG_ATCPIT100_TIMER) += timer-atcpit100.o
obj-$(CONFIG_RISCV_TIMER) += timer-riscv.o
obj-$(CONFIG_CLINT_TIMER) += timer-clint.o
obj-$(CONFIG_CSKY_MP_TIMER) += timer-mp-csky.o
obj-$(CONFIG_GX6605S_TIMER) += timer-gx6605s.o
obj-$(CONFIG_HYPERV_TIMER) += hyperv_timer.o


@ -0,0 +1,226 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020 Western Digital Corporation or its affiliates.
*
* Most M-mode (i.e. NoMMU) RISC-V systems have a CLINT MMIO
* timer device.
*/
#define pr_fmt(fmt) "clint: " fmt
#include <linux/bitops.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/sched_clock.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/smp.h>
#define CLINT_IPI_OFF 0
#define CLINT_TIMER_CMP_OFF 0x4000
#define CLINT_TIMER_VAL_OFF 0xbff8
/* CLINT manages IPI and Timer for RISC-V M-mode */
static u32 __iomem *clint_ipi_base;
static u64 __iomem *clint_timer_cmp;
static u64 __iomem *clint_timer_val;
static unsigned long clint_timer_freq;
static unsigned int clint_timer_irq;
static void clint_send_ipi(const struct cpumask *target)
{
unsigned int cpu;
for_each_cpu(cpu, target)
writel(1, clint_ipi_base + cpuid_to_hartid_map(cpu));
}
static void clint_clear_ipi(void)
{
writel(0, clint_ipi_base + cpuid_to_hartid_map(smp_processor_id()));
}
static struct riscv_ipi_ops clint_ipi_ops = {
.ipi_inject = clint_send_ipi,
.ipi_clear = clint_clear_ipi,
};
#ifdef CONFIG_64BIT
#define clint_get_cycles() readq_relaxed(clint_timer_val)
#else
#define clint_get_cycles() readl_relaxed(clint_timer_val)
#define clint_get_cycles_hi() readl_relaxed(((u32 *)clint_timer_val) + 1)
#endif
#ifdef CONFIG_64BIT
static u64 notrace clint_get_cycles64(void)
{
return clint_get_cycles();
}
#else /* CONFIG_64BIT */
static u64 notrace clint_get_cycles64(void)
{
u32 hi, lo;
do {
hi = clint_get_cycles_hi();
lo = clint_get_cycles();
} while (hi != clint_get_cycles_hi());
return ((u64)hi << 32) | lo;
}
#endif /* CONFIG_64BIT */
static u64 clint_rdtime(struct clocksource *cs)
{
return clint_get_cycles64();
}
static struct clocksource clint_clocksource = {
.name = "clint_clocksource",
.rating = 300,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
.read = clint_rdtime,
};
static int clint_clock_next_event(unsigned long delta,
struct clock_event_device *ce)
{
void __iomem *r = clint_timer_cmp +
cpuid_to_hartid_map(smp_processor_id());
csr_set(CSR_IE, IE_TIE);
writeq_relaxed(clint_get_cycles64() + delta, r);
return 0;
}
static DEFINE_PER_CPU(struct clock_event_device, clint_clock_event) = {
.name = "clint_clockevent",
.features = CLOCK_EVT_FEAT_ONESHOT,
.rating = 100,
.set_next_event = clint_clock_next_event,
};
static int clint_timer_starting_cpu(unsigned int cpu)
{
struct clock_event_device *ce = per_cpu_ptr(&clint_clock_event, cpu);
ce->cpumask = cpumask_of(cpu);
clockevents_config_and_register(ce, clint_timer_freq, 100, 0x7fffffff);
enable_percpu_irq(clint_timer_irq,
irq_get_trigger_type(clint_timer_irq));
return 0;
}
static int clint_timer_dying_cpu(unsigned int cpu)
{
disable_percpu_irq(clint_timer_irq);
return 0;
}
static irqreturn_t clint_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evdev = this_cpu_ptr(&clint_clock_event);
csr_clear(CSR_IE, IE_TIE);
evdev->event_handler(evdev);
return IRQ_HANDLED;
}
static int __init clint_timer_init_dt(struct device_node *np)
{
int rc;
u32 i, nr_irqs;
void __iomem *base;
struct of_phandle_args oirq;
/*
* Ensure that CLINT device interrupts are either RV_IRQ_TIMER or
* RV_IRQ_SOFT. If any other interrupt is found, we ignore the device.
*/
nr_irqs = of_irq_count(np);
for (i = 0; i < nr_irqs; i++) {
if (of_irq_parse_one(np, i, &oirq)) {
pr_err("%pOFP: failed to parse irq %d.\n", np, i);
continue;
}
if ((oirq.args_count != 1) ||
(oirq.args[0] != RV_IRQ_TIMER &&
oirq.args[0] != RV_IRQ_SOFT)) {
pr_err("%pOFP: invalid irq %d (hwirq %d)\n",
np, i, oirq.args[0]);
return -ENODEV;
}
/* Find parent irq domain and map timer irq */
if (!clint_timer_irq &&
oirq.args[0] == RV_IRQ_TIMER &&
irq_find_host(oirq.np))
clint_timer_irq = irq_of_parse_and_map(np, i);
}
/* If CLINT timer irq not found then fail */
if (!clint_timer_irq) {
pr_err("%pOFP: timer irq not found\n", np);
return -ENODEV;
}
base = of_iomap(np, 0);
if (!base) {
pr_err("%pOFP: could not map registers\n", np);
return -ENODEV;
}
clint_ipi_base = base + CLINT_IPI_OFF;
clint_timer_cmp = base + CLINT_TIMER_CMP_OFF;
clint_timer_val = base + CLINT_TIMER_VAL_OFF;
clint_timer_freq = riscv_timebase;
pr_info("%pOFP: timer running at %ld Hz\n", np, clint_timer_freq);
rc = clocksource_register_hz(&clint_clocksource, clint_timer_freq);
if (rc) {
pr_err("%pOFP: clocksource register failed [%d]\n", np, rc);
goto fail_iounmap;
}
sched_clock_register(clint_get_cycles64, 64, clint_timer_freq);
rc = request_percpu_irq(clint_timer_irq, clint_timer_interrupt,
"clint-timer", &clint_clock_event);
if (rc) {
pr_err("registering percpu irq failed [%d]\n", rc);
goto fail_iounmap;
}
rc = cpuhp_setup_state(CPUHP_AP_CLINT_TIMER_STARTING,
"clockevents/clint/timer:starting",
clint_timer_starting_cpu,
clint_timer_dying_cpu);
if (rc) {
pr_err("%pOFP: cpuhp setup state failed [%d]\n", np, rc);
goto fail_free_irq;
}
riscv_set_ipi_ops(&clint_ipi_ops);
clint_clear_ipi();
return 0;
fail_free_irq:
free_irq(clint_timer_irq, &clint_clock_event);
fail_iounmap:
iounmap(base);
return rc;
}
TIMER_OF_DECLARE(clint_timer, "riscv,clint0", clint_timer_init_dt);
TIMER_OF_DECLARE(clint_timer1, "sifive,clint0", clint_timer_init_dt);
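One detail worth spelling out: clint_timer_cmp is declared u64 __iomem *, so the expression clint_timer_cmp + cpuid_to_hartid_map(...) in clint_clock_next_event() advances in 8-byte steps through the per-hart mtimecmp array that starts at offset 0x4000. A quick arithmetic check in plain C, with the layout constant copied from the driver:

	#include <stdio.h>

	#define CLINT_TIMER_CMP_OFF 0x4000u	/* per-hart mtimecmp array */

	int main(void)
	{
		for (unsigned int hartid = 0; hartid < 4; hartid++)
			printf("hart %u: mtimecmp at base + 0x%x\n",
			       hartid, CLINT_TIMER_CMP_OFF + 8 * hartid);
		return 0;
	}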


@ -19,26 +19,13 @@
#include <linux/of_irq.h>
#include <asm/smp.h>
#include <asm/sbi.h>
u64 __iomem *riscv_time_cmp;
u64 __iomem *riscv_time_val;
static inline void mmio_set_timer(u64 val)
{
void __iomem *r;
r = riscv_time_cmp + cpuid_to_hartid_map(smp_processor_id());
writeq_relaxed(val, r);
}
#include <asm/timex.h>
static int riscv_clock_next_event(unsigned long delta,
struct clock_event_device *ce)
{
csr_set(CSR_IE, IE_TIE);
if (IS_ENABLED(CONFIG_RISCV_SBI))
sbi_set_timer(get_cycles64() + delta);
else
mmio_set_timer(get_cycles64() + delta);
sbi_set_timer(get_cycles64() + delta);
return 0;
}


@ -1710,9 +1710,9 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci,
if (uncorrected_error) {
core_err_cnt = 1;
if (ripv)
tp_event = HW_EVENT_ERR_FATAL;
else
tp_event = HW_EVENT_ERR_UNCORRECTED;
else
tp_event = HW_EVENT_ERR_FATAL;
} else {
tp_event = HW_EVENT_ERR_CORRECTED;
}
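The same severity swap repeats in the pnd2, sbridge and skx hunks below. With old and new lines interleaved, the corrected logic is easiest to read as a reconstruction: MCi_STATUS RIPV set means the return IP is valid and execution can continue, so the error is severe but survivable.

	if (uncorrected_error) {
		core_err_cnt = 1;
		if (ripv)
			tp_event = HW_EVENT_ERR_UNCORRECTED;	/* recoverable */
		else
			tp_event = HW_EVENT_ERR_FATAL;	/* no valid return IP */
	} else {
		tp_event = HW_EVENT_ERR_CORRECTED;
	}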


@ -1155,7 +1155,7 @@ static void pnd2_mce_output_error(struct mem_ctl_info *mci, const struct mce *m,
u32 optypenum = GET_BITFIELD(m->status, 4, 6);
int rc;
tp_event = uc_err ? (ripv ? HW_EVENT_ERR_FATAL : HW_EVENT_ERR_UNCORRECTED) :
tp_event = uc_err ? (ripv ? HW_EVENT_ERR_UNCORRECTED : HW_EVENT_ERR_FATAL) :
HW_EVENT_ERR_CORRECTED;
/*


@ -2982,9 +2982,9 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
if (uncorrected_error) {
core_err_cnt = 1;
if (ripv) {
tp_event = HW_EVENT_ERR_FATAL;
} else {
tp_event = HW_EVENT_ERR_UNCORRECTED;
} else {
tp_event = HW_EVENT_ERR_FATAL;
}
} else {
tp_event = HW_EVENT_ERR_CORRECTED;


@ -493,9 +493,9 @@ static void skx_mce_output_error(struct mem_ctl_info *mci,
if (uncorrected_error) {
core_err_cnt = 1;
if (ripv) {
tp_event = HW_EVENT_ERR_FATAL;
} else {
tp_event = HW_EVENT_ERR_UNCORRECTED;
} else {
tp_event = HW_EVENT_ERR_FATAL;
}
} else {
tp_event = HW_EVENT_ERR_CORRECTED;


@ -381,6 +381,7 @@ static int __init efisubsys_init(void)
efi_kobj = kobject_create_and_add("efi", firmware_kobj);
if (!efi_kobj) {
pr_err("efi: Firmware registration failed.\n");
destroy_workqueue(efi_rts_wq);
return -ENOMEM;
}
@ -424,6 +425,7 @@ err_unregister:
generic_ops_unregister();
err_put:
kobject_put(efi_kobj);
destroy_workqueue(efi_rts_wq);
return error;
}
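Both destroy_workqueue() additions follow the standard unwind rule: anything a failing init path created earlier must be torn down before returning. A runnable stand-in of the shape, where every name is a hypothetical placeholder rather than the EFI code:

	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>

	static void *wq;
	static void *create_wq(void) { return malloc(1); }
	static void destroy_wq(void *w) { free(w); }
	static int register_things(void) { return -ENOMEM; /* force failure */ }

	static int subsys_init(void)
	{
		int err;

		wq = create_wq();
		if (!wq)
			return -ENOMEM;

		err = register_things();
		if (err)
			goto err_destroy_wq;	/* unwind in reverse order */

		return 0;

	err_destroy_wq:
		destroy_wq(wq);
		return err;
	}

	int main(void)
	{
		printf("%d\n", subsys_init());	/* -12 on Linux, nothing leaked */
		return 0;
	}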


@ -187,20 +187,28 @@ int efi_printk(const char *fmt, ...)
*/
efi_status_t efi_parse_options(char const *cmdline)
{
size_t len = strlen(cmdline) + 1;
size_t len;
efi_status_t status;
char *str, *buf;
if (!cmdline)
return EFI_SUCCESS;
len = strnlen(cmdline, COMMAND_LINE_SIZE - 1) + 1;
status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, len, (void **)&buf);
if (status != EFI_SUCCESS)
return status;
str = skip_spaces(memcpy(buf, cmdline, len));
memcpy(buf, cmdline, len - 1);
buf[len - 1] = '\0';
str = skip_spaces(buf);
while (*str) {
char *param, *val;
str = next_arg(str, &param, &val);
if (!val && !strcmp(param, "--"))
break;
if (!strcmp(param, "nokaslr")) {
efi_nokaslr = true;
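The change swaps an unbounded strlen() for a bounded scan plus an explicit terminator, so an unterminated firmware-provided command line can no longer walk the parser past the end of the allocation. The same pattern in a minimal userspace form (the COMMAND_LINE_SIZE value here is illustrative; it is arch-dependent in the kernel):

	#include <stdio.h>
	#include <string.h>

	#define COMMAND_LINE_SIZE 2048	/* illustrative bound */

	int main(void)
	{
		const char cmdline[] = "quiet nokaslr";
		char buf[COMMAND_LINE_SIZE];
		size_t len = strnlen(cmdline, COMMAND_LINE_SIZE - 1) + 1;

		memcpy(buf, cmdline, len - 1);
		buf[len - 1] = '\0';	/* terminate explicitly */
		printf("%s\n", buf);
		return 0;
	}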


@ -195,19 +195,32 @@ static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
unsigned int engine_id,
unsigned int queue_id)
{
uint32_t sdma_engine_reg_base[2] = {
SOC15_REG_OFFSET(SDMA0, 0,
mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL,
SOC15_REG_OFFSET(SDMA1, 0,
mmSDMA1_RLC0_RB_CNTL) - mmSDMA1_RLC0_RB_CNTL
};
uint32_t retval = sdma_engine_reg_base[engine_id]
uint32_t sdma_engine_reg_base = 0;
uint32_t sdma_rlc_reg_offset;
switch (engine_id) {
default:
dev_warn(adev->dev,
"Invalid sdma engine id (%d), using engine id 0\n",
engine_id);
fallthrough;
case 0:
sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
break;
case 1:
sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA1, 0,
mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
break;
}
sdma_rlc_reg_offset = sdma_engine_reg_base
+ queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);
pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
queue_id, retval);
queue_id, sdma_rlc_reg_offset);
return retval;
return sdma_rlc_reg_offset;
}
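The rewritten switch puts default: first and falls through into case 0, which both warns about and clamps a bad engine_id instead of indexing past a two-entry array as the old table lookup could. The pattern in isolation, with hypothetical names:

	#include <stdio.h>

	static unsigned int clamp_engine(unsigned int engine_id)
	{
		switch (engine_id) {
		default:
			fprintf(stderr, "invalid engine id %u, using 0\n",
				engine_id);
			/* fallthrough */
		case 0:
			return 0;
		case 1:
			return 1;
		}
	}

	int main(void)
	{
		printf("%u\n", clamp_engine(7));	/* warns, prints 0 */
		return 0;
	}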
static inline struct v9_mqd *get_mqd(void *mqd)


@ -1243,7 +1243,6 @@ void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
if (!obj || !obj->ent)
return;
debugfs_remove(obj->ent);
obj->ent = NULL;
put_obj(obj);
}
@ -1257,7 +1256,6 @@ static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev)
amdgpu_ras_debugfs_remove(adev, &obj->head);
}
debugfs_remove_recursive(con->dir);
con->dir = NULL;
}
/* debugfs end */
