Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Signed-off-by: David S. Miller <davem@davemloft.net>
commit efd13b71a3

.mailmap | 3
@@ -36,6 +36,7 @@ Andrew Morton <akpm@linux-foundation.org>
 Andrew Murray <amurray@thegoodpenguin.co.uk> <amurray@embedded-bits.co.uk>
 Andrew Murray <amurray@thegoodpenguin.co.uk> <andrew.murray@arm.com>
 Andrew Vasquez <andrew.vasquez@qlogic.com>
+Andrey Konovalov <andreyknvl@gmail.com> <andreyknvl@google.com>
 Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
 Andrey Ryabinin <ryabinin.a.a@gmail.com> <aryabinin@virtuozzo.com>
 Andy Adamson <andros@citi.umich.edu>
@@ -65,6 +66,8 @@ Changbin Du <changbin.du@intel.com> <changbin.du@gmail.com>
 Changbin Du <changbin.du@intel.com> <changbin.du@intel.com>
 Chao Yu <chao@kernel.org> <chao2.yu@samsung.com>
 Chao Yu <chao@kernel.org> <yuchao0@huawei.com>
+Chris Chiu <chris.chiu@canonical.com> <chiu@endlessm.com>
+Chris Chiu <chris.chiu@canonical.com> <chiu@endlessos.org>
 Christophe Ricard <christophe.ricard@gmail.com>
 Christoph Hellwig <hch@lst.de>
 Corey Minyard <minyard@acm.org>
@@ -33,7 +33,7 @@ Contact:	xfs@oss.sgi.com
 Description:
 		The current state of the log write grant head. It
 		represents the total log reservation of all currently
-		oustanding transactions, including regrants due to
+		outstanding transactions, including regrants due to
 		rolling transactions. The grant head is exported in
 		"cycle:bytes" format.
 Users:		xfstests
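[Aside, not part of the commit: the "cycle:bytes" value described above is a
plain two-field string, so a consumer such as xfstests can split it with a
single sscanf. A minimal C sketch — the helper name is hypothetical:]

#include <stdio.h>

/* Parse a grant head exported as "cycle:bytes", e.g. "1:8832". */
static int parse_grant_head(const char *s, int *cycle, long *bytes)
{
	/* Two colon-separated integers; return 0 only if both parse. */
	return sscanf(s, "%d:%ld", cycle, bytes) == 2 ? 0 : -1;
}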
@@ -17,12 +17,12 @@ For ACPI on arm64, tables also fall into the following categories:

        - Recommended: BERT, EINJ, ERST, HEST, PCCT, SSDT

-       - Optional: BGRT, CPEP, CSRT, DBG2, DRTM, ECDT, FACS, FPDT, IORT,
-         MCHI, MPST, MSCT, NFIT, PMTT, RASF, SBST, SLIT, SPMI, SRAT, STAO,
-         TCPA, TPM2, UEFI, XENV
+       - Optional: BGRT, CPEP, CSRT, DBG2, DRTM, ECDT, FACS, FPDT, IBFT,
+         IORT, MCHI, MPST, MSCT, NFIT, PMTT, RASF, SBST, SLIT, SPMI, SRAT,
+         STAO, TCPA, TPM2, UEFI, XENV

-       - Not supported: BOOT, DBGP, DMAR, ETDT, HPET, IBFT, IVRS, LPIT,
-         MSDM, OEMx, PSDT, RSDT, SLIC, WAET, WDAT, WDRT, WPBT
+       - Not supported: BOOT, DBGP, DMAR, ETDT, HPET, IVRS, LPIT, MSDM, OEMx,
+         PSDT, RSDT, SLIC, WAET, WDAT, WDRT, WPBT

 ====== ========================================================================
 Table  Usage for ARMv8 Linux
@@ -130,6 +130,9 @@ stable kernels.
 | Marvell        | ARM-MMU-500     | #582743         | N/A                         |
 +----------------+-----------------+-----------------+-----------------------------+
 +----------------+-----------------+-----------------+-----------------------------+
+| NVIDIA         | Carmel Core     | N/A             | NVIDIA_CARMEL_CNP_ERRATUM   |
++----------------+-----------------+-----------------+-----------------------------+
++----------------+-----------------+-----------------+-----------------------------+
 | Freescale/NXP  | LS2080A/LS1043A | A-008585        | FSL_ERRATUM_A008585         |
 +----------------+-----------------+-----------------+-----------------------------+
 +----------------+-----------------+-----------------+-----------------------------+
@@ -23,6 +23,7 @@ properties:
       - enum:
           - ingenic,jz4775-intc
           - ingenic,jz4770-intc
+          - ingenic,jz4760b-intc
       - const: ingenic,jz4760-intc
   - items:
       - const: ingenic,x1000-intc
@@ -21,6 +21,10 @@ properties:
       - fsl,vf610-spdif
       - fsl,imx6sx-spdif
       - fsl,imx8qm-spdif
+      - fsl,imx8qxp-spdif
+      - fsl,imx8mq-spdif
+      - fsl,imx8mm-spdif
+      - fsl,imx8mn-spdif

   reg:
     maxItems: 1
@@ -613,6 +613,27 @@ Some of these date from the very introduction of KMS in 2008 ...

 Level: Intermediate

+Remove automatic page mapping from dma-buf importing
+----------------------------------------------------
+
+When importing dma-bufs, the dma-buf and PRIME frameworks automatically map
+imported pages into the importer's DMA area. drm_gem_prime_fd_to_handle() and
+drm_gem_prime_handle_to_fd() require that importers call dma_buf_attach()
+even if they never do actual device DMA, but only CPU access through
+dma_buf_vmap(). This is a problem for USB devices, which do not support DMA
+operations.
+
+To fix the issue, automatic page mappings should be removed from the
+buffer-sharing code. Fixing this is a bit more involved, since the import/export
+cache is also tied to &drm_gem_object.import_attach. Meanwhile we paper over
+this problem for USB devices by fishing out the USB host controller device, as
+long as that supports DMA. Otherwise importing can still needlessly fail.
+
+Contact: Thomas Zimmermann <tzimmermann@suse.de>, Daniel Vetter
+
+Level: Advanced
+

 Better Testing
 ==============

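[Aside, not part of the commit: a minimal sketch of the importer pattern this
TODO criticizes, assuming the v5.12-era dma-buf API (dma_buf_attach,
dma_buf_vmap with struct dma_buf_map). Names of the function and variables are
illustrative only — even a CPU-only importer must attach first today:]

#include <linux/dma-buf.h>

static int cpu_only_import(struct dma_buf *buf, struct device *dev)
{
	struct dma_buf_attachment *attach;
	struct dma_buf_map map;
	int ret;

	/* Required today even though we never do device DMA. */
	attach = dma_buf_attach(buf, dev);
	if (IS_ERR(attach))
		return PTR_ERR(attach);

	/* All we actually want: CPU access to the buffer. */
	ret = dma_buf_vmap(buf, &map);
	if (ret) {
		dma_buf_detach(buf, attach);
		return ret;
	}

	/* ... use map.vaddr ... */

	dma_buf_vunmap(buf, &map);
	dma_buf_detach(buf, attach);
	return 0;
}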
@@ -267,7 +267,7 @@ DATA PATH
 Tx
 --

-end_start_xmit() is called by the stack. This function does the following:
+ena_start_xmit() is called by the stack. This function does the following:

 - Maps data buffers (skb->data and frags).
 - Populates ena_buf for the push buffer (if the driver and device are
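[Aside, not from the diff: "called by the stack" means the function is wired
up as the driver's .ndo_start_xmit hook, roughly as below; the surrounding
ops structure is abbreviated:]

static const struct net_device_ops ena_netdev_ops = {
	/* ... other callbacks ... */
	.ndo_start_xmit	= ena_start_xmit,	/* invoked per outgoing skb */
};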
@@ -52,7 +52,7 @@ purposes as a standard complementary tool. The system's view from
 ``devlink-dpipe`` should change according to the changes done by the
 standard configuration tools.

-For example, it’s quiet common to implement Access Control Lists (ACL)
+For example, it’s quite common to implement Access Control Lists (ACL)
 using Ternary Content Addressable Memory (TCAM). The TCAM memory can be
 divided into TCAM regions. Complex TC filters can have multiple rules with
 different priorities and different lookup keys. On the other hand hardware
@@ -151,7 +151,7 @@ representor netdevice.
 -------------
 A subfunction devlink port is created but it is not active yet. That means the
 entities are created on devlink side, the e-switch port representor is created,
-but the subfunction device itself it not created. A user might use e-switch port
+but the subfunction device itself is not created. A user might use e-switch port
 representor to do settings, putting it into bridge, adding TC rules, etc. A user
 might as well configure the hardware address (such as MAC address) of the
 subfunction while subfunction is inactive.
@@ -173,7 +173,7 @@ Terms and Definitions
    * - Term
      - Definitions
    * - ``PCI device``
-     - A physical PCI device having one or more PCI bus consists of one or
+     - A physical PCI device having one or more PCI buses consists of one or
        more PCI controllers.
    * - ``PCI controller``
      - A controller consists of potentially multiple physical functions,
@@ -50,7 +50,7 @@ Callbacks to implement

 The NIC driver offering ipsec offload will need to implement these
 callbacks to make the offload available to the network stack's
-XFRM subsytem. Additionally, the feature bits NETIF_F_HW_ESP and
+XFRM subsystem. Additionally, the feature bits NETIF_F_HW_ESP and
 NETIF_F_HW_ESP_TX_CSUM will signal the availability of the offload.
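[Aside, not from the diff: a hedged sketch of what "implement these callbacks"
plus the feature bits looks like in a NIC driver's setup path. The my_*
identifiers are hypothetical; the struct fields and NETIF_F_* flags are the
real ones named above:]

static const struct xfrmdev_ops my_xfrmdev_ops = {
	.xdo_dev_state_add	= my_xdo_dev_state_add,
	.xdo_dev_state_delete	= my_xdo_dev_state_delete,
	.xdo_dev_state_free	= my_xdo_dev_state_free,
	.xdo_dev_offload_ok	= my_xdo_dev_offload_ok,
};

static void my_netdev_setup(struct net_device *netdev)
{
	netdev->xfrmdev_ops = &my_xfrmdev_ops;
	/* Advertise the offload to the XFRM subsystem. */
	netdev->features    |= NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM;
	netdev->hw_features |= NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM;
}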
@@ -182,6 +182,9 @@ is dependent on the CPU capability and the kernel configuration. The limit can
 be retrieved using KVM_CAP_ARM_VM_IPA_SIZE of the KVM_CHECK_EXTENSION
 ioctl() at run-time.

+Creation of the VM will fail if the requested IPA size (whether it is
+implicit or explicit) is unsupported on the host.
+
 Please note that configuring the IPA size does not affect the capability
 exposed by the guest CPUs in ID_AA64MMFR0_EL1[PARange]. It only affects
 size of the address translated by the stage2 level (guest physical to
@@ -1492,7 +1495,8 @@ Fails if any VCPU has already been created.

 Define which vcpu is the Bootstrap Processor (BSP). Values are the same
 as the vcpu id in KVM_CREATE_VCPU. If this ioctl is not called, the default
-is vcpu 0.
+is vcpu 0. This ioctl has to be called before vcpu creation,
+otherwise it will return EBUSY error.


 4.42 KVM_GET_XSAVE
@@ -4803,8 +4807,10 @@ If an MSR access is not permitted through the filtering, it generates a
 allows user space to deflect and potentially handle various MSR accesses
 into user space.

-If a vCPU is in running state while this ioctl is invoked, the vCPU may
-experience inconsistent filtering behavior on MSR accesses.
+Note, invoking this ioctl while a vCPU is running is inherently racy. However,
+KVM does guarantee that vCPUs will see either the previous filter or the new
+filter, e.g. MSRs with identical settings in both the old and new filter will
+have deterministic behavior.

 4.127 KVM_XEN_HVM_SET_ATTR
 --------------------------
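[Aside, not from the diff: a hedged userspace sketch of the limit check the
doc describes — query KVM_CAP_ARM_VM_IPA_SIZE via KVM_CHECK_EXTENSION, then
pass the IPA size in the machine type; error handling is trimmed:]

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int create_vm_with_ipa(int ipa_bits)
{
	int kvm = open("/dev/kvm", O_RDWR);
	int max = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_ARM_VM_IPA_SIZE);

	if (max > 0 && ipa_bits > max)
		return -1;	/* KVM_CREATE_VM would fail with this IPA */

	/* A type of 0 selects the implicit 40-bit default, which can now
	 * also fail on hosts whose limit is below 40 bits. */
	return ioctl(kvm, KVM_CREATE_VM, KVM_VM_TYPE_ARM_IPA_SIZE(ipa_bits));
}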
MAINTAINERS | 30
@@ -261,8 +261,8 @@ ABI/API
 L:	linux-api@vger.kernel.org
 F:	include/linux/syscalls.h
 F:	kernel/sys_ni.c
-F:	include/uapi/
-F:	arch/*/include/uapi/
+X:	include/uapi/
+X:	arch/*/include/uapi/

 ABIT UGURU 1,2 HARDWARE MONITOR DRIVER
 M:	Hans de Goede <hdegoede@redhat.com>
@@ -1181,7 +1181,7 @@ M: Joel Fernandes <joel@joelfernandes.org>
 M:	Christian Brauner <christian@brauner.io>
 M:	Hridya Valsaraju <hridya@google.com>
 M:	Suren Baghdasaryan <surenb@google.com>
-L:	devel@driverdev.osuosl.org
+L:	linux-kernel@vger.kernel.org
 S:	Supported
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
 F:	drivers/android/
@@ -5839,7 +5839,7 @@ M: David Airlie <airlied@linux.ie>
 M:	Daniel Vetter <daniel@ffwll.ch>
 L:	dri-devel@lists.freedesktop.org
 S:	Maintained
-B:	https://bugs.freedesktop.org/
+B:	https://gitlab.freedesktop.org/drm
 C:	irc://chat.freenode.net/dri-devel
 T:	git git://anongit.freedesktop.org/drm/drm
 F:	Documentation/devicetree/bindings/display/
@@ -8120,7 +8120,6 @@ F: drivers/crypto/hisilicon/sec2/sec_main.c

 HISILICON STAGING DRIVERS FOR HIKEY 960/970
 M:	Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
-L:	devel@driverdev.osuosl.org
 S:	Maintained
 F:	drivers/staging/hikey9xx/

@@ -8525,6 +8524,7 @@ IBM Power SRIOV Virtual NIC Device Driver
 M:	Dany Madden <drt@linux.ibm.com>
 M:	Lijun Pan <ljp@linux.ibm.com>
 M:	Sukadev Bhattiprolu <sukadev@linux.ibm.com>
+R:	Thomas Falcon <tlfalcon@linux.ibm.com>
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/ethernet/ibm/ibmvnic.*
@@ -12542,7 +12542,7 @@ NETWORKING [MPTCP]
 M:	Mat Martineau <mathew.j.martineau@linux.intel.com>
 M:	Matthieu Baerts <matthieu.baerts@tessares.net>
 L:	netdev@vger.kernel.org
-L:	mptcp@lists.01.org
+L:	mptcp@lists.linux.dev
 S:	Maintained
 W:	https://github.com/multipath-tcp/mptcp_net-next/wiki
 B:	https://github.com/multipath-tcp/mptcp_net-next/issues
@@ -14713,15 +14713,11 @@ F: drivers/net/ethernet/qlogic/qlcnic/
 QLOGIC QLGE 10Gb ETHERNET DRIVER
-M:	Manish Chopra <manishc@marvell.com>
-M:	GR-Linux-NIC-Dev@marvell.com
-L:	netdev@vger.kernel.org
-S:	Supported
-F:	drivers/staging/qlge/
-
-QLOGIC QLGE 10Gb ETHERNET DRIVER
 M:	Coiby Xu <coiby.xu@gmail.com>
 L:	netdev@vger.kernel.org
-S:	Maintained
+S:	Supported
 F:	Documentation/networking/device_drivers/qlogic/qlge.rst
 F:	drivers/staging/qlge/

 QM1D1B0004 MEDIA DRIVER
 M:	Akihiro Tsukada <tskd08@gmail.com>
@@ -16891,8 +16887,10 @@ F: tools/spi/

 SPIDERNET NETWORK DRIVER for CELL
 M:	Ishizaki Kou <kou.ishizaki@toshiba.co.jp>
+M:	Geoff Levand <geoff@infradead.org>
 L:	netdev@vger.kernel.org
-S:	Supported
+L:	linuxppc-dev@lists.ozlabs.org
+S:	Maintained
 F:	Documentation/networking/device_drivers/ethernet/toshiba/spider_net.rst
 F:	drivers/net/ethernet/toshiba/spider_net*

@@ -17044,7 +17042,7 @@ F: drivers/staging/vt665?/

 STAGING SUBSYSTEM
 M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-L:	devel@driverdev.osuosl.org
+L:	linux-staging@lists.linux.dev
 S:	Supported
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
 F:	drivers/staging/
@@ -19139,7 +19137,7 @@ VME SUBSYSTEM
 M:	Martyn Welch <martyn@welchs.me.uk>
 M:	Manohar Vanga <manohar.vanga@gmail.com>
 M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-L:	devel@driverdev.osuosl.org
+L:	linux-kernel@vger.kernel.org
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git
 F:	Documentation/driver-api/vme.rst
@@ -19170,7 +19168,7 @@ S: Maintained
 F:	drivers/infiniband/hw/vmw_pvrdma/

 VMware PVSCSI driver
-M:	Jim Gill <jgill@vmware.com>
+M:	Vishal Bhakta <vbhakta@vmware.com>
 M:	VMware PV-Drivers <pv-drivers@vmware.com>
 L:	linux-scsi@vger.kernel.org
 S:	Maintained
Makefile | 6
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 12
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc4
 NAME = Frozen Wasteland

 # *DOCUMENTATION*
@@ -264,7 +264,8 @@ no-dot-config-targets := $(clean-targets) \
			  $(version_h) headers headers_% archheaders archscripts \
			  %asm-generic kernelversion %src-pkg dt_binding_check \
			  outputmakefile
-no-sync-config-targets := $(no-dot-config-targets) %install kernelrelease
+no-sync-config-targets := $(no-dot-config-targets) %install kernelrelease \
+			  image_name
 single-targets := %.a %.i %.ko %.lds %.ll %.lst %.mod %.o %.s %.symtypes %/

 config-build	:=
@@ -478,6 +479,7 @@ USERINCLUDE := \
		-I$(objtree)/arch/$(SRCARCH)/include/generated/uapi \
		-I$(srctree)/include/uapi \
		-I$(objtree)/include/generated/uapi \
+		-include $(srctree)/include/linux/compiler-version.h \
		-include $(srctree)/include/linux/kconfig.h

 # Use LINUXINCLUDE when you must reference the include/ directory.
@@ -632,13 +632,12 @@ config HAS_LTO_CLANG
	def_bool y
-	# Clang >= 11: https://github.com/ClangBuiltLinux/linux/issues/510
	depends on CC_IS_CLANG && CLANG_VERSION >= 110000 && LD_IS_LLD
	depends on $(success,test $(LLVM) -eq 1)
	depends on $(success,test $(LLVM_IAS) -eq 1)
	depends on $(success,$(NM) --help | head -n 1 | grep -qi llvm)
	depends on $(success,$(AR) --help | head -n 1 | grep -qi llvm)
	depends on ARCH_SUPPORTS_LTO_CLANG
	depends on !FTRACE_MCOUNT_USE_RECORDMCOUNT
-	depends on !KASAN
+	depends on !KASAN || KASAN_HW_TAGS
	depends on !GCOV_KERNEL
	help
	  The compiler and Kconfig options support building with Clang's
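[Aside, not from the diff: the $(LLVM) and $(LLVM_IAS) tests above correspond
to invoking kbuild as `make LLVM=1 LLVM_IAS=1`, i.e. Clang LTO is only offered
when the full LLVM toolchain, integrated assembler included, is in use.]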
@@ -348,6 +348,7 @@ config ARCH_EP93XX
	select ARM_AMBA
	imply ARM_PATCH_PHYS_VIRT
	select ARM_VIC
+	select GENERIC_IRQ_MULTI_HANDLER
	select AUTO_ZRELADDR
	select CLKDEV_LOOKUP
	select CLKSRC_MMIO
@@ -11,6 +11,7 @@

 #include <xen/xen.h>
 #include <xen/interface/memory.h>
+#include <xen/grant_table.h>
 #include <xen/page.h>
 #include <xen/swiotlb-xen.h>

@@ -109,7 +110,7 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
			map_ops[i].status = GNTST_general_error;
			unmap.host_addr = map_ops[i].host_addr,
			unmap.handle = map_ops[i].handle;
-			map_ops[i].handle = ~0;
+			map_ops[i].handle = INVALID_GRANT_HANDLE;
			if (map_ops[i].flags & GNTMAP_device_map)
				unmap.dev_bus_addr = map_ops[i].dev_bus_addr;
			else
@@ -130,7 +131,6 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,

	return 0;
 }
-EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping);

 int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
			      struct gnttab_unmap_grant_ref *kunmap_ops,
@@ -145,7 +145,6 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,

	return 0;
 }
-EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping);

 bool __set_phys_to_machine_multi(unsigned long pfn,
			unsigned long mfn, unsigned long nr_pages)
@@ -810,6 +810,16 @@ config QCOM_FALKOR_ERRATUM_E1041

	  If unsure, say Y.

+config NVIDIA_CARMEL_CNP_ERRATUM
+	bool "NVIDIA Carmel CNP: CNP on Carmel semantically different than ARM cores"
+	default y
+	help
+	  If CNP is enabled on Carmel cores, non-sharable TLBIs on a core will not
+	  invalidate shared TLB entries installed by a different core, as it would
+	  on standard ARM cores.
+
+	  If unsure, say Y.
+
 config SOCIONEXT_SYNQUACER_PREITS
	bool "Socionext Synquacer: Workaround for GICv3 pre-ITS"
	default y
@@ -1055,8 +1065,6 @@ config HW_PERF_EVENTS
 config SYS_SUPPORTS_HUGETLBFS
	def_bool y

-config ARCH_WANT_HUGE_PMD_SHARE
-
 config ARCH_HAS_CACHE_LINE_SIZE
	def_bool y

@@ -1157,8 +1165,8 @@ config XEN

 config FORCE_MAX_ZONEORDER
	int
-	default "14" if (ARM64_64K_PAGES && TRANSPARENT_HUGEPAGE)
-	default "12" if (ARM64_16K_PAGES && TRANSPARENT_HUGEPAGE)
+	default "14" if ARM64_64K_PAGES
+	default "12" if ARM64_16K_PAGES
	default "11"
	help
	  The kernel memory allocator divides physically contiguous memory
@@ -1855,12 +1863,6 @@ config CMDLINE_FROM_BOOTLOADER
	  the boot loader doesn't provide any, the default kernel command
	  string provided in CMDLINE will be used.

-config CMDLINE_EXTEND
-	bool "Extend bootloader kernel arguments"
-	help
-	  The command-line arguments provided by the boot loader will be
-	  appended to the default kernel command string.
-
 config CMDLINE_FORCE
	bool "Always use the default kernel command string"
	help
@@ -37,7 +37,7 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
	} while (--n > 0);

	sum += ((sum >> 32) | (sum << 32));
-	return csum_fold((__force u32)(sum >> 32));
+	return csum_fold((__force __wsum)(sum >> 32));
 }
 #define ip_fast_csum ip_fast_csum
@@ -66,7 +66,8 @@
 #define ARM64_WORKAROUND_1508412		58
 #define ARM64_HAS_LDAPR				59
 #define ARM64_KVM_PROTECTED_MODE		60
+#define ARM64_WORKAROUND_NVIDIA_CARMEL_CNP	61

-#define ARM64_NCAPS				61
+#define ARM64_NCAPS				62

 #endif /* __ASM_CPUCAPS_H */
@@ -47,10 +47,10 @@
 #define __KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context		2
 #define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa		3
 #define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid		4
-#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_local_vmid	5
+#define __KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context		5
 #define __KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff		6
 #define __KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs			7
-#define __KVM_HOST_SMCCC_FUNC___vgic_v3_get_ich_vtr_el2		8
+#define __KVM_HOST_SMCCC_FUNC___vgic_v3_get_gic_config		8
 #define __KVM_HOST_SMCCC_FUNC___vgic_v3_read_vmcr		9
 #define __KVM_HOST_SMCCC_FUNC___vgic_v3_write_vmcr		10
 #define __KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs		11
@@ -183,16 +183,16 @@ DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
 #define __bp_harden_hyp_vecs	CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)

 extern void __kvm_flush_vm_context(void);
+extern void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
				     int level);
 extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
-extern void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu);

 extern void __kvm_timer_set_cntvoff(u64 cntvoff);

 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);

-extern u64 __vgic_v3_get_ich_vtr_el2(void);
+extern u64 __vgic_v3_get_gic_config(void);
 extern u64 __vgic_v3_read_vmcr(void);
 extern void __vgic_v3_write_vmcr(u32 vmcr);
 extern void __vgic_v3_init_lrs(void);
@@ -83,6 +83,11 @@ void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt);
 void __debug_switch_to_guest(struct kvm_vcpu *vcpu);
 void __debug_switch_to_host(struct kvm_vcpu *vcpu);

+#ifdef __KVM_NVHE_HYPERVISOR__
+void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu);
+void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu);
+#endif
+
 void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
 void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);

@@ -97,7 +102,8 @@ bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt);

 void __noreturn hyp_panic(void);
 #ifdef __KVM_NVHE_HYPERVISOR__
-void __noreturn __hyp_do_panic(bool restore_host, u64 spsr, u64 elr, u64 par);
+void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
+			       u64 elr, u64 par);
 #endif

 #endif /* __ARM64_KVM_HYP_H__ */
@@ -328,6 +328,11 @@ static inline void *phys_to_virt(phys_addr_t x)
 #define ARCH_PFN_OFFSET		((unsigned long)PHYS_PFN_OFFSET)

 #if !defined(CONFIG_SPARSEMEM_VMEMMAP) || defined(CONFIG_DEBUG_VIRTUAL)
+#define page_to_virt(x)	({						\
+	__typeof__(x) __page = x;					\
+	void *__addr = __va(page_to_phys(__page));			\
+	(void *)__tag_set((const void *)__addr, page_kasan_tag(__page));\
+})
 #define virt_to_page(x)		pfn_to_page(virt_to_pfn(x))
 #else
 #define page_to_virt(x)	({						\
@@ -63,23 +63,6 @@ static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
 extern u64 idmap_t0sz;
 extern u64 idmap_ptrs_per_pgd;

-static inline bool __cpu_uses_extended_idmap(void)
-{
-	if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52))
-		return false;
-
-	return unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS));
-}
-
-/*
- * True if the extended ID map requires an extra level of translation table
- * to be configured.
- */
-static inline bool __cpu_uses_extended_idmap_level(void)
-{
-	return ARM64_HW_PGTABLE_LEVELS(64 - idmap_t0sz) > CONFIG_PGTABLE_LEVELS;
-}
-
 /*
  * Ensure TCR.T0SZ is set to the provided value.
  */
@@ -66,7 +66,6 @@ extern bool arm64_use_ng_mappings;
 #define _PAGE_DEFAULT		(_PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))

 #define PAGE_KERNEL		__pgprot(PROT_NORMAL)
-#define PAGE_KERNEL_TAGGED	__pgprot(PROT_NORMAL_TAGGED)
 #define PAGE_KERNEL_RO		__pgprot((PROT_NORMAL & ~PTE_WRITE) | PTE_RDONLY)
 #define PAGE_KERNEL_ROX		__pgprot((PROT_NORMAL & ~(PTE_WRITE | PTE_PXN)) | PTE_RDONLY)
 #define PAGE_KERNEL_EXEC	__pgprot(PROT_NORMAL & ~PTE_PXN)
@@ -486,6 +486,9 @@ static inline pmd_t pmd_mkdevmap(pmd_t pmd)
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
 #define pgprot_device(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
+#define pgprot_tagged(prot) \
+	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_TAGGED))
+#define pgprot_mhp	pgprot_tagged
 /*
  * DMA allocations for non-coherent devices use what the Arm architecture calls
  * "Normal non-cacheable" memory, which permits speculation, unaligned accesses
@@ -251,6 +251,8 @@ unsigned long get_wchan(struct task_struct *p);
 extern struct task_struct *cpu_switch_to(struct task_struct *prev,
					 struct task_struct *next);

+asmlinkage void arm64_preempt_schedule_irq(void);
+
 #define task_pt_regs(p) \
	((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)
@@ -796,6 +796,11 @@
 #define ID_AA64MMFR0_PARANGE_48		0x5
 #define ID_AA64MMFR0_PARANGE_52		0x6

+#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT	0x0
+#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE	0x1
+#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN	0x2
+#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX	0x7
+
 #ifdef CONFIG_ARM64_PA_BITS_52
 #define ID_AA64MMFR0_PARANGE_MAX	ID_AA64MMFR0_PARANGE_52
 #else
@@ -962,13 +967,16 @@

 #if defined(CONFIG_ARM64_4K_PAGES)
 #define ID_AA64MMFR0_TGRAN_SHIFT		ID_AA64MMFR0_TGRAN4_SHIFT
-#define ID_AA64MMFR0_TGRAN_SUPPORTED		ID_AA64MMFR0_TGRAN4_SUPPORTED
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN	ID_AA64MMFR0_TGRAN4_SUPPORTED
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX	0x7
 #elif defined(CONFIG_ARM64_16K_PAGES)
 #define ID_AA64MMFR0_TGRAN_SHIFT		ID_AA64MMFR0_TGRAN16_SHIFT
-#define ID_AA64MMFR0_TGRAN_SUPPORTED		ID_AA64MMFR0_TGRAN16_SUPPORTED
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN	ID_AA64MMFR0_TGRAN16_SUPPORTED
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX	0xF
 #elif defined(CONFIG_ARM64_64K_PAGES)
 #define ID_AA64MMFR0_TGRAN_SHIFT		ID_AA64MMFR0_TGRAN64_SHIFT
-#define ID_AA64MMFR0_TGRAN_SUPPORTED		ID_AA64MMFR0_TGRAN64_SUPPORTED
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN	ID_AA64MMFR0_TGRAN64_SUPPORTED
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX	0x7
 #endif

 #define MVFR2_FPMISC_SHIFT		4
@@ -55,6 +55,8 @@ void arch_setup_new_exec(void);
 #define arch_setup_new_exec     arch_setup_new_exec

 void arch_release_task_struct(struct task_struct *tsk);
+int arch_dup_task_struct(struct task_struct *dst,
+			 struct task_struct *src);

 #endif
@@ -525,6 +525,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
				  0, 0,
				  1, 0),
	},
 #endif
+#ifdef CONFIG_NVIDIA_CARMEL_CNP_ERRATUM
+	{
+		/* NVIDIA Carmel */
+		.desc = "NVIDIA Carmel CNP erratum",
+		.capability = ARM64_WORKAROUND_NVIDIA_CARMEL_CNP,
+		ERRATA_MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
+	},
+#endif
	{
	}
@@ -1324,6 +1324,9 @@ has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
	if (is_kdump_kernel())
		return false;

+	if (cpus_have_const_cap(ARM64_WORKAROUND_NVIDIA_CARMEL_CNP))
+		return false;
+
	return has_cpuid_feature(entry, scope);
 }
@@ -353,7 +353,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
	 * with the CLIDR_EL1 fields to avoid triggering false warnings
	 * when there is a mismatch across the CPUs. Keep track of the
	 * effective value of the CTR_EL0 in our internal records for
-	 * acurate sanity check and feature enablement.
+	 * accurate sanity check and feature enablement.
	 */
	info->reg_ctr = read_cpuid_effective_cachetype();
	info->reg_dczid = read_cpuid(DCZID_EL0);
@@ -64,5 +64,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
 {
	memcpy(buf, phys_to_virt((phys_addr_t)*ppos), count);
+	*ppos += count;
+
	return count;
 }
@@ -319,7 +319,7 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
	 */
	adrp	x5, __idmap_text_end
	clz	x5, x5
-	cmp	x5, TCR_T0SZ(VA_BITS)	// default T0SZ small enough?
+	cmp	x5, TCR_T0SZ(VA_BITS_MIN) // default T0SZ small enough?
	b.ge	1f			// .. then skip VA range extension

	adr_l	x6, idmap_t0sz
@@ -655,8 +655,10 @@ SYM_FUNC_END(__secondary_too_slow)
 SYM_FUNC_START(__enable_mmu)
	mrs	x2, ID_AA64MMFR0_EL1
	ubfx	x2, x2, #ID_AA64MMFR0_TGRAN_SHIFT, 4
-	cmp	x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
-	b.ne	__no_granule_support
+	cmp	x2, #ID_AA64MMFR0_TGRAN_SUPPORTED_MIN
+	b.lt	__no_granule_support
+	cmp	x2, #ID_AA64MMFR0_TGRAN_SUPPORTED_MAX
+	b.gt	__no_granule_support
	update_early_cpu_boot_status 0, x2, x3
	adrp	x2, idmap_pg_dir
	phys_to_ttbr x1, x1
@@ -163,33 +163,36 @@ static __init void __parse_cmdline(const char *cmdline, bool parse_aliases)
	} while (1);
 }

-static __init void parse_cmdline(void)
+static __init const u8 *get_bootargs_cmdline(void)
 {
-	if (!IS_ENABLED(CONFIG_CMDLINE_FORCE)) {
-		const u8 *prop;
-		void *fdt;
-		int node;
+	const u8 *prop;
+	void *fdt;
+	int node;

-		fdt = get_early_fdt_ptr();
-		if (!fdt)
-			goto out;
+	fdt = get_early_fdt_ptr();
+	if (!fdt)
+		return NULL;

-		node = fdt_path_offset(fdt, "/chosen");
-		if (node < 0)
-			goto out;
+	node = fdt_path_offset(fdt, "/chosen");
+	if (node < 0)
+		return NULL;

-		prop = fdt_getprop(fdt, node, "bootargs", NULL);
-		if (!prop)
-			goto out;
+	prop = fdt_getprop(fdt, node, "bootargs", NULL);
+	if (!prop)
+		return NULL;

-		__parse_cmdline(prop, true);
+	return strlen(prop) ? prop : NULL;
+}

-		if (!IS_ENABLED(CONFIG_CMDLINE_EXTEND))
-			return;
-	}
+static __init void parse_cmdline(void)
+{
+	const u8 *prop = get_bootargs_cmdline();

-out:
-	__parse_cmdline(CONFIG_CMDLINE, true);
+	if (IS_ENABLED(CONFIG_CMDLINE_FORCE) || !prop)
+		__parse_cmdline(CONFIG_CMDLINE, true);
+
+	if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && prop)
+		__parse_cmdline(prop, true);
 }

 /* Keep checkers quiet */
@@ -101,6 +101,9 @@ KVM_NVHE_ALIAS(__stop___kvm_ex_table);
 /* Array containing bases of nVHE per-CPU memory regions. */
 KVM_NVHE_ALIAS(kvm_arm_hyp_percpu_base);

+/* PMU available static key */
+KVM_NVHE_ALIAS(kvm_arm_pmu_available);
+
 #endif /* CONFIG_KVM */

 #endif /* __ARM64_KERNEL_IMAGE_VARS_H */
@@ -460,7 +460,7 @@ static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
	return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
 }

-static inline u32 armv8pmu_read_evcntr(int idx)
+static inline u64 armv8pmu_read_evcntr(int idx)
 {
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);

@@ -57,6 +57,8 @@
 #include <asm/processor.h>
 #include <asm/pointer_auth.h>
 #include <asm/stacktrace.h>
+#include <asm/switch_to.h>
+#include <asm/system_misc.h>

 #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
 #include <linux/stackprotector.h>
@@ -194,8 +194,9 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)

 #ifdef CONFIG_STACKTRACE

-void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
-		     struct task_struct *task, struct pt_regs *regs)
+noinline void arch_stack_walk(stack_trace_consume_fn consume_entry,
+			      void *cookie, struct task_struct *task,
+			      struct pt_regs *regs)
 {
	struct stackframe frame;

@@ -203,8 +204,8 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
		start_backtrace(&frame, regs->regs[29], regs->pc);
	else if (task == current)
		start_backtrace(&frame,
-				(unsigned long)__builtin_frame_address(0),
-				(unsigned long)arch_stack_walk);
+				(unsigned long)__builtin_frame_address(1),
+				(unsigned long)__builtin_return_address(0));
	else
		start_backtrace(&frame, thread_saved_fp(task),
				thread_saved_pc(task));
@@ -385,11 +385,16 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
	last_ran = this_cpu_ptr(mmu->last_vcpu_ran);

	/*
+	 * We guarantee that both TLBs and I-cache are private to each
+	 * vcpu. If detecting that a vcpu from the same VM has
+	 * previously run on the same physical CPU, call into the
+	 * hypervisor code to nuke the relevant contexts.
+	 *
	 * We might get preempted before the vCPU actually runs, but
	 * over-invalidation doesn't affect correctness.
	 */
	if (*last_ran != vcpu->vcpu_id) {
-		kvm_call_hyp(__kvm_tlb_flush_local_vmid, mmu);
+		kvm_call_hyp(__kvm_flush_cpu_context, mmu);
		*last_ran = vcpu->vcpu_id;
	}

@@ -85,8 +85,10 @@ SYM_INNER_LABEL(__guest_exit_panic, SYM_L_GLOBAL)

	// If the hyp context is loaded, go straight to hyp_panic
	get_loaded_vcpu x0, x1
-	cbz	x0, hyp_panic
+	cbnz	x0, 1f
+	b	hyp_panic

+1:
	// The hyp context is saved so make sure it is restored to allow
	// hyp_panic to run at hyp and, subsequently, panic to run in the host.
	// This makes use of __guest_exit to avoid duplication but sets the
@@ -94,7 +96,7 @@ SYM_INNER_LABEL(__guest_exit_panic, SYM_L_GLOBAL)
	// current state is saved to the guest context but it will only be
	// accurate if the guest had been completely restored.
	adr_this_cpu x0, kvm_hyp_ctxt, x1
-	adr	x1, hyp_panic
+	adr_l	x1, hyp_panic
	str	x1, [x0, #CPU_XREG_OFFSET(30)]

	get_vcpu_ptr	x1, x0
@@ -146,7 +148,7 @@ SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
	// Now restore the hyp regs
	restore_callee_saved_regs x2

-	set_loaded_vcpu xzr, x1, x2
+	set_loaded_vcpu xzr, x2, x3

 alternative_if ARM64_HAS_RAS_EXTN
	// If we have the RAS extensions we can consume a pending error
@@ -90,14 +90,17 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
	 * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
	 * EL1 instead of being trapped to EL2.
	 */
-	write_sysreg(0, pmselr_el0);
-	write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
+	if (kvm_arm_support_pmu_v3()) {
+		write_sysreg(0, pmselr_el0);
+		write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
+	}
	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
 }

 static inline void __deactivate_traps_common(void)
 {
	write_sysreg(0, hstr_el2);
-	write_sysreg(0, pmuserenr_el0);
+	if (kvm_arm_support_pmu_v3())
+		write_sysreg(0, pmuserenr_el0);
 }

@@ -58,16 +58,24 @@ static void __debug_restore_spe(u64 pmscr_el1)
	write_sysreg_s(pmscr_el1, SYS_PMSCR_EL1);
 }

-void __debug_switch_to_guest(struct kvm_vcpu *vcpu)
+void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu)
 {
	/* Disable and flush SPE data generation */
	__debug_save_spe(&vcpu->arch.host_debug_state.pmscr_el1);
+}
+
+void __debug_switch_to_guest(struct kvm_vcpu *vcpu)
+{
	__debug_switch_to_guest_common(vcpu);
 }

+void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu)
+{
+	__debug_restore_spe(vcpu->arch.host_debug_state.pmscr_el1);
+}
+
 void __debug_switch_to_host(struct kvm_vcpu *vcpu)
 {
-	__debug_restore_spe(vcpu->arch.host_debug_state.pmscr_el1);
	__debug_switch_to_host_common(vcpu);
 }

@@ -71,7 +71,8 @@ SYM_FUNC_START(__host_enter)
 SYM_FUNC_END(__host_enter)

 /*
- * void __noreturn __hyp_do_panic(bool restore_host, u64 spsr, u64 elr, u64 par);
+ * void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
+ *				  u64 elr, u64 par);
 */
 SYM_FUNC_START(__hyp_do_panic)
	/* Prepare and exit to the host's panic funciton. */
@@ -82,9 +83,11 @@ SYM_FUNC_START(__hyp_do_panic)
	hyp_kimg_va lr, x6
	msr	elr_el2, lr

-	/* Set the panic format string. Use the, now free, LR as scratch. */
-	ldr	lr, =__hyp_panic_string
-	hyp_kimg_va lr, x6
+	mov	x29, x0
+
+	/* Load the format string into x0 and arguments into x1-7 */
+	ldr	x0, =__hyp_panic_string
+	hyp_kimg_va x0, x6

	/* Load the format arguments into x1-7. */
	mov	x6, x3
@@ -94,9 +97,7 @@ SYM_FUNC_START(__hyp_do_panic)
	mrs	x5, hpfar_el2

	/* Enter the host, conditionally restoring the host context. */
-	cmp	x0, xzr
-	mov	x0, lr
-	b.eq	__host_enter_without_restoring
+	cbz	x29, __host_enter_without_restoring
	b	__host_enter_for_panic
 SYM_FUNC_END(__hyp_do_panic)

@@ -46,11 +46,11 @@ static void handle___kvm_tlb_flush_vmid(struct kvm_cpu_context *host_ctxt)
	__kvm_tlb_flush_vmid(kern_hyp_va(mmu));
 }

-static void handle___kvm_tlb_flush_local_vmid(struct kvm_cpu_context *host_ctxt)
+static void handle___kvm_flush_cpu_context(struct kvm_cpu_context *host_ctxt)
 {
	DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);

-	__kvm_tlb_flush_local_vmid(kern_hyp_va(mmu));
+	__kvm_flush_cpu_context(kern_hyp_va(mmu));
 }

 static void handle___kvm_timer_set_cntvoff(struct kvm_cpu_context *host_ctxt)
@@ -67,9 +67,9 @@ static void handle___kvm_enable_ssbs(struct kvm_cpu_context *host_ctxt)
	write_sysreg_el2(tmp, SYS_SCTLR);
 }

-static void handle___vgic_v3_get_ich_vtr_el2(struct kvm_cpu_context *host_ctxt)
+static void handle___vgic_v3_get_gic_config(struct kvm_cpu_context *host_ctxt)
 {
-	cpu_reg(host_ctxt, 1) = __vgic_v3_get_ich_vtr_el2();
+	cpu_reg(host_ctxt, 1) = __vgic_v3_get_gic_config();
 }

 static void handle___vgic_v3_read_vmcr(struct kvm_cpu_context *host_ctxt)
@@ -115,10 +115,10 @@ static const hcall_t host_hcall[] = {
	HANDLE_FUNC(__kvm_flush_vm_context),
	HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
	HANDLE_FUNC(__kvm_tlb_flush_vmid),
-	HANDLE_FUNC(__kvm_tlb_flush_local_vmid),
+	HANDLE_FUNC(__kvm_flush_cpu_context),
	HANDLE_FUNC(__kvm_timer_set_cntvoff),
	HANDLE_FUNC(__kvm_enable_ssbs),
-	HANDLE_FUNC(__vgic_v3_get_ich_vtr_el2),
+	HANDLE_FUNC(__vgic_v3_get_gic_config),
	HANDLE_FUNC(__vgic_v3_read_vmcr),
	HANDLE_FUNC(__vgic_v3_write_vmcr),
	HANDLE_FUNC(__vgic_v3_init_lrs),
@@ -192,6 +192,14 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
	pmu_switch_needed = __pmu_switch_to_guest(host_ctxt);

	__sysreg_save_state_nvhe(host_ctxt);
+	/*
+	 * We must flush and disable the SPE buffer for nVHE, as
+	 * the translation regime(EL1&0) is going to be loaded with
+	 * that of the guest. And we must do this before we change the
+	 * translation regime to EL2 (via MDCR_EL2_E2PB == 0) and
+	 * before we load guest Stage1.
+	 */
+	__debug_save_host_buffers_nvhe(vcpu);

	__adjust_pc(vcpu);

@@ -234,11 +242,12 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
		__fpsimd_save_fpexc32(vcpu);

+	__debug_switch_to_host(vcpu);
	/*
	 * This must come after restoring the host sysregs, since a non-VHE
	 * system may enable SPE here and make use of the TTBRs.
	 */
-	__debug_switch_to_host(vcpu);
+	__debug_restore_host_buffers_nvhe(vcpu);

	if (pmu_switch_needed)
		__pmu_switch_to_host(host_ctxt);
@@ -257,7 +266,6 @@ void __noreturn hyp_panic(void)
	u64 spsr = read_sysreg_el2(SYS_SPSR);
	u64 elr = read_sysreg_el2(SYS_ELR);
	u64 par = read_sysreg_par();
-	bool restore_host = true;
	struct kvm_cpu_context *host_ctxt;
	struct kvm_vcpu *vcpu;

@@ -271,7 +279,7 @@ void __noreturn hyp_panic(void)
		__sysreg_restore_state_nvhe(host_ctxt);
	}

-	__hyp_do_panic(restore_host, spsr, elr, par);
+	__hyp_do_panic(host_ctxt, spsr, elr, par);
	unreachable();
 }

|
|||
__tlb_switch_to_host(&cxt);
|
||||
}
|
||||
|
||||
void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
|
||||
void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
|
||||
{
|
||||
struct tlb_inv_context cxt;
|
||||
|
||||
|
@ -131,6 +131,7 @@ void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
|
|||
__tlb_switch_to_guest(mmu, &cxt);
|
||||
|
||||
__tlbi(vmalle1);
|
||||
asm volatile("ic iallu");
|
||||
dsb(nsh);
|
||||
isb();
|
||||
|
||||
|
|
|
@@ -223,6 +223,7 @@ static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data,
		goto out;

	if (!table) {
+		data->addr = ALIGN_DOWN(data->addr, kvm_granule_size(level));
		data->addr += kvm_granule_size(level);
		goto out;
	}
@@ -405,9 +405,45 @@ void __vgic_v3_init_lrs(void)
		__gic_v3_set_lr(0, i);
 }

-u64 __vgic_v3_get_ich_vtr_el2(void)
+/*
+ * Return the GIC CPU configuration:
+ * - [31:0]  ICH_VTR_EL2
+ * - [62:32] RES0
+ * - [63]    MMIO (GICv2) capable
+ */
+u64 __vgic_v3_get_gic_config(void)
 {
-	return read_gicreg(ICH_VTR_EL2);
+	u64 val, sre = read_gicreg(ICC_SRE_EL1);
+	unsigned long flags = 0;
+
+	/*
+	 * To check whether we have a MMIO-based (GICv2 compatible)
+	 * CPU interface, we need to disable the system register
+	 * view. To do that safely, we have to prevent any interrupt
+	 * from firing (which would be deadly).
+	 *
+	 * Note that this only makes sense on VHE, as interrupts are
+	 * already masked for nVHE as part of the exception entry to
+	 * EL2.
+	 */
+	if (has_vhe())
+		flags = local_daif_save();
+
+	write_gicreg(0, ICC_SRE_EL1);
+	isb();
+
+	val = read_gicreg(ICC_SRE_EL1);
+
+	write_gicreg(sre, ICC_SRE_EL1);
+	isb();
+
+	if (has_vhe())
+		local_daif_restore(flags);
+
+	val =  (val & ICC_SRE_EL1_SRE) ? 0 : (1ULL << 63);
+	val |= read_gicreg(ICH_VTR_EL2);
+
+	return val;
 }

 u64 __vgic_v3_read_vmcr(void)
@@ -127,7 +127,7 @@ void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
	__tlb_switch_to_host(&cxt);
 }

-void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
+void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
 {
	struct tlb_inv_context cxt;

@@ -135,6 +135,7 @@ void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
	__tlb_switch_to_guest(mmu, &cxt);

	__tlbi(vmalle1);
+	asm volatile("ic iallu");
	dsb(nsh);
	isb();

@@ -1312,8 +1312,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
	 * Prevent userspace from creating a memory region outside of the IPA
	 * space addressable by the KVM guest IPA space.
	 */
-	if (memslot->base_gfn + memslot->npages >=
-	    (kvm_phys_size(kvm) >> PAGE_SHIFT))
+	if ((memslot->base_gfn + memslot->npages) > (kvm_phys_size(kvm) >> PAGE_SHIFT))
		return -EFAULT;

	mmap_read_lock(current->mm);
@@ -11,6 +11,8 @@

 #include <asm/kvm_emulate.h>

+DEFINE_STATIC_KEY_FALSE(kvm_arm_pmu_available);
+
 static int kvm_is_in_guest(void)
 {
	return kvm_get_running_vcpu() != NULL;
@@ -48,6 +50,14 @@ static struct perf_guest_info_callbacks kvm_guest_cbs = {

 int kvm_perf_init(void)
 {
+	/*
+	 * Check if HW_PERF_EVENTS are supported by checking the number of
+	 * hardware performance counters. This could ensure the presence of
+	 * a physical PMU and CONFIG_PERF_EVENT is selected.
+	 */
+	if (IS_ENABLED(CONFIG_ARM_PMU) && perf_num_counters() > 0)
+		static_branch_enable(&kvm_arm_pmu_available);
+
	return perf_register_guest_info_callbacks(&kvm_guest_cbs);
 }

@@ -823,16 +823,6 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
	return val & mask;
 }

-bool kvm_arm_support_pmu_v3(void)
-{
-	/*
-	 * Check if HW_PERF_EVENTS are supported by checking the number of
-	 * hardware performance counters. This could ensure the presence of
-	 * a physical PMU and CONFIG_PERF_EVENT is selected.
-	 */
-	return (perf_num_counters() > 0);
-}
-
 int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
 {
	if (!kvm_vcpu_has_pmu(vcpu))
@@ -311,23 +311,24 @@ int kvm_set_ipa_limit(void)
	}

	switch (cpuid_feature_extract_unsigned_field(mmfr0, tgran_2)) {
-	default:
-	case 1:
+	case ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE:
		kvm_err("PAGE_SIZE not supported at Stage-2, giving up\n");
		return -EINVAL;
-	case 0:
+	case ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT:
		kvm_debug("PAGE_SIZE supported at Stage-2 (default)\n");
		break;
-	case 2:
+	case ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN ... ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX:
		kvm_debug("PAGE_SIZE supported at Stage-2 (advertised)\n");
		break;
+	default:
+		kvm_err("Unsupported value for TGRAN_2, giving up\n");
+		return -EINVAL;
	}

	kvm_ipa_limit = id_aa64mmfr0_parange_to_phys_shift(parange);
-	WARN(kvm_ipa_limit < KVM_PHYS_SHIFT,
-	     "KVM IPA Size Limit (%d bits) is smaller than default size\n",
-	     kvm_ipa_limit);
-	kvm_info("IPA Size Limit: %d bits\n", kvm_ipa_limit);
+	kvm_info("IPA Size Limit: %d bits%s\n", kvm_ipa_limit,
+		 ((kvm_ipa_limit < KVM_PHYS_SHIFT) ?
+		  " (Reduced IPA size, limited VM/VMM compatibility)" : ""));

	return 0;
 }
@@ -356,6 +357,11 @@ int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
		return -EINVAL;
	} else {
		phys_shift = KVM_PHYS_SHIFT;
+		if (phys_shift > kvm_ipa_limit) {
+			pr_warn_once("%s using unsupported default IPA limit, upgrade your VMM\n",
+				     current->comm);
+			return -EINVAL;
+		}
	}

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
@@ -574,9 +574,13 @@ early_param("kvm-arm.vgic_v4_enable", early_gicv4_enable);
 */
 int vgic_v3_probe(const struct gic_kvm_info *info)
 {
-	u32 ich_vtr_el2 = kvm_call_hyp_ret(__vgic_v3_get_ich_vtr_el2);
+	u64 ich_vtr_el2 = kvm_call_hyp_ret(__vgic_v3_get_gic_config);
+	bool has_v2;
	int ret;

+	has_v2 = ich_vtr_el2 >> 63;
+	ich_vtr_el2 = (u32)ich_vtr_el2;
+
	/*
	 * The ListRegs field is 5 bits, but there is an architectural
	 * maximum of 16 list registers. Just ignore bit 4...
@@ -594,13 +598,15 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
			gicv4_enable ? "en" : "dis");
	}

+	kvm_vgic_global_state.vcpu_base = 0;
+
	if (!info->vcpu.start) {
		kvm_info("GICv3: no GICV resource entry\n");
-		kvm_vgic_global_state.vcpu_base = 0;
+	} else if (!has_v2) {
+		pr_warn(FW_BUG "CPU interface incapable of MMIO access\n");
	} else if (!PAGE_ALIGNED(info->vcpu.start)) {
		pr_warn("GICV physical address 0x%llx not page aligned\n",
			(unsigned long long)info->vcpu.start);
-		kvm_vgic_global_state.vcpu_base = 0;
	} else {
		kvm_vgic_global_state.vcpu_base = info->vcpu.start;
		kvm_vgic_global_state.can_emulate_gicv2 = true;
@@ -219,17 +219,40 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)

 int pfn_valid(unsigned long pfn)
 {
-	phys_addr_t addr = pfn << PAGE_SHIFT;
+	phys_addr_t addr = PFN_PHYS(pfn);

-	if ((addr >> PAGE_SHIFT) != pfn)
+	/*
+	 * Ensure the upper PAGE_SHIFT bits are clear in the
+	 * pfn. Else it might lead to false positives when
+	 * some of the upper bits are set, but the lower bits
+	 * match a valid pfn.
+	 */
+	if (PHYS_PFN(addr) != pfn)
		return 0;

 #ifdef CONFIG_SPARSEMEM
+{
+	struct mem_section *ms;
+
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;

-	if (!valid_section(__pfn_to_section(pfn)))
+	ms = __pfn_to_section(pfn);
+	if (!valid_section(ms))
		return 0;
+
+	/*
+	 * ZONE_DEVICE memory does not have the memblock entries.
+	 * memblock_is_map_memory() check for ZONE_DEVICE based
+	 * addresses will always fail. Even the normal hotplugged
+	 * memory will never have MEMBLOCK_NOMAP flag set in their
+	 * memblock entries. Skip memblock search for all non early
+	 * memory sections covering all of hotplug memory including
+	 * both normal and ZONE_DEVICE based.
+	 */
+	if (!early_section(ms))
+		return pfn_section_valid(ms, pfn);
+}
 #endif
	return memblock_is_map_memory(addr);
 }
@@ -40,7 +40,7 @@
 #define NO_BLOCK_MAPPINGS	BIT(0)
 #define NO_CONT_MAPPINGS	BIT(1)

-u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
+u64 idmap_t0sz = TCR_T0SZ(VA_BITS_MIN);
 u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;

 u64 __section(".mmuoff.data.write") vabits_actual;
@@ -512,7 +512,8 @@ static void __init map_mem(pgd_t *pgdp)
	 * if MTE is present. Otherwise, it has the same attributes as
	 * PAGE_KERNEL.
	 */
-	__map_memblock(pgdp, start, end, PAGE_KERNEL_TAGGED, flags);
+	__map_memblock(pgdp, start, end, pgprot_tagged(PAGE_KERNEL),
+		       flags);
 }

 /*
@@ -1447,6 +1448,22 @@ static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
 struct range arch_get_mappable_range(void)
 {
	struct range mhp_range;
+	u64 start_linear_pa = __pa(_PAGE_OFFSET(vabits_actual));
+	u64 end_linear_pa = __pa(PAGE_END - 1);
+
+	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+		/*
+		 * Check for a wrap, it is possible because of randomized linear
+		 * mapping the start physical address is actually bigger than
+		 * the end physical address. In this case set start to zero
+		 * because [0, end_linear_pa] range must still be able to cover
+		 * all addressable physical addresses.
+		 */
+		if (start_linear_pa > end_linear_pa)
+			start_linear_pa = 0;
+	}
+
+	WARN_ON(start_linear_pa > end_linear_pa);

	/*
	 * Linear mapping region is the range [PAGE_OFFSET..(PAGE_END - 1)]
@@ -1454,8 +1471,9 @@ struct range arch_get_mappable_range(void)
	 * range which can be mapped inside this linear mapping range, must
	 * also be derived from its end points.
	 */
-	mhp_range.start = __pa(_PAGE_OFFSET(vabits_actual));
-	mhp_range.end =  __pa(PAGE_END - 1);
+	mhp_range.start = start_linear_pa;
+	mhp_range.end =  end_linear_pa;

	return mhp_range;
 }

@@ -9,7 +9,7 @@ int arch_check_ftrace_location(struct kprobe *p)
	return 0;
 }

-/* Ftrace callback handler for kprobes -- called under preepmt disabed */
+/* Ftrace callback handler for kprobes -- called under preepmt disabled */
 void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
			   struct ftrace_ops *ops, struct ftrace_regs *fregs)
 {
@@ -32,7 +32,7 @@ static inline void syscall_rollback(struct task_struct *task,
 static inline long syscall_get_error(struct task_struct *task,
				     struct pt_regs *regs)
 {
-	return regs->r10 == -1 ? regs->r8:0;
+	return regs->r10 == -1 ? -regs->r8:0;
 }

 static inline long syscall_get_return_value(struct task_struct *task,
@@ -59,7 +59,7 @@ show_##name(struct device *dev, struct device_attribute *attr,	\
		char *buf)						\
 {									\
	u32 cpu=dev->id;						\
-	return sprintf(buf, "%lx\n", name[cpu]);			\
+	return sprintf(buf, "%llx\n", name[cpu]);			\
 }

 #define store(name)							\
@@ -86,9 +86,9 @@ store_call_start(struct device *dev, struct device_attribute *attr,

 #ifdef ERR_INJ_DEBUG
	printk(KERN_DEBUG "pal_mc_err_inject for cpu%d:\n", cpu);
-	printk(KERN_DEBUG "err_type_info=%lx,\n", err_type_info[cpu]);
-	printk(KERN_DEBUG "err_struct_info=%lx,\n", err_struct_info[cpu]);
-	printk(KERN_DEBUG "err_data_buffer=%lx, %lx, %lx.\n",
+	printk(KERN_DEBUG "err_type_info=%llx,\n", err_type_info[cpu]);
+	printk(KERN_DEBUG "err_struct_info=%llx,\n", err_struct_info[cpu]);
+	printk(KERN_DEBUG "err_data_buffer=%llx, %llx, %llx.\n",
	       err_data_buffer[cpu].data1,
	       err_data_buffer[cpu].data2,
	       err_data_buffer[cpu].data3);
@@ -117,8 +117,8 @@ store_call_start(struct device *dev, struct device_attribute *attr,

 #ifdef ERR_INJ_DEBUG
	printk(KERN_DEBUG "Returns: status=%d,\n", (int)status[cpu]);
-	printk(KERN_DEBUG "capabilities=%lx,\n", capabilities[cpu]);
-	printk(KERN_DEBUG "resources=%lx\n", resources[cpu]);
+	printk(KERN_DEBUG "capabilities=%llx,\n", capabilities[cpu]);
+	printk(KERN_DEBUG "resources=%llx\n", resources[cpu]);
 #endif
	return size;
 }
@@ -131,7 +131,7 @@ show_virtual_to_phys(struct device *dev, struct device_attribute *attr,
			char *buf)
 {
	unsigned int cpu=dev->id;
-	return sprintf(buf, "%lx\n", phys_addr[cpu]);
+	return sprintf(buf, "%llx\n", phys_addr[cpu]);
 }

 static ssize_t
@@ -145,7 +145,7 @@ store_virtual_to_phys(struct device *dev, struct device_attribute *attr,
	ret = get_user_pages_fast(virt_addr, 1, FOLL_WRITE, NULL);
	if (ret<=0) {
 #ifdef ERR_INJ_DEBUG
-		printk("Virtual address %lx is not existing.\n",virt_addr);
+		printk("Virtual address %llx is not existing.\n", virt_addr);
 #endif
		return -EINVAL;
	}
@@ -163,7 +163,7 @@ show_err_data_buffer(struct device *dev,
 {
	unsigned int cpu=dev->id;

-	return sprintf(buf, "%lx, %lx, %lx\n",
+	return sprintf(buf, "%llx, %llx, %llx\n",
	       err_data_buffer[cpu].data1,
	       err_data_buffer[cpu].data2,
	       err_data_buffer[cpu].data3);
@@ -178,13 +178,13 @@ store_err_data_buffer(struct device *dev,
	int ret;

 #ifdef ERR_INJ_DEBUG
-	printk("write err_data_buffer=[%lx,%lx,%lx] on cpu%d\n",
+	printk("write err_data_buffer=[%llx,%llx,%llx] on cpu%d\n",
	       err_data_buffer[cpu].data1,
	       err_data_buffer[cpu].data2,
	       err_data_buffer[cpu].data3,
	       cpu);
 #endif
-	ret=sscanf(buf, "%lx, %lx, %lx",
+	ret = sscanf(buf, "%llx, %llx, %llx",
	       &err_data_buffer[cpu].data1,
	       &err_data_buffer[cpu].data2,
	       &err_data_buffer[cpu].data3);
@@ -1824,7 +1824,7 @@ ia64_mca_cpu_init(void *cpu_data)
			data = mca_bootmem();
			first_time = 0;
		} else
-			data = (void *)__get_free_pages(GFP_KERNEL,
+			data = (void *)__get_free_pages(GFP_ATOMIC,
							get_order(sz));
		if (!data)
			panic("Could not allocate MCA memory for cpu %d\n",
@@ -2013,27 +2013,39 @@ static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data)
{
struct syscall_get_set_args *args = data;
struct pt_regs *pt = args->regs;
unsigned long *krbs, cfm, ndirty;
unsigned long *krbs, cfm, ndirty, nlocals, nouts;
int i, count;

if (unw_unwind_to_user(info) < 0)
return;

/*
 * We get here via a few paths:
 * - break instruction: cfm is shared with caller.
 *   syscall args are in out= regs, locals are non-empty.
 * - epc instruction: cfm is set by br.call,
 *   locals don't exist.
 *
 * For both cases arguments are reachable in cfm.sof - cfm.sol.
 * CFM: [ ... | sor: 17..14 | sol : 13..7 | sof : 6..0 ]
 */
cfm = pt->cr_ifs;
nlocals = (cfm >> 7) & 0x7f; /* aka sol */
nouts = (cfm & 0x7f) - nlocals; /* aka sof - sol */
krbs = (unsigned long *)info->task + IA64_RBS_OFFSET/8;
ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));

count = 0;
if (in_syscall(pt))
count = min_t(int, args->n, cfm & 0x7f);
count = min_t(int, args->n, nouts);

/* Iterate over outs. */
for (i = 0; i < count; i++) {
int j = ndirty + nlocals + i + args->i;
if (args->rw)
*ia64_rse_skip_regs(krbs, ndirty + i + args->i) =
args->args[i];
*ia64_rse_skip_regs(krbs, j) = args->args[i];
else
args->args[i] = *ia64_rse_skip_regs(krbs,
ndirty + i + args->i);
args->args[i] = *ia64_rse_skip_regs(krbs, j);
}

if (!args->rw) {

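The nlocals/nouts decode falls straight out of the CFM layout quoted in the new comment: sof (size of frame) occupies bits 0-6, sol (size of locals) bits 7-13, and the outgoing argument registers are the last sof - sol slots of the frame. A worked example with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned long cfm = (3UL << 7) | 10;		/* sol = 3, sof = 10 */
	unsigned long nlocals = (cfm >> 7) & 0x7f;	/* aka sol */
	unsigned long nouts = (cfm & 0x7f) - nlocals;	/* aka sof - sol */

	printf("locals=%lu outs=%lu\n", nlocals, nouts); /* locals=3 outs=7 */
	return 0;
}
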
@@ -176,7 +176,7 @@ SECTIONS
.fill : {
FILL(0);
BYTE(0);
. = ALIGN(8);
STRUCT_ALIGN();
}
__appended_dtb = .;
/* leave space for appended DTB */

@@ -73,9 +73,10 @@ void __patch_exception(int exc, unsigned long addr);
#endif

#define OP_RT_RA_MASK 0xffff0000UL
#define LIS_R2 0x3c020000UL
#define ADDIS_R2_R12 0x3c4c0000UL
#define ADDI_R2_R2 0x38420000UL
#define LIS_R2 (PPC_INST_ADDIS | __PPC_RT(R2))
#define ADDIS_R2_R12 (PPC_INST_ADDIS | __PPC_RT(R2) | __PPC_RA(R12))
#define ADDI_R2_R2 (PPC_INST_ADDI | __PPC_RT(R2) | __PPC_RA(R2))


static inline unsigned long ppc_function_entry(void *func)
{

@@ -7,7 +7,7 @@
#include <linux/bug.h>
#include <asm/cputable.h>

static inline bool early_cpu_has_feature(unsigned long feature)
static __always_inline bool early_cpu_has_feature(unsigned long feature)
{
return !!((CPU_FTRS_ALWAYS & feature) ||
(CPU_FTRS_POSSIBLE & cur_cpu_spec->cpu_features & feature));

@@ -46,7 +46,7 @@ static __always_inline bool cpu_has_feature(unsigned long feature)
return static_branch_likely(&cpu_feature_keys[i]);
}
#else
static inline bool cpu_has_feature(unsigned long feature)
static __always_inline bool cpu_has_feature(unsigned long feature)
{
return early_cpu_has_feature(feature);
}

@@ -410,7 +410,6 @@ DECLARE_INTERRUPT_HANDLER(altivec_assist_exception);
DECLARE_INTERRUPT_HANDLER(CacheLockingException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointRoundException);
DECLARE_INTERRUPT_HANDLER(unrecoverable_exception);
DECLARE_INTERRUPT_HANDLER(WatchdogException);
DECLARE_INTERRUPT_HANDLER(kernel_bad_stack);

@@ -437,6 +436,8 @@ DECLARE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode);

DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException);

void unrecoverable_exception(struct pt_regs *regs);

void replay_system_reset(void);
void replay_soft_interrupts(void);

@@ -195,7 +195,7 @@ static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
#define TRAP_FLAGS_MASK 0x11
#define TRAP(regs) ((regs)->trap & ~TRAP_FLAGS_MASK)
#define FULL_REGS(regs) (((regs)->trap & 1) == 0)
#define SET_FULL_REGS(regs) ((regs)->trap |= 1)
#define SET_FULL_REGS(regs) ((regs)->trap &= ~1)
#endif
#define CHECK_FULL_REGS(regs) BUG_ON(!FULL_REGS(regs))
#define NV_REG_POISON 0xdeadbeefdeadbeefUL

@@ -210,7 +210,7 @@ static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
#define TRAP_FLAGS_MASK 0x1F
#define TRAP(regs) ((regs)->trap & ~TRAP_FLAGS_MASK)
#define FULL_REGS(regs) (((regs)->trap & 1) == 0)
#define SET_FULL_REGS(regs) ((regs)->trap |= 1)
#define SET_FULL_REGS(regs) ((regs)->trap &= ~1)
#define IS_CRITICAL_EXC(regs) (((regs)->trap & 2) != 0)
#define IS_MCHECK_EXC(regs) (((regs)->trap & 4) != 0)
#define IS_DEBUG_EXC(regs) (((regs)->trap & 8) != 0)

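Both SET_FULL_REGS variants get the same fix: FULL_REGS() treats a clear bit 0 in trap as "full register state saved", so marking the regs as full has to clear that bit. The old |= 1 set it instead, leaving FULL_REGS() false and tripping CHECK_FULL_REGS(). A compilable restatement of the invariant (simplified, userspace):

#include <assert.h>

#define FULL_REGS(t)		(((t) & 1) == 0)
#define SET_FULL_REGS(t)	((t) &= ~1UL)

int main(void)
{
	unsigned long trap = 0x700 | 1;	/* partial regs: bit 0 set */

	assert(!FULL_REGS(trap));
	SET_FULL_REGS(trap);	/* the old "|= 1" kept the bit set instead */
	assert(FULL_REGS(trap));
	return 0;
}
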
@@ -71,6 +71,16 @@ static inline void disable_kernel_vsx(void)
{
msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
}
#else
static inline void enable_kernel_vsx(void)
{
BUILD_BUG();
}

static inline void disable_kernel_vsx(void)
{
BUILD_BUG();
}
#endif

#ifdef CONFIG_SPE

@@ -466,7 +466,7 @@ DEFINE_FIXED_SYMBOL(\name\()_common_real)

ld r10,PACAKMSR(r13) /* get MSR value for kernel */
/* MSR[RI] is clear iff using SRR regs */
.if IHSRR == EXC_HV_OR_STD
.if IHSRR_IF_HVMODE
BEGIN_FTR_SECTION
xori r10,r10,MSR_RI
END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE)

@@ -436,7 +436,6 @@ again:
return ret;
}

void unrecoverable_exception(struct pt_regs *regs);
void preempt_schedule_irq(void);

notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsigned long msr)

@@ -2170,7 +2170,7 @@ DEFINE_INTERRUPT_HANDLER(SPEFloatingPointRoundException)
* in the MSR is 0. This indicates that SRR0/1 are live, and that
* we therefore lost state by taking this exception.
*/
DEFINE_INTERRUPT_HANDLER(unrecoverable_exception)
void unrecoverable_exception(struct pt_regs *regs)
{
pr_emerg("Unrecoverable exception %lx at %lx (msr=%lx)\n",
regs->trap, regs->nip, regs->msr);

@@ -65,3 +65,14 @@ V_FUNCTION_END(__kernel_clock_getres)
V_FUNCTION_BEGIN(__kernel_time)
cvdso_call_time __c_kernel_time
V_FUNCTION_END(__kernel_time)

/* Routines for restoring integer registers, called by the compiler. */
/* Called with r11 pointing to the stack header word of the caller of the */
/* function, just beyond the end of the integer restore area. */
_GLOBAL(_restgpr_31_x)
_GLOBAL(_rest32gpr_31_x)
lwz r0,4(r11)
lwz r31,-4(r11)
mtlr r0
mr r1,r11
blr

@@ -93,7 +93,6 @@ config RISCV
select PCI_MSI if PCI
select RISCV_INTC
select RISCV_TIMER if RISCV_SBI
select SPARSEMEM_STATIC if 32BIT
select SPARSE_IRQ
select SYSCTL_EXCEPTION_TRACE
select THREAD_INFO_IN_TASK

@@ -154,7 +153,8 @@ config ARCH_FLATMEM_ENABLE
config ARCH_SPARSEMEM_ENABLE
def_bool y
depends on MMU
select SPARSEMEM_VMEMMAP_ENABLE
select SPARSEMEM_STATIC if 32BIT && SPARSEMEM
select SPARSEMEM_VMEMMAP_ENABLE if 64BIT

config ARCH_SELECT_MEMORY_MODEL
def_bool ARCH_SPARSEMEM_ENABLE

@@ -31,6 +31,8 @@ config SOC_CANAAN
select SIFIVE_PLIC
select ARCH_HAS_RESET_CONTROLLER
select PINCTRL
select COMMON_CLK
select COMMON_CLK_K210
help
This enables support for Canaan Kendryte K210 SoC platform hardware.

@@ -9,4 +9,20 @@ long long __lshrti3(long long a, int b);
long long __ashrti3(long long a, int b);
long long __ashlti3(long long a, int b);


#define DECLARE_DO_ERROR_INFO(name) asmlinkage void name(struct pt_regs *regs)

DECLARE_DO_ERROR_INFO(do_trap_unknown);
DECLARE_DO_ERROR_INFO(do_trap_insn_misaligned);
DECLARE_DO_ERROR_INFO(do_trap_insn_fault);
DECLARE_DO_ERROR_INFO(do_trap_insn_illegal);
DECLARE_DO_ERROR_INFO(do_trap_load_fault);
DECLARE_DO_ERROR_INFO(do_trap_load_misaligned);
DECLARE_DO_ERROR_INFO(do_trap_store_misaligned);
DECLARE_DO_ERROR_INFO(do_trap_store_fault);
DECLARE_DO_ERROR_INFO(do_trap_ecall_u);
DECLARE_DO_ERROR_INFO(do_trap_ecall_s);
DECLARE_DO_ERROR_INFO(do_trap_ecall_m);
DECLARE_DO_ERROR_INFO(do_trap_break);

#endif /* _ASM_RISCV_PROTOTYPES_H */

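Each DECLARE_DO_ERROR_INFO() line is just shorthand for an asmlinkage prototype, giving the assembly-called trap entry points a declaration the compiler can check (a hunk further down adds the matching #include <asm/asm-prototypes.h> to the RISC-V traps code). For example, the first line expands to:

asmlinkage void do_trap_unknown(struct pt_regs *regs);
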
@@ -12,4 +12,6 @@

#include <asm-generic/irq.h>

extern void __init init_IRQ(void);

#endif /* _ASM_RISCV_IRQ_H */

@@ -71,6 +71,7 @@ int riscv_of_processor_hartid(struct device_node *node);
int riscv_of_parent_hartid(struct device_node *node);

extern void riscv_fill_hwcap(void);
extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);

#endif /* __ASSEMBLY__ */

@@ -119,6 +119,11 @@ extern int regs_query_register_offset(const char *name);
extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
unsigned int n);

void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
unsigned long frame_pointer);
int do_syscall_trace_enter(struct pt_regs *regs);
void do_syscall_trace_exit(struct pt_regs *regs);

/**
* regs_get_register() - get register value from its offset
* @regs: pt_regs from which register value is gotten

@@ -51,10 +51,10 @@ enum sbi_ext_rfence_fid {
SBI_EXT_RFENCE_REMOTE_FENCE_I = 0,
SBI_EXT_RFENCE_REMOTE_SFENCE_VMA,
SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID,
SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA,
SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID,
SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA,
SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID,
SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID,
SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA,
SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID,
SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA,
};

enum sbi_ext_hsm_fid {

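Because only the first enumerator carries an explicit value, the order of this enum is the SBI function ID encoding, so the swap is an ABI fix rather than a cosmetic one. Annotated with the resulting IDs (comments added here for illustration):

enum sbi_ext_rfence_fid {
	SBI_EXT_RFENCE_REMOTE_FENCE_I = 0,		/* FID 0 */
	SBI_EXT_RFENCE_REMOTE_SFENCE_VMA,		/* FID 1 */
	SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID,		/* FID 2 */
	SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID,		/* FID 3 */
	SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA,		/* FID 4 */
	SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID,		/* FID 5 */
	SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA,		/* FID 6 */
};
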
@@ -88,4 +88,6 @@ static inline int read_current_timer(unsigned long *timer_val)
return 0;
}

extern void time_init(void);

#endif /* _ASM_RISCV_TIMEX_H */

@@ -8,6 +8,7 @@ CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_patch.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_sbi.o = $(CC_FLAGS_FTRACE)
endif
CFLAGS_syscall_table.o += $(call cc-option,-Wno-override-init,)

extra-y += head.o
extra-y += vmlinux.lds

@@ -2,39 +2,41 @@

#include <linux/kprobes.h>

/* Ftrace callback handler for kprobes -- called under preepmt disabed */
/* Ftrace callback handler for kprobes -- called under preempt disabled */
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *ops, struct ftrace_regs *regs)
struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
struct kprobe *p;
struct pt_regs *regs;
struct kprobe_ctlblk *kcb;

p = get_kprobe((kprobe_opcode_t *)ip);
if (unlikely(!p) || kprobe_disabled(p))
return;

regs = ftrace_get_regs(fregs);
kcb = get_kprobe_ctlblk();
if (kprobe_running()) {
kprobes_inc_nmissed_count(p);
} else {
unsigned long orig_ip = instruction_pointer(&(regs->regs));
unsigned long orig_ip = instruction_pointer(regs);

instruction_pointer_set(&(regs->regs), ip);
instruction_pointer_set(regs, ip);

__this_cpu_write(current_kprobe, p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
if (!p->pre_handler || !p->pre_handler(p, &(regs->regs))) {
if (!p->pre_handler || !p->pre_handler(p, regs)) {
/*
 * Emulate singlestep (and also recover regs->pc)
 * as if there is a nop
 */
instruction_pointer_set(&(regs->regs),
instruction_pointer_set(regs,
(unsigned long)p->addr + MCOUNT_INSN_SIZE);
if (unlikely(p->post_handler)) {
kcb->kprobe_status = KPROBE_HIT_SSDONE;
p->post_handler(p, &(regs->regs), 0);
p->post_handler(p, regs, 0);
}
instruction_pointer_set(&(regs->regs), orig_ip);
instruction_pointer_set(regs, orig_ip);
}

/*

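The rewrite follows the ftrace callback signature change: handlers now receive an opaque struct ftrace_regs and fetch the architectural registers through ftrace_get_regs(). A minimal sketch of the convention (hypothetical callback; assumes the usual kernel headers):

#include <linux/ftrace.h>
#include <linux/ptrace.h>

static void my_callback(unsigned long ip, unsigned long parent_ip,
			struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	/* NULL unless the ops was registered with FTRACE_OPS_FL_SAVE_REGS */
	struct pt_regs *regs = ftrace_get_regs(fregs);

	if (regs)
		instruction_pointer_set(regs, ip);
}
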
@@ -256,8 +256,7 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr)
* normal page fault.
*/
regs->epc = (unsigned long) cur->addr;
if (!instruction_pointer(regs))
BUG();
BUG_ON(!instruction_pointer(regs));

if (kcb->kprobe_status == KPROBE_REENTER)
restore_previous_kprobe(kcb);

@@ -10,6 +10,7 @@
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/tick.h>
#include <linux/ptrace.h>

@@ -116,7 +116,7 @@ void sbi_clear_ipi(void)
EXPORT_SYMBOL(sbi_clear_ipi);

/**
* sbi_set_timer_v01() - Program the timer for next timer event.
* __sbi_set_timer_v01() - Program the timer for next timer event.
* @stime_value: The value after which next timer event should fire.
*
* Return: None

@@ -147,7 +147,8 @@ static void __init init_resources(void)
bss_res.end = __pa_symbol(__bss_stop) - 1;
bss_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

mem_res_sz = (memblock.memory.cnt + memblock.reserved.cnt) * sizeof(*mem_res);
/* + 1 as memblock_alloc() might increase memblock.reserved.cnt */
mem_res_sz = (memblock.memory.cnt + memblock.reserved.cnt + 1) * sizeof(*mem_res);
mem_res = memblock_alloc(mem_res_sz, SMP_CACHE_BYTES);
if (!mem_res)
panic("%s: Failed to allocate %zu bytes\n", __func__, mem_res_sz);

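The + 1 closes an off-by-one: the array is sized from memblock.reserved.cnt, but the memblock_alloc() call that creates it can itself append one reserved region, so the later walk of memblock.reserved could write one entry past the end. The arithmetic in toy numbers (all counts hypothetical):

#include <stdio.h>

int main(void)
{
	unsigned long memory_cnt = 4, reserved_cnt = 2;
	unsigned long slots = memory_cnt + reserved_cnt + 1;	/* sized up front */

	reserved_cnt++;	/* memblock_alloc() records its own allocation */
	printf("entries walked: %lu, slots available: %lu\n",
	       memory_cnt + reserved_cnt, slots);	/* 7 vs 7: still fits */
	return 0;
}
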
@@ -9,6 +9,7 @@
#include <linux/delay.h>
#include <asm/sbi.h>
#include <asm/processor.h>
#include <asm/timex.h>

unsigned long riscv_timebase;
EXPORT_SYMBOL_GPL(riscv_timebase);

@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/irq.h>

#include <asm/asm-prototypes.h>
#include <asm/bug.h>
#include <asm/processor.h>
#include <asm/ptrace.h>

@@ -155,7 +155,7 @@ static void __init kasan_populate(void *start, void *end)
memset(start, KASAN_SHADOW_INIT, end - start);
}

void __init kasan_shallow_populate(void *start, void *end)
static void __init kasan_shallow_populate(void *start, void *end)
{
unsigned long vaddr = (unsigned long)start & PAGE_MASK;
unsigned long vend = PAGE_ALIGN((unsigned long)end);

@@ -187,6 +187,8 @@ void __init kasan_shallow_populate(void *start, void *end)
}
vaddr += PAGE_SIZE;
}

local_flush_tlb_all();
}

void __init kasan_init(void)

@@ -275,9 +275,9 @@ CONFIG_IP_VS_DH=m
CONFIG_IP_VS_SH=m
CONFIG_IP_VS_SED=m
CONFIG_IP_VS_NQ=m
CONFIG_IP_VS_TWOS=m
CONFIG_IP_VS_FTP=m
CONFIG_IP_VS_PE_SIP=m
CONFIG_NF_TABLES_IPV4=y
CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_IP_NF_IPTABLES=m

@@ -298,7 +298,6 @@ CONFIG_IP_NF_SECURITY=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_TABLES_IPV6=y
CONFIG_NFT_FIB_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m

@@ -481,7 +480,6 @@ CONFIG_NLMON=m
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_VENDOR_ATHEROS is not set
# CONFIG_NET_VENDOR_AURORA is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_BROCADE is not set
# CONFIG_NET_VENDOR_CADENCE is not set

@@ -581,7 +579,6 @@ CONFIG_VIRTIO_BALLOON=m
CONFIG_VIRTIO_INPUT=y
CONFIG_VHOST_NET=m
CONFIG_VHOST_VSOCK=m
# CONFIG_SURFACE_PLATFORMS is not set
CONFIG_S390_CCW_IOMMU=y
CONFIG_S390_AP_IOMMU=y
CONFIG_EXT4_FS=y

@@ -635,6 +632,7 @@ CONFIG_NTFS_RW=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_TMPFS_INODE64=y
CONFIG_HUGETLBFS=y
CONFIG_CONFIGFS_FS=m
CONFIG_ECRYPT_FS=m

@@ -714,12 +712,8 @@ CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m

@@ -731,7 +725,6 @@ CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_DES=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4=m

@@ -796,12 +789,9 @@ CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
CONFIG_SLUB_DEBUG_ON=y
CONFIG_SLUB_STATS=y
CONFIG_DEBUG_KMEMLEAK=y
CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_VM=y
CONFIG_DEBUG_VM_VMACACHE=y
CONFIG_DEBUG_VM_RB=y
CONFIG_DEBUG_VM_PGFLAGS=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m

@@ -838,6 +828,7 @@ CONFIG_BPF_KPROBE_OVERRIDE=y
CONFIG_HIST_TRIGGERS=y
CONFIG_FTRACE_STARTUP_TEST=y
# CONFIG_EVENT_TRACE_STARTUP_TEST is not set
CONFIG_DEBUG_ENTRY=y
CONFIG_NOTIFIER_ERROR_INJECTION=m
CONFIG_NETDEV_NOTIFIER_ERROR_INJECT=m
CONFIG_FAULT_INJECTION=y

@@ -861,4 +852,3 @@ CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
CONFIG_TEST_BITOPS=m
CONFIG_TEST_BPF=m
CONFIG_DEBUG_ENTRY=y

@@ -266,9 +266,9 @@ CONFIG_IP_VS_DH=m
CONFIG_IP_VS_SH=m
CONFIG_IP_VS_SED=m
CONFIG_IP_VS_NQ=m
CONFIG_IP_VS_TWOS=m
CONFIG_IP_VS_FTP=m
CONFIG_IP_VS_PE_SIP=m
CONFIG_NF_TABLES_IPV4=y
CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_IP_NF_IPTABLES=m

@@ -289,7 +289,6 @@ CONFIG_IP_NF_SECURITY=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_TABLES_IPV6=y
CONFIG_NFT_FIB_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m

@@ -473,7 +472,6 @@ CONFIG_NLMON=m
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_VENDOR_ATHEROS is not set
# CONFIG_NET_VENDOR_AURORA is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_BROCADE is not set
# CONFIG_NET_VENDOR_CADENCE is not set

@@ -573,7 +571,6 @@ CONFIG_VIRTIO_BALLOON=m
CONFIG_VIRTIO_INPUT=y
CONFIG_VHOST_NET=m
CONFIG_VHOST_VSOCK=m
# CONFIG_SURFACE_PLATFORMS is not set
CONFIG_S390_CCW_IOMMU=y
CONFIG_S390_AP_IOMMU=y
CONFIG_EXT4_FS=y

@@ -623,6 +620,7 @@ CONFIG_NTFS_RW=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_TMPFS_INODE64=y
CONFIG_HUGETLBFS=y
CONFIG_CONFIGFS_FS=m
CONFIG_ECRYPT_FS=m

@@ -703,12 +701,8 @@ CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m

@@ -720,7 +714,6 @@ CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_DES=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4=m

@@ -26,7 +26,6 @@ CONFIG_CRASH_DUMP=y
# CONFIG_SECCOMP is not set
# CONFIG_GCC_PLUGINS is not set
CONFIG_PARTITION_ADVANCED=y
CONFIG_IBM_PARTITION=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
# CONFIG_COMPACTION is not set
# CONFIG_MIGRATION is not set

@@ -61,11 +60,9 @@ CONFIG_RAW_DRIVER=y
# CONFIG_HID is not set
# CONFIG_VIRTIO_MENU is not set
# CONFIG_VHOST_MENU is not set
# CONFIG_SURFACE_PLATFORMS is not set
# CONFIG_IOMMU_SUPPORT is not set
# CONFIG_DNOTIFY is not set
# CONFIG_INOTIFY_USER is not set
CONFIG_CONFIGFS_FS=y
# CONFIG_MISC_FILESYSTEMS is not set
# CONFIG_NETWORK_FILESYSTEMS is not set
CONFIG_LSM="yama,loadpin,safesetid,integrity"

@@ -14,12 +14,12 @@

struct s390_idle_data {
seqcount_t seqcount;
unsigned long long idle_count;
unsigned long long idle_time;
unsigned long long clock_idle_enter;
unsigned long long clock_idle_exit;
unsigned long long timer_idle_enter;
unsigned long long timer_idle_exit;
unsigned long idle_count;
unsigned long idle_time;
unsigned long clock_idle_enter;
unsigned long clock_idle_exit;
unsigned long timer_idle_enter;
unsigned long timer_idle_exit;
unsigned long mt_cycles_enter[8];
};

@@ -202,7 +202,7 @@ extern unsigned int s390_pci_no_rid;
----------------------------------------------------------------------------- */
/* Base stuff */
int zpci_create_device(u32 fid, u32 fh, enum zpci_state state);
void zpci_remove_device(struct zpci_dev *zdev);
void zpci_remove_device(struct zpci_dev *zdev, bool set_error);
int zpci_enable_device(struct zpci_dev *);
int zpci_disable_device(struct zpci_dev *);
int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64);

@@ -98,10 +98,10 @@ extern unsigned char ptff_function_mask[16];

/* Query TOD offset result */
struct ptff_qto {
unsigned long long physical_clock;
unsigned long long tod_offset;
unsigned long long logical_tod_offset;
unsigned long long tod_epoch_difference;
unsigned long physical_clock;
unsigned long tod_offset;
unsigned long logical_tod_offset;
unsigned long tod_epoch_difference;
} __packed;

static inline int ptff_query(unsigned int nr)

@@ -151,9 +151,9 @@ struct ptff_qui {
rc; \
})

static inline unsigned long long local_tick_disable(void)
static inline unsigned long local_tick_disable(void)
{
unsigned long long old;
unsigned long old;

old = S390_lowcore.clock_comparator;
S390_lowcore.clock_comparator = clock_comparator_max;

@@ -161,7 +161,7 @@ static inline unsigned long long local_tick_disable(void)
return old;
}

static inline void local_tick_enable(unsigned long long comp)
static inline void local_tick_enable(unsigned long comp)
{
S390_lowcore.clock_comparator = comp;
set_clock_comparator(S390_lowcore.clock_comparator);

@@ -169,9 +169,9 @@ static inline void local_tick_enable(unsigned long long comp)

#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */

typedef unsigned long long cycles_t;
typedef unsigned long cycles_t;

static inline unsigned long long get_tod_clock(void)
static inline unsigned long get_tod_clock(void)
{
union tod_clock clk;

@@ -179,10 +179,10 @@ static inline unsigned long long get_tod_clock(void)
return clk.tod;
}

static inline unsigned long long get_tod_clock_fast(void)
static inline unsigned long get_tod_clock_fast(void)
{
#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
unsigned long long clk;
unsigned long clk;

asm volatile("stckf %0" : "=Q" (clk) : : "cc");
return clk;

@@ -208,9 +208,9 @@ extern union tod_clock tod_clock_base;
* Therefore preemption must be disabled, otherwise the returned
* value is not guaranteed to be monotonic.
*/
static inline unsigned long long get_tod_clock_monotonic(void)
static inline unsigned long get_tod_clock_monotonic(void)
{
unsigned long long tod;
unsigned long tod;

preempt_disable_notrace();
tod = get_tod_clock() - tod_clock_base.tod;

@@ -237,7 +237,7 @@ static inline unsigned long long get_tod_clock_monotonic(void)
* -> ns = (th * 125) + ((tl * 125) >> 9);
*
*/
static inline unsigned long long tod_to_ns(unsigned long long todval)
static inline unsigned long tod_to_ns(unsigned long todval)
{
return ((todval >> 9) * 125) + (((todval & 0x1ff) * 125) >> 9);
}

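Only the types change in these hunks; the arithmetic is untouched. tod_to_ns() implements ns = tod * 125 / 512 (one TOD unit is 2^-12 microseconds), split into high and low halves exactly as the derivation quoted in the hunk so the multiply cannot overflow. A quick userspace self-check:

#include <assert.h>

static unsigned long tod_to_ns(unsigned long todval)
{
	return ((todval >> 9) * 125) + (((todval & 0x1ff) * 125) >> 9);
}

int main(void)
{
	assert(tod_to_ns(4096) == 1000);	/* 4096 TOD units = 1 us = 1000 ns */
	assert(tod_to_ns(8192) == 2000);	/* linearity spot check */
	return 0;
}
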
@@ -249,10 +249,10 @@ static inline unsigned long long tod_to_ns(unsigned long long todval)
*
* Returns: true if a is later than b
*/
static inline int tod_after(unsigned long long a, unsigned long long b)
static inline int tod_after(unsigned long a, unsigned long b)
{
if (MACHINE_HAS_SCC)
return (long long) a > (long long) b;
return (long) a > (long) b;
return a > b;
}

@@ -263,10 +263,10 @@ static inline int tod_after(unsigned long long a, unsigned long long b)
*
* Returns: true if a is later than b
*/
static inline int tod_after_eq(unsigned long long a, unsigned long long b)
static inline int tod_after_eq(unsigned long a, unsigned long b)
{
if (MACHINE_HAS_SCC)
return (long long) a >= (long long) b;
return (long) a >= (long) b;
return a >= b;
}

@@ -47,7 +47,7 @@ void account_idle_time_irq(void)
void arch_cpu_idle(void)
{
struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
unsigned long long idle_time;
unsigned long idle_time;
unsigned long psw_mask;

/* Wait for external, I/O or machine check interrupt. */

@@ -73,7 +73,7 @@ static ssize_t show_idle_count(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
unsigned long long idle_count;
unsigned long idle_count;
unsigned int seq;

do {

@@ -82,14 +82,14 @@ static ssize_t show_idle_count(struct device *dev,
if (READ_ONCE(idle->clock_idle_enter))
idle_count++;
} while (read_seqcount_retry(&idle->seqcount, seq));
return sprintf(buf, "%llu\n", idle_count);
return sprintf(buf, "%lu\n", idle_count);
}
DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);

static ssize_t show_idle_time(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long long now, idle_time, idle_enter, idle_exit, in_idle;
unsigned long now, idle_time, idle_enter, idle_exit, in_idle;
struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
unsigned int seq;

@@ -109,14 +109,14 @@ static ssize_t show_idle_time(struct device *dev,
}
}
idle_time += in_idle;
return sprintf(buf, "%llu\n", idle_time >> 12);
return sprintf(buf, "%lu\n", idle_time >> 12);
}
DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);

u64 arch_cpu_idle_time(int cpu)
{
struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
unsigned long long now, idle_enter, idle_exit, in_idle;
unsigned long now, idle_enter, idle_exit, in_idle;
unsigned int seq;

do {

@@ -269,7 +269,7 @@ static int __hw_perf_event_init(struct perf_event *event, unsigned int type)
case CPUMF_CTR_SET_MAX:
/* The counter could not be associated to a counter set */
return -EINVAL;
};
}

/* Initialize for using the CPU-measurement counter facility */
if (!atomic_inc_not_zero(&num_events)) {

@@ -26,12 +26,10 @@
#include <asm/timex.h>
#include <asm/debug.h>

#include <asm/perf_cpum_cf_diag.h>
#include <asm/hwctrset.h>

#define CF_DIAG_CTRSET_DEF 0xfeef /* Counter set header mark */
#define CF_DIAG_MIN_INTERVAL 60 /* Minimum counter set read */
/* interval in seconds */
static unsigned long cf_diag_interval = CF_DIAG_MIN_INTERVAL;
static unsigned int cf_diag_cpu_speed;
static debug_info_t *cf_diag_dbg;

@@ -729,7 +727,6 @@ static DEFINE_MUTEX(cf_diag_ctrset_mutex);
static struct cf_diag_ctrset {
unsigned long ctrset; /* Bit mask of counter set to read */
cpumask_t mask; /* CPU mask to read from */
time64_t lastread; /* Epoch counter set last read */
} cf_diag_ctrset;

static void cf_diag_ctrset_clear(void)

@@ -866,27 +863,16 @@ static int cf_diag_all_read(unsigned long arg)
{
struct cf_diag_call_on_cpu_parm p;
cpumask_var_t mask;
time64_t now;
int rc = 0;
int rc;

debug_sprintf_event(cf_diag_dbg, 5, "%s\n", __func__);
if (!alloc_cpumask_var(&mask, GFP_KERNEL))
return -ENOMEM;
now = ktime_get_seconds();
if (cf_diag_ctrset.lastread + cf_diag_interval > now) {
debug_sprintf_event(cf_diag_dbg, 5, "%s now %lld "
" lastread %lld\n", __func__, now,
cf_diag_ctrset.lastread);
rc = -EAGAIN;
goto out;
} else {
cf_diag_ctrset.lastread = now;
}

p.sets = cf_diag_ctrset.ctrset;
cpumask_and(mask, &cf_diag_ctrset.mask, cpu_online_mask);
on_each_cpu_mask(mask, cf_diag_cpu_read, &p, 1);
rc = cf_diag_all_copy(arg, mask);
out:
free_cpumask_var(mask);
debug_sprintf_event(cf_diag_dbg, 5, "%s rc %d\n", __func__, rc);
return rc;

@@ -982,7 +968,7 @@ static int cf_diag_all_start(void)
*/
static size_t cf_diag_needspace(unsigned int sets)
{
struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
struct cpu_cf_events *cpuhw = get_cpu_ptr(&cpu_cf_events);
size_t bytes = 0;
int i;

@@ -998,6 +984,7 @@ static size_t cf_diag_needspace(unsigned int sets)
sizeof(((struct s390_ctrset_cpudata *)0)->no_sets));
debug_sprintf_event(cf_diag_dbg, 5, "%s bytes %ld\n", __func__,
bytes);
put_cpu_ptr(&cpu_cf_events);
return bytes;
}

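The this_cpu_ptr() to get_cpu_ptr() change in cf_diag_needspace() is the standard preemption fix: with preemption enabled, this_cpu_ptr() can be sampled on one CPU and then used after migration to another, while get_cpu_ptr() disables preemption until the matching put_cpu_ptr() added at the end of the function. The pattern in isolation (sketch with a hypothetical per-cpu variable):

#include <linux/percpu.h>

struct my_stats {
	unsigned long n;
};

static DEFINE_PER_CPU(struct my_stats, my_stats);

static unsigned long my_stats_read(void)
{
	struct my_stats *s = get_cpu_ptr(&my_stats);	/* preemption off */
	unsigned long n = s->n;

	put_cpu_ptr(&my_stats);				/* preemption back on */
	return n;
}
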
@@ -68,10 +68,10 @@ EXPORT_SYMBOL(s390_epoch_delta_notifier);

unsigned char ptff_function_mask[16];

static unsigned long long lpar_offset;
static unsigned long long initial_leap_seconds;
static unsigned long long tod_steering_end;
static long long tod_steering_delta;
static unsigned long lpar_offset;
static unsigned long initial_leap_seconds;
static unsigned long tod_steering_end;
static long tod_steering_delta;

/*
* Get time offsets with PTFF

@@ -96,7 +96,7 @@ void __init time_early_init(void)

/* get initial leap seconds */
if (ptff_query(PTFF_QUI) && ptff(&qui, sizeof(qui), PTFF_QUI) == 0)
initial_leap_seconds = (unsigned long long)
initial_leap_seconds = (unsigned long)
((long) qui.old_leap * 4096000000L);
}

@@ -222,7 +222,7 @@ void __init read_persistent_wall_and_boot_offset(struct timespec64 *wall_time,

static u64 read_tod_clock(struct clocksource *cs)
{
unsigned long long now, adj;
unsigned long now, adj;

preempt_disable(); /* protect from changes to steering parameters */
now = get_tod_clock();

@@ -362,7 +362,7 @@ static inline int check_sync_clock(void)
* Apply clock delta to the global data structures.
* This is called once on the CPU that performed the clock sync.
*/
static void clock_sync_global(unsigned long long delta)
static void clock_sync_global(unsigned long delta)
{
unsigned long now, adj;
struct ptff_qto qto;

@@ -378,7 +378,7 @@ static void clock_sync_global(unsigned long long delta)
-(adj >> 15) : (adj >> 15);
tod_steering_delta += delta;
if ((abs(tod_steering_delta) >> 48) != 0)
panic("TOD clock sync offset %lli is too large to drift\n",
panic("TOD clock sync offset %li is too large to drift\n",
tod_steering_delta);
tod_steering_end = now + (abs(tod_steering_delta) << 15);
vdso_data->arch_data.tod_steering_end = tod_steering_end;

@@ -394,7 +394,7 @@ static void clock_sync_global(unsigned long long delta)
* Apply clock delta to the per-CPU data structures of this CPU.
* This is called for each online CPU after the call to clock_sync_global.
*/
static void clock_sync_local(unsigned long long delta)
static void clock_sync_local(unsigned long delta)
{
/* Add the delta to the clock comparator. */
if (S390_lowcore.clock_comparator != clock_comparator_max) {

@@ -418,7 +418,7 @@ static void __init time_init_wq(void)
struct clock_sync_data {
atomic_t cpus;
int in_sync;
unsigned long long clock_delta;
unsigned long clock_delta;
};

/*

@@ -538,7 +538,7 @@ static int stpinfo_valid(void)
static int stp_sync_clock(void *data)
{
struct clock_sync_data *sync = data;
unsigned long long clock_delta, flags;
u64 clock_delta, flags;
static int first;
int rc;

@@ -720,8 +720,8 @@ static ssize_t ctn_id_show(struct device *dev,

mutex_lock(&stp_mutex);
if (stpinfo_valid())
ret = sprintf(buf, "%016llx\n",
*(unsigned long long *) stp_info.ctnid);
ret = sprintf(buf, "%016lx\n",
*(unsigned long *) stp_info.ctnid);
mutex_unlock(&stp_mutex);
return ret;
}

@@ -794,7 +794,7 @@ static ssize_t leap_seconds_scheduled_show(struct device *dev,
if (!stzi.lsoib.p)
return sprintf(buf, "0,0\n");

return sprintf(buf, "%llu,%d\n",
return sprintf(buf, "%lu,%d\n",
tod_to_ns(stzi.lsoib.nlsout - TOD_UNIX_EPOCH) / NSEC_PER_SEC,
stzi.lsoib.nlso - stzi.lsoib.also);
}