commit 93d5404e89

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

drivers/net/ipa/ipa_main.c
  8afc7e471a ("net: ipa: separate disabling setup from modem stop")
  76b5fbcd6b ("net: ipa: kill ipa_modem_init()")
  Duplicated include, drop one.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

.mailmap | 3
@@ -71,6 +71,9 @@ Chao Yu <chao@kernel.org> <chao2.yu@samsung.com>
 Chao Yu <chao@kernel.org> <yuchao0@huawei.com>
 Chris Chiu <chris.chiu@canonical.com> <chiu@endlessm.com>
 Chris Chiu <chris.chiu@canonical.com> <chiu@endlessos.org>
+Christian Borntraeger <borntraeger@linux.ibm.com> <borntraeger@de.ibm.com>
+Christian Borntraeger <borntraeger@linux.ibm.com> <cborntra@de.ibm.com>
+Christian Borntraeger <borntraeger@linux.ibm.com> <borntrae@de.ibm.com>
 Christophe Ricard <christophe.ricard@gmail.com>
 Christoph Hellwig <hch@lst.de>
 Colin Ian King <colin.king@intel.com> <colin.king@canonical.com>
@@ -1520,15 +1520,15 @@ This sysfs attribute controls the keyboard "face" that will be shown on the
 Lenovo X1 Carbon 2nd gen (2014)'s adaptive keyboard. The value can be read
 and set.
 
-- 1 = Home mode
-- 2 = Web-browser mode
-- 3 = Web-conference mode
-- 4 = Function mode
-- 5 = Layflat mode
+- 0 = Home mode
+- 1 = Web-browser mode
+- 2 = Web-conference mode
+- 3 = Function mode
+- 4 = Layflat mode
 
 For more details about which buttons will appear depending on the mode, please
 review the laptop's user guide:
-http://www.lenovo.com/shop/americas/content/user_guides/x1carbon_2_ug_en.pdf
+https://download.lenovo.com/ibmdl/pub/pc/pccbbs/mobiles_pdf/x1carbon_2_ug_en.pdf
 
 Battery charge control
 ----------------------
@@ -17,9 +17,10 @@ properties:
     oneOf:
       - enum:
           - fsl,imx7ulp-lpi2c
-          - fsl,imx8qm-lpi2c
       - items:
-          - const: fsl,imx8qxp-lpi2c
+          - enum:
+              - fsl,imx8qxp-lpi2c
+              - fsl,imx8qm-lpi2c
           - const: fsl,imx7ulp-lpi2c
 
   reg:
@@ -36,6 +36,8 @@ Key to symbols
 
 =============== =============================================================
 S               Start condition
+Sr              Repeated start condition, used to switch from write to
+                read mode.
 P               Stop condition
 Rd/Wr (1 bit)   Read/Write bit. Rd equals 1, Wr equals 0.
 A, NA (1 bit)   Acknowledge (ACK) and Not Acknowledge (NACK) bit
@@ -100,7 +102,7 @@ Implemented by i2c_smbus_read_byte_data()
 This reads a single byte from a device, from a designated register.
 The register is specified through the Comm byte::
 
-  S Addr Wr [A] Comm [A] S Addr Rd [A] [Data] NA P
+  S Addr Wr [A] Comm [A] Sr Addr Rd [A] [Data] NA P
 
 Functionality flag: I2C_FUNC_SMBUS_READ_BYTE_DATA
 
@@ -114,7 +116,7 @@ This operation is very like Read Byte; again, data is read from a
 device, from a designated register that is specified through the Comm
 byte. But this time, the data is a complete word (16 bits)::
 
-  S Addr Wr [A] Comm [A] S Addr Rd [A] [DataLow] A [DataHigh] NA P
+  S Addr Wr [A] Comm [A] Sr Addr Rd [A] [DataLow] A [DataHigh] NA P
 
 Functionality flag: I2C_FUNC_SMBUS_READ_WORD_DATA
 
@@ -164,7 +166,7 @@ This command selects a device register (through the Comm byte), sends
 16 bits of data to it, and reads 16 bits of data in return::
 
   S Addr Wr [A] Comm [A] DataLow [A] DataHigh [A]
-  S Addr Rd [A] [DataLow] A [DataHigh] NA P
+  Sr Addr Rd [A] [DataLow] A [DataHigh] NA P
 
 Functionality flag: I2C_FUNC_SMBUS_PROC_CALL
 
@@ -181,7 +183,7 @@ of data is specified by the device in the Count byte.
 ::
 
   S Addr Wr [A] Comm [A]
-  S Addr Rd [A] [Count] A [Data] A [Data] A ... A [Data] NA P
+  Sr Addr Rd [A] [Count] A [Data] A [Data] A ... A [Data] NA P
 
 Functionality flag: I2C_FUNC_SMBUS_READ_BLOCK_DATA
 
@@ -212,7 +214,7 @@ This command selects a device register (through the Comm byte), sends
 1 to 31 bytes of data to it, and reads 1 to 31 bytes of data in return::
 
   S Addr Wr [A] Comm [A] Count [A] Data [A] ...
-  S Addr Rd [A] [Count] A [Data] ... A P
+  Sr Addr Rd [A] [Count] A [Data] ... A P
 
 Functionality flag: I2C_FUNC_SMBUS_BLOCK_PROC_CALL
 
@@ -300,7 +302,7 @@ This command reads a block of bytes from a device, from a
 designated register that is specified through the Comm byte::
 
   S Addr Wr [A] Comm [A]
-  S Addr Rd [A] [Data] A [Data] A ... A [Data] NA P
+  Sr Addr Rd [A] [Data] A [Data] A ... A [Data] NA P
 
 Functionality flag: I2C_FUNC_SMBUS_READ_I2C_BLOCK
 
@@ -37,8 +37,7 @@ conn_reuse_mode - INTEGER
 
 	0: disable any special handling on port reuse. The new
 	connection will be delivered to the same real server that was
-	servicing the previous connection. This will effectively
-	disable expire_nodest_conn.
+	servicing the previous connection.
 
 	bit 1: enable rescheduling of new connections when it is safe.
 	That is, whenever expire_nodest_conn and for TCP sockets, when
@@ -486,8 +486,8 @@ of packets.
 Drivers are free to use a more permissive configuration than the requested
 configuration. It is expected that drivers should only implement directly the
 most generic mode that can be supported. For example if the hardware can
-support HWTSTAMP_FILTER_V2_EVENT, then it should generally always upscale
-HWTSTAMP_FILTER_V2_L2_SYNC_MESSAGE, and so forth, as HWTSTAMP_FILTER_V2_EVENT
+support HWTSTAMP_FILTER_PTP_V2_EVENT, then it should generally always upscale
+HWTSTAMP_FILTER_PTP_V2_L2_SYNC, and so forth, as HWTSTAMP_FILTER_PTP_V2_EVENT
 is more generic (and more useful to applications).
 
 A driver which supports hardware time stamping shall update the struct
@@ -84,6 +84,16 @@ CONFIG_ENERGY_MODEL must be enabled to use the EM framework.
 2.2 Registration of performance domains
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
+Registration of 'advanced' EM
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The 'advanced' EM gets it's name due to the fact that the driver is allowed
+to provide more precised power model. It's not limited to some implemented math
+formula in the framework (like it's in 'simple' EM case). It can better reflect
+the real power measurements performed for each performance state. Thus, this
+registration method should be preferred in case considering EM static power
+(leakage) is important.
+
 Drivers are expected to register performance domains into the EM framework by
 calling the following API::
 
@@ -103,6 +113,18 @@ to: return warning/error, stop working or panic.
 See Section 3. for an example of driver implementing this
 callback, or Section 2.4 for further documentation on this API
 
+Registration of 'simple' EM
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The 'simple' EM is registered using the framework helper function
+cpufreq_register_em_with_opp(). It implements a power model which is tight to
+math formula::
+
+	Power = C * V^2 * f
+
+The EM which is registered using this method might not reflect correctly the
+physics of a real device, e.g. when static power (leakage) is important.
+
 
 2.3 Accessing performance domains
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -138,6 +160,10 @@ or in Section 2.4
 3. Example driver
 -----------------
 
+The CPUFreq framework supports dedicated callback for registering
+the EM for a given CPU(s) 'policy' object: cpufreq_driver::register_em().
+That callback has to be implemented properly for a given driver,
+because the framework would call it at the right time during setup.
 This section provides a simple example of a CPUFreq driver registering a
 performance domain in the Energy Model framework using the (fake) 'foo'
 protocol. The driver implements an est_power() function to be provided to the
@@ -167,25 +193,22 @@ EM framework::
   20		return 0;
   21	}
   22
-  23	static int foo_cpufreq_init(struct cpufreq_policy *policy)
+  23	static void foo_cpufreq_register_em(struct cpufreq_policy *policy)
   24	{
   25		struct em_data_callback em_cb = EM_DATA_CB(est_power);
   26		struct device *cpu_dev;
-  27		int nr_opp, ret;
+  27		int nr_opp;
   28
   29		cpu_dev = get_cpu_device(cpumask_first(policy->cpus));
  30
-  31		/* Do the actual CPUFreq init work ... */
-  32		ret = do_foo_cpufreq_init(policy);
-  33		if (ret)
-  34			return ret;
-  35
-  36		/* Find the number of OPPs for this policy */
-  37		nr_opp = foo_get_nr_opp(policy);
+  31		/* Find the number of OPPs for this policy */
+  32		nr_opp = foo_get_nr_opp(policy);
+  33
+  34		/* And register the new performance domain */
+  35		em_dev_register_perf_domain(cpu_dev, nr_opp, &em_cb, policy->cpus,
+  36					    true);
+  37	}
   38
-  39		/* And register the new performance domain */
-  40		em_dev_register_perf_domain(cpu_dev, nr_opp, &em_cb, policy->cpus,
-  41					    true);
-  42
-  43		return 0;
-  44	}
+  39	static struct cpufreq_driver foo_cpufreq_driver = {
+  40		.register_em = foo_cpufreq_register_em,
+  41	};
MAINTAINERS | 26
@@ -2263,6 +2263,15 @@ L:	linux-iio@vger.kernel.org
 S:	Maintained
 F:	drivers/counter/microchip-tcb-capture.c
 
+ARM/MILBEAUT ARCHITECTURE
+M:	Taichi Sugaya <sugaya.taichi@socionext.com>
+M:	Takao Orito <orito.takao@socionext.com>
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:	Maintained
+F:	arch/arm/boot/dts/milbeaut*
+F:	arch/arm/mach-milbeaut/
+N:	milbeaut
+
 ARM/MIOA701 MACHINE SUPPORT
 M:	Robert Jarzmik <robert.jarzmik@free.fr>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -2729,10 +2738,11 @@ S:	Maintained
 F:	drivers/memory/*emif*
 
 ARM/TEXAS INSTRUMENT KEYSTONE ARCHITECTURE
+M:	Nishanth Menon <nm@ti.com>
 M:	Santosh Shilimkar <ssantosh@kernel.org>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/ti/linux.git
 F:	arch/arm/boot/dts/keystone-*
 F:	arch/arm/mach-keystone/
 
@@ -3570,13 +3580,14 @@ L:	netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/ethernet/broadcom/b44.*
 
-BROADCOM B53 ETHERNET SWITCH DRIVER
+BROADCOM B53/SF2 ETHERNET SWITCH DRIVER
 M:	Florian Fainelli <f.fainelli@gmail.com>
 L:	netdev@vger.kernel.org
 L:	openwrt-devel@lists.openwrt.org (subscribers-only)
 S:	Supported
 F:	Documentation/devicetree/bindings/net/dsa/brcm,b53.yaml
 F:	drivers/net/dsa/b53/*
+F:	drivers/net/dsa/bcm_sf2*
+F:	include/linux/dsa/brcm.h
 F:	include/linux/platform_data/b53.h
 
@@ -10445,7 +10456,7 @@ F:	arch/riscv/include/uapi/asm/kvm*
 F:	arch/riscv/kvm/
 
 KERNEL VIRTUAL MACHINE for s390 (KVM/s390)
-M:	Christian Borntraeger <borntraeger@de.ibm.com>
+M:	Christian Borntraeger <borntraeger@linux.ibm.com>
 M:	Janosch Frank <frankja@linux.ibm.com>
 R:	David Hildenbrand <david@redhat.com>
 R:	Claudio Imbrenda <imbrenda@linux.ibm.com>
@@ -16573,7 +16584,7 @@ F:	drivers/video/fbdev/savage/
 S390
 M:	Heiko Carstens <hca@linux.ibm.com>
 M:	Vasily Gorbik <gor@linux.ibm.com>
-M:	Christian Borntraeger <borntraeger@de.ibm.com>
+M:	Christian Borntraeger <borntraeger@linux.ibm.com>
 R:	Alexander Gordeev <agordeev@linux.ibm.com>
 L:	linux-s390@vger.kernel.org
 S:	Supported
@@ -18483,6 +18494,7 @@ F:	include/uapi/linux/pkt_sched.h
 F:	include/uapi/linux/tc_act/
 F:	include/uapi/linux/tc_ematch/
 F:	net/sched/
+F:	tools/testing/selftests/tc-testing
 
 TC90522 MEDIA DRIVER
 M:	Akihiro Tsukada <tskd08@gmail.com>
@@ -19031,11 +19043,12 @@ F:	drivers/mmc/host/tifm_sd.c
 F:	include/linux/tifm.h
 
 TI KEYSTONE MULTICORE NAVIGATOR DRIVERS
+M:	Nishanth Menon <nm@ti.com>
 M:	Santosh Shilimkar <ssantosh@kernel.org>
 L:	linux-kernel@vger.kernel.org
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/ti/linux.git
 F:	drivers/soc/ti/*
 
 TI LM49xxx FAMILY ASoC CODEC DRIVERS
@@ -20317,7 +20330,8 @@ F:	arch/x86/include/asm/vmware.h
 F:	arch/x86/kernel/cpu/vmware.c
 
 VMWARE PVRDMA DRIVER
-M:	Adit Ranadive <aditr@vmware.com>
+M:	Bryan Tan <bryantan@vmware.com>
+M:	Vishnu Dasa <vdasa@vmware.com>
 M:	VMware PV-Drivers <pv-drivers@vmware.com>
 L:	linux-rdma@vger.kernel.org
 S:	Maintained
Makefile | 4
@@ -2,8 +2,8 @@
 VERSION = 5
 PATCHLEVEL = 16
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
-NAME = Trick or Treat
+EXTRAVERSION = -rc2
+NAME = Gobble Gobble
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
@@ -488,3 +488,4 @@
 556	common	landlock_restrict_self		sys_landlock_restrict_self
 # 557 reserved for memfd_secret
 558	common	process_mrelease		sys_process_mrelease
+559	common	futex_waitv			sys_futex_waitv
@@ -36,7 +36,6 @@ void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr);
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 
 void flush_dcache_page(struct page *page);
-void flush_dcache_folio(struct folio *folio);
 
 void dma_cache_wback_inv(phys_addr_t start, unsigned long sz);
 void dma_cache_inv(phys_addr_t start, unsigned long sz);
@@ -1463,6 +1463,7 @@ config HIGHMEM
 	bool "High Memory Support"
 	depends on MMU
 	select KMAP_LOCAL
+	select KMAP_LOCAL_NON_LINEAR_PTE_ARRAY
 	help
 	  The address space of ARM processors is only 4 Gigabytes large
 	  and it has to accommodate user address space, kernel address
@@ -506,11 +506,17 @@
 			#address-cells = <3>;
 			#interrupt-cells = <1>;
 			#size-cells = <2>;
-			interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "pcie", "msi";
+			interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+			interrupt-map = <0 0 0 1 &gicv2 GIC_SPI 143
+					 IRQ_TYPE_LEVEL_HIGH>,
+					<0 0 0 2 &gicv2 GIC_SPI 144
+					 IRQ_TYPE_LEVEL_HIGH>,
+					<0 0 0 3 &gicv2 GIC_SPI 145
+					 IRQ_TYPE_LEVEL_HIGH>,
+					<0 0 0 4 &gicv2 GIC_SPI 146
+					 IRQ_TYPE_LEVEL_HIGH>;
 			msi-controller;
 			msi-parent = <&pcie0>;
@@ -242,6 +242,8 @@
 
 			gpio-controller;
 			#gpio-cells = <2>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
 		};
 
 		pcie0: pcie@12000 {
@@ -408,7 +410,7 @@
 		i2c0: i2c@18009000 {
 			compatible = "brcm,iproc-i2c";
 			reg = <0x18009000 0x50>;
-			interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			clock-frequency = <100000>;
@@ -290,7 +290,6 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr
  */
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 extern void flush_dcache_page(struct page *);
-void flush_dcache_folio(struct folio *folio);
 
 #define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
 static inline void flush_kernel_vmap_range(void *addr, int size)
@@ -33,7 +33,7 @@ extern void __iomem *sdr_ctl_base_addr;
 u32 socfpga_sdram_self_refresh(u32 sdr_base);
 extern unsigned int socfpga_sdram_self_refresh_sz;
 
-extern char secondary_trampoline, secondary_trampoline_end;
+extern char secondary_trampoline[], secondary_trampoline_end[];
 
 extern unsigned long socfpga_cpu1start_addr;
 
@@ -20,14 +20,14 @@
 
 static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
-	int trampoline_size = &secondary_trampoline_end - &secondary_trampoline;
+	int trampoline_size = secondary_trampoline_end - secondary_trampoline;
 
 	if (socfpga_cpu1start_addr) {
 		/* This will put CPU #1 into reset. */
 		writel(RSTMGR_MPUMODRST_CPU1,
 		       rst_manager_base_addr + SOCFPGA_RSTMGR_MODMPURST);
 
-		memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size);
+		memcpy(phys_to_virt(0), secondary_trampoline, trampoline_size);
 
 		writel(__pa_symbol(secondary_startup),
 		       sys_manager_base_addr + (socfpga_cpu1start_addr & 0x000000ff));
@@ -45,12 +45,12 @@ static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
 
 static int socfpga_a10_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
-	int trampoline_size = &secondary_trampoline_end - &secondary_trampoline;
+	int trampoline_size = secondary_trampoline_end - secondary_trampoline;
 
 	if (socfpga_cpu1start_addr) {
 		writel(RSTMGR_MPUMODRST_CPU1, rst_manager_base_addr +
 		       SOCFPGA_A10_RSTMGR_MODMPURST);
-		memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size);
+		memcpy(phys_to_virt(0), secondary_trampoline, trampoline_size);
 
 		writel(__pa_symbol(secondary_startup),
 		       sys_manager_base_addr + (socfpga_cpu1start_addr & 0x00000fff));
@@ -296,8 +296,7 @@
 			pinctrl-0 = <&ufs_rst_n &ufs_refclk_out>;
 			phys = <&ufs_0_phy>;
 			phy-names = "ufs-phy";
-			samsung,sysreg = <&syscon_fsys2>;
-			samsung,ufs-shareability-reg-offset = <0x710>;
+			samsung,sysreg = <&syscon_fsys2 0x710>;
 			status = "disabled";
 		};
 	};
@@ -12,6 +12,17 @@
 
 #define HAVE_FUNCTION_GRAPH_FP_TEST
 
+/*
+ * HAVE_FUNCTION_GRAPH_RET_ADDR_PTR means that the architecture can provide a
+ * "return address pointer" which can be used to uniquely identify a return
+ * address which has been overwritten.
+ *
+ * On arm64 we use the address of the caller's frame record, which remains the
+ * same for the lifetime of the instrumented function, unlike the return
+ * address in the LR.
+ */
+#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
+
 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
 #define ARCH_SUPPORTS_FTRACE_OPS 1
 #else
@@ -76,7 +76,7 @@ static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t ptep,
 static inline void
 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
 {
-	VM_BUG_ON(mm != &init_mm);
+	VM_BUG_ON(mm && mm != &init_mm);
 	__pmd_populate(pmdp, __pa(ptep), PMD_TYPE_TABLE | PMD_TABLE_UXN);
 }
 
@@ -47,9 +47,6 @@ struct stack_info {
  * @prev_type:   The type of stack this frame record was on, or a synthetic
  *               value of STACK_TYPE_UNKNOWN. This is used to detect a
  *               transition from one stack to another.
- *
- * @graph:       When FUNCTION_GRAPH_TRACER is selected, holds the index of a
- *               replacement lr value in the ftrace graph stack.
  */
 struct stackframe {
 	unsigned long fp;
@@ -57,9 +54,6 @@ struct stackframe {
 	DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES);
 	unsigned long prev_fp;
 	enum stack_type prev_type;
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	int graph;
-#endif
 #ifdef CONFIG_KRETPROBES
 	struct llist_node *kr_cur;
 #endif
@@ -281,12 +281,22 @@ do {									\
 	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
 } while (0)
 
+/*
+ * We must not call into the scheduler between uaccess_ttbr0_enable() and
+ * uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions,
+ * we must evaluate these outside of the critical section.
+ */
 #define __raw_get_user(x, ptr, err)					\
 do {									\
+	__typeof__(*(ptr)) __user *__rgu_ptr = (ptr);			\
+	__typeof__(x) __rgu_val;					\
 	__chk_user_ptr(ptr);						\
+									\
 	uaccess_ttbr0_enable();						\
-	__raw_get_mem("ldtr", x, ptr, err);				\
+	__raw_get_mem("ldtr", __rgu_val, __rgu_ptr, err);		\
 	uaccess_ttbr0_disable();					\
+									\
+	(x) = __rgu_val;						\
 } while (0)
 
 #define __get_user_error(x, ptr, err)					\
@@ -310,14 +320,22 @@ do {									\
 
 #define get_user	__get_user
 
+/*
+ * We must not call into the scheduler between __uaccess_enable_tco_async() and
+ * __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking
+ * functions, we must evaluate these outside of the critical section.
+ */
 #define __get_kernel_nofault(dst, src, type, err_label)			\
 do {									\
+	__typeof__(dst) __gkn_dst = (dst);				\
+	__typeof__(src) __gkn_src = (src);				\
 	int __gkn_err = 0;						\
 									\
 	__uaccess_enable_tco_async();					\
-	__raw_get_mem("ldr", *((type *)(dst)),				\
-		      (__force type *)(src), __gkn_err);		\
+	__raw_get_mem("ldr", *((type *)(__gkn_dst)),			\
+		      (__force type *)(__gkn_src), __gkn_err);		\
 	__uaccess_disable_tco_async();					\
 									\
 	if (unlikely(__gkn_err))					\
 		goto err_label;						\
 } while (0)
@@ -351,11 +369,19 @@ do {									\
 	}								\
 } while (0)
 
+/*
+ * We must not call into the scheduler between uaccess_ttbr0_enable() and
+ * uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions,
+ * we must evaluate these outside of the critical section.
+ */
 #define __raw_put_user(x, ptr, err)					\
 do {									\
-	__chk_user_ptr(ptr);						\
+	__typeof__(*(ptr)) __user *__rpu_ptr = (ptr);			\
+	__typeof__(*(ptr)) __rpu_val = (x);				\
+	__chk_user_ptr(__rpu_ptr);					\
+									\
 	uaccess_ttbr0_enable();						\
-	__raw_put_mem("sttr", x, ptr, err);				\
+	__raw_put_mem("sttr", __rpu_val, __rpu_ptr, err);		\
 	uaccess_ttbr0_disable();					\
 } while (0)
 
@@ -380,14 +406,22 @@ do {									\
 
 #define put_user	__put_user
 
+/*
+ * We must not call into the scheduler between __uaccess_enable_tco_async() and
+ * __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking
+ * functions, we must evaluate these outside of the critical section.
+ */
 #define __put_kernel_nofault(dst, src, type, err_label)			\
 do {									\
+	__typeof__(dst) __pkn_dst = (dst);				\
+	__typeof__(src) __pkn_src = (src);				\
 	int __pkn_err = 0;						\
 									\
 	__uaccess_enable_tco_async();					\
-	__raw_put_mem("str", *((type *)(src)),				\
-		      (__force type *)(dst), __pkn_err);		\
+	__raw_put_mem("str", *((type *)(__pkn_src)),			\
+		      (__force type *)(__pkn_dst), __pkn_err);		\
 	__uaccess_disable_tco_async();					\
 									\
 	if (unlikely(__pkn_err))					\
 		goto err_label;						\
 } while(0)
@@ -244,8 +244,6 @@ void arch_ftrace_update_code(int command)
  * on the way back to parent. For this purpose, this function is called
  * in _mcount() or ftrace_caller() to replace return address (*parent) on
  * the call stack to return_to_handler.
- *
- * Note that @frame_pointer is used only for sanity check later.
  */
 void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
 			   unsigned long frame_pointer)
@@ -263,8 +261,10 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
 	 */
 	old = *parent;
 
-	if (!function_graph_enter(old, self_addr, frame_pointer, NULL))
+	if (!function_graph_enter(old, self_addr, frame_pointer,
+	    (void *)frame_pointer)) {
 		*parent = return_hooker;
+	}
 }
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -38,9 +38,6 @@ void start_backtrace(struct stackframe *frame, unsigned long fp,
 {
 	frame->fp = fp;
 	frame->pc = pc;
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	frame->graph = 0;
-#endif
 #ifdef CONFIG_KRETPROBES
 	frame->kr_cur = NULL;
 #endif
@@ -116,20 +113,23 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
 	frame->prev_fp = fp;
 	frame->prev_type = info.type;
 
+	frame->pc = ptrauth_strip_insn_pac(frame->pc);
+
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	if (tsk->ret_stack &&
-		(ptrauth_strip_insn_pac(frame->pc) == (unsigned long)return_to_handler)) {
-		struct ftrace_ret_stack *ret_stack;
+		(frame->pc == (unsigned long)return_to_handler)) {
+		unsigned long orig_pc;
 		/*
 		 * This is a case where function graph tracer has
 		 * modified a return address (LR) in a stack frame
 		 * to hook a function return.
 		 * So replace it to an original value.
 		 */
-		ret_stack = ftrace_graph_get_ret_stack(tsk, frame->graph++);
-		if (WARN_ON_ONCE(!ret_stack))
+		orig_pc = ftrace_graph_ret_addr(tsk, NULL, frame->pc,
+						(void *)frame->fp);
+		if (WARN_ON_ONCE(frame->pc == orig_pc))
 			return -EINVAL;
-		frame->pc = ret_stack->ret;
+		frame->pc = orig_pc;
 	}
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 #ifdef CONFIG_KRETPROBES
@@ -137,8 +137,6 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
 		frame->pc = kretprobe_find_ret_addr(tsk, (void *)frame->fp, &frame->kr_cur);
 #endif
 
-	frame->pc = ptrauth_strip_insn_pac(frame->pc);
-
 	return 0;
 }
 NOKPROBE_SYMBOL(unwind_frame);
@@ -1,26 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Timer support for Hexagon
- *
- * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _ASM_TIMER_REGS_H
-#define _ASM_TIMER_REGS_H
-
-/* This stuff should go into a platform specific file */
-#define TCX0_CLK_RATE		19200
-#define TIMER_ENABLE		0
-#define TIMER_CLR_ON_MATCH	1
-
-/*
- * 8x50 HDD Specs 5-8. Simulator co-sim not fixed until
- * release 1.1, and then it's "adjustable" and probably not defaulted.
- */
-#define RTOS_TIMER_INT		3
-#ifdef CONFIG_HEXAGON_COMET
-#define RTOS_TIMER_REGS_ADDR	0xAB000000UL
-#endif
-#define SLEEP_CLK_RATE		32000
-
-#endif
@@ -7,11 +7,10 @@
 #define _ASM_TIMEX_H
 
 #include <asm-generic/timex.h>
-#include <asm/timer-regs.h>
 #include <asm/hexagon_vm.h>
 
 /* Using TCX0 as our clock. CLOCK_TICK_RATE scheduled to be removed. */
-#define CLOCK_TICK_RATE              TCX0_CLK_RATE
+#define CLOCK_TICK_RATE              19200
 
 #define ARCH_HAS_READ_CURRENT_TIMER
 
@@ -0,0 +1 @@
+vmlinux.lds
@@ -17,9 +17,10 @@
 #include <linux/of_irq.h>
 #include <linux/module.h>
 
-#include <asm/timer-regs.h>
 #include <asm/hexagon_vm.h>
 
+#define TIMER_ENABLE		BIT(0)
+
 /*
  * For the clocksource we need:
  *	pcycle frequency (600MHz)
@@ -33,6 +34,13 @@ cycles_t pcycle_freq_mhz;
 cycles_t thread_freq_mhz;
 cycles_t sleep_clk_freq;
 
+/*
+ * 8x50 HDD Specs 5-8. Simulator co-sim not fixed until
+ * release 1.1, and then it's "adjustable" and probably not defaulted.
+ */
+#define RTOS_TIMER_INT		3
+#define RTOS_TIMER_REGS_ADDR	0xAB000000UL
+
 static struct resource rtos_timer_resources[] = {
 	{
 		.start = RTOS_TIMER_REGS_ADDR,
@@ -80,7 +88,7 @@ static int set_next_event(unsigned long delta, struct clock_event_device *evt)
 	iowrite32(0, &rtos_timer->clear);
 
 	iowrite32(delta, &rtos_timer->match);
-	iowrite32(1 << TIMER_ENABLE, &rtos_timer->enable);
+	iowrite32(TIMER_ENABLE, &rtos_timer->enable);
 	return 0;
 }
 
@@ -27,6 +27,7 @@ void __raw_readsw(const void __iomem *addr, void *data, int len)
 		*dst++ = *src;
 
 }
+EXPORT_SYMBOL(__raw_readsw);
 
 /*
  * __raw_writesw - read words a short at a time
@@ -47,6 +48,7 @@ void __raw_writesw(void __iomem *addr, const void *data, int len)
 
 
 }
+EXPORT_SYMBOL(__raw_writesw);
 
 /* Pretty sure len is pre-adjusted for the length of the access already */
 void __raw_readsl(const void __iomem *addr, void *data, int len)
@@ -62,6 +64,7 @@ void __raw_readsl(const void __iomem *addr, void *data, int len)
 
 
 }
+EXPORT_SYMBOL(__raw_readsl);
 
 void __raw_writesl(void __iomem *addr, const void *data, int len)
 {
@@ -76,3 +79,4 @@ void __raw_writesl(void __iomem *addr, const void *data, int len)
 
 
 }
+EXPORT_SYMBOL(__raw_writesl);
@@ -369,3 +369,4 @@
 446	common	landlock_restrict_self		sys_landlock_restrict_self
 # 447 reserved for memfd_secret
 448	common	process_mrelease		sys_process_mrelease
+449	common	futex_waitv			sys_futex_waitv
@@ -250,7 +250,6 @@ static inline void __flush_page_to_ram(void *vaddr)
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 #define flush_dcache_page(page)		__flush_page_to_ram(page_address(page))
-void flush_dcache_folio(struct folio *folio);
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
 #define flush_icache_page(vma, page)	__flush_page_to_ram(page_address(page))
@@ -448,3 +448,4 @@
 446	common	landlock_restrict_self		sys_landlock_restrict_self
 # 447 reserved for memfd_secret
 448	common	process_mrelease		sys_process_mrelease
+449	common	futex_waitv			sys_futex_waitv
@@ -1145,7 +1145,7 @@ asmlinkage void set_esp0(unsigned long ssp)
  */
 asmlinkage void fpsp040_die(void)
 {
-	force_fatal_sig(SIGSEGV);
+	force_exit_sig(SIGSEGV);
 }
 
 #ifdef CONFIG_M68KFPU_EMU
@@ -454,3 +454,4 @@
 446	common	landlock_restrict_self		sys_landlock_restrict_self
 # 447 reserved for memfd_secret
 448	common	process_mrelease		sys_process_mrelease
+449	common	futex_waitv			sys_futex_waitv
@@ -61,8 +61,6 @@ static inline void flush_dcache_page(struct page *page)
 		SetPageDcacheDirty(page);
 }
 
-void flush_dcache_folio(struct folio *folio);
-
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
 
@@ -27,7 +27,6 @@ void flush_cache_vunmap(unsigned long start, unsigned long end);
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 void flush_dcache_page(struct page *page);
-void flush_dcache_folio(struct folio *folio);
 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 		       unsigned long vaddr, void *dst, void *src, int len);
 void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
@@ -29,7 +29,6 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
 	unsigned long pfn);
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 void flush_dcache_page(struct page *page);
-void flush_dcache_folio(struct folio *folio);
 
 extern void flush_icache_range(unsigned long start, unsigned long end);
 extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
@@ -147,6 +147,17 @@
 	extrd,u \r, 63-(\sa), 64-(\sa), \t
 	.endm
 
+	/* Extract unsigned for 32- and 64-bit
+	 * The extru instruction leaves the most significant 32 bits of the
+	 * target register in an undefined state on PA 2.0 systems. */
+	.macro extru_safe r, p, len, t
+#ifdef CONFIG_64BIT
+	extrd,u	\r, 32+(\p), \len, \t
+#else
+	extru	\r, \p, \len, \t
+#endif
+	.endm
+
 	/* load 32-bit 'value' into 'reg' compensating for the ldil
 	 * sign-extension when running in wide mode.
 	 * WARNING!! neither 'value' nor 'reg' can be expressions
@@ -50,7 +50,6 @@ void invalidate_kernel_vmap_range(void *vaddr, int size);
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 void flush_dcache_page(struct page *page);
-void flush_dcache_folio(struct folio *folio);
 
 #define flush_dcache_mmap_lock(mapping)		xa_lock_irq(&mapping->i_pages)
 #define flush_dcache_mmap_unlock(mapping)	xa_unlock_irq(&mapping->i_pages)
@@ -366,17 +366,9 @@
 	 */
 	.macro		L2_ptep	pmd,pte,index,va,fault
 #if CONFIG_PGTABLE_LEVELS == 3
-	extru		\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
+	extru_safe	\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
 #else
-# if defined(CONFIG_64BIT)
-	extrd,u		\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
-#else
-# if PAGE_SIZE > 4096
-	extru		\va,31-ASM_PGDIR_SHIFT,32-ASM_PGDIR_SHIFT,\index
-# else
-	extru		\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
-# endif
-# endif
+	extru_safe	\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
 #endif
 	dep		%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
 #if CONFIG_PGTABLE_LEVELS < 3
@@ -386,7 +378,7 @@
 	bb,>=,n		\pmd,_PxD_PRESENT_BIT,\fault
 	dep		%r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
 	SHLREG		\pmd,PxD_VALUE_SHIFT,\pmd
-	extru		\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
+	extru_safe	\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
 	dep		%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
 	shladd		\index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
 	.endm
@@ -566,7 +566,7 @@ lws_compare_and_swap:
 	ldo	R%lws_lock_start(%r20), %r28
 
 	/* Extract eight bits from r26 and hash lock (Bits 3-11) */
-	extru  %r26, 28, 8, %r20
+	extru_safe  %r26, 28, 8, %r20
 
 	/* Find lock to use, the hash is either one of 0 to
 	   15, multiplied by 16 (keep it 16-byte aligned)
@@ -751,7 +751,7 @@ cas2_lock_start:
 	ldo	R%lws_lock_start(%r20), %r28
 
 	/* Extract eight bits from r26 and hash lock (Bits 3-11) */
-	extru  %r26, 28, 8, %r20
+	extru_safe  %r26, 28, 8, %r20
 
 	/* Find lock to use, the hash is either one of 0 to
 	   15, multiplied by 16 (keep it 16-byte aligned)
@@ -57,8 +57,6 @@ SECTIONS
 {
 	. = KERNEL_BINARY_TEXT_START;
 
-	_stext = .;	/* start of kernel text, includes init code & data */
-
 	__init_begin = .;
 	HEAD_TEXT_SECTION
 	MLONGCALL_DISCARD(INIT_TEXT_SECTION(8))
@@ -82,6 +80,7 @@ SECTIONS
 	/* freed after init ends here */
 
 	_text = .;		/* Text and read-only data */
+	_stext = .;
 	MLONGCALL_KEEP(INIT_TEXT_SECTION(8))
 	.text ALIGN(PAGE_SIZE) : {
 		TEXT_TEXT
@@ -196,3 +196,6 @@ clean-files := vmlinux.lds
 # Force dependency (incbin is bad)
 $(obj)/vdso32_wrapper.o : $(obj)/vdso32/vdso32.so.dbg
 $(obj)/vdso64_wrapper.o : $(obj)/vdso64/vdso64.so.dbg
+
+# for cleaning
+subdir- += vdso32 vdso64
@@ -733,6 +733,7 @@ _GLOBAL(mmu_pin_tlb)
 #ifdef CONFIG_PIN_TLB_DATA
 	LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET)
 	LOAD_REG_IMMEDIATE(r7, MI_SVALID | MI_PS8MEG | _PMD_ACCESSED)
+	li	r8, 0
 #ifdef CONFIG_PIN_TLB_IMMR
 	li	r0, 3
 #else
@@ -741,26 +742,26 @@ _GLOBAL(mmu_pin_tlb)
 	mtctr	r0
 	cmpwi	r4, 0
 	beq	4f
-	LOAD_REG_IMMEDIATE(r8, 0xf0 | _PAGE_RO | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT)
 	LOAD_REG_ADDR(r9, _sinittext)
 
 2:	ori	r0, r6, MD_EVALID
+	ori	r12, r8, 0xf0 | _PAGE_RO | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT
 	mtspr	SPRN_MD_CTR, r5
 	mtspr	SPRN_MD_EPN, r0
 	mtspr	SPRN_MD_TWC, r7
-	mtspr	SPRN_MD_RPN, r8
+	mtspr	SPRN_MD_RPN, r12
 	addi	r5, r5, 0x100
 	addis	r6, r6, SZ_8M@h
 	addis	r8, r8, SZ_8M@h
 	cmplw	r6, r9
 	bdnzt	lt, 2b
-4:	LOAD_REG_IMMEDIATE(r8, 0xf0 | _PAGE_DIRTY | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT)
+4:
 2:	ori	r0, r6, MD_EVALID
+	ori	r12, r8, 0xf0 | _PAGE_DIRTY | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT
 	mtspr	SPRN_MD_CTR, r5
 	mtspr	SPRN_MD_EPN, r0
 	mtspr	SPRN_MD_TWC, r7
-	mtspr	SPRN_MD_RPN, r8
+	mtspr	SPRN_MD_RPN, r12
 	addi	r5, r5, 0x100
 	addis	r6, r6, SZ_8M@h
 	addis	r8, r8, SZ_8M@h
@@ -781,7 +782,7 @@ _GLOBAL(mmu_pin_tlb)
 #endif
 #if defined(CONFIG_PIN_TLB_IMMR) || defined(CONFIG_PIN_TLB_DATA)
 	lis	r0, (MD_RSV4I | MD_TWAM)@h
-	mtspr	SPRN_MI_CTR, r0
+	mtspr	SPRN_MD_CTR, r0
 #endif
 	mtspr	SPRN_SRR1, r10
 	mtspr	SPRN_SRR0, r11
@@ -25,8 +25,14 @@ static inline int __get_user_sigset(sigset_t *dst, const sigset_t __user *src)
 
 	return __get_user(dst->sig[0], (u64 __user *)&src->sig[0]);
 }
-#define unsafe_get_user_sigset(dst, src, label) \
-	unsafe_get_user((dst)->sig[0], (u64 __user *)&(src)->sig[0], label)
+#define unsafe_get_user_sigset(dst, src, label) do {			\
+	sigset_t *__dst = dst;						\
+	const sigset_t __user *__src = src;				\
+	int i;								\
+									\
+	for (i = 0; i < _NSIG_WORDS; i++)				\
+		unsafe_get_user(__dst->sig[i], &__src->sig[i], label);	\
+} while (0)
 
 #ifdef CONFIG_VSX
 extern unsigned long copy_vsx_to_user(void __user *to,
@@ -1063,7 +1063,7 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
 	 * We kill the task with a SIGSEGV in this situation.
 	 */
 	if (do_setcontext(new_ctx, regs, 0)) {
-		force_fatal_sig(SIGSEGV);
+		force_exit_sig(SIGSEGV);
 		return -EFAULT;
 	}
 
@@ -704,7 +704,7 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
 	 */
 
 	if (__get_user_sigset(&set, &new_ctx->uc_sigmask)) {
-		force_fatal_sig(SIGSEGV);
+		force_exit_sig(SIGSEGV);
 		return -EFAULT;
 	}
 	set_current_blocked(&set);
@@ -713,7 +713,7 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
 		return -EFAULT;
 	if (__unsafe_restore_sigcontext(current, NULL, 0, &new_ctx->uc_mcontext)) {
 		user_read_access_end();
-		force_fatal_sig(SIGSEGV);
+		force_exit_sig(SIGSEGV);
 		return -EFAULT;
 	}
 	user_read_access_end();
@@ -528,3 +528,4 @@
 446	common	landlock_restrict_self		sys_landlock_restrict_self
 # 447 reserved for memfd_secret
 448	common	process_mrelease		sys_process_mrelease
+449	common	futex_waitv			sys_futex_waitv
@@ -2005,7 +2005,7 @@ hcall_real_table:
 	.globl	hcall_real_table_end
 hcall_real_table_end:
 
-_GLOBAL(kvmppc_h_set_xdabr)
+_GLOBAL_TOC(kvmppc_h_set_xdabr)
 EXPORT_SYMBOL_GPL(kvmppc_h_set_xdabr)
 	andi.	r0, r5, DABRX_USER | DABRX_KERNEL
 	beq	6f
@@ -2015,7 +2015,7 @@ EXPORT_SYMBOL_GPL(kvmppc_h_set_xdabr)
 6:	li	r3, H_PARAMETER
 	blr
 
-_GLOBAL(kvmppc_h_set_dabr)
+_GLOBAL_TOC(kvmppc_h_set_dabr)
 EXPORT_SYMBOL_GPL(kvmppc_h_set_dabr)
 	li	r5, DABRX_USER | DABRX_KERNEL
 3:
@@ -314,7 +314,7 @@ static unsigned long __init kaslr_choose_location(void *dt_ptr, phys_addr_t size
 		pr_warn("KASLR: No safe seed for randomizing the kernel base.\n");
 
 	ram = min_t(phys_addr_t, __max_low_memory, size);
-	ram = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, true, false);
+	ram = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, true, true);
 	linear_sz = min_t(unsigned long, ram, SZ_512M);
 
 	/* If the linear size is smaller than 64M, do not randmize */
@@ -645,7 +645,7 @@ static void early_init_this_mmu(void)
 
 		if (map)
 			linear_map_top = map_mem_in_cams(linear_map_top,
-							 num_cams, true, true);
+							 num_cams, false, true);
 	}
 #endif
 
@@ -766,7 +766,7 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base,
 		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
 
 		linear_sz = map_mem_in_cams(first_memblock_size, num_cams,
-					    false, true);
+					    true, true);
 
 		ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
 	} else
@@ -376,9 +376,9 @@ static void initialize_form2_numa_distance_lookup_table(void)
 {
 	int i, j;
 	struct device_node *root;
-	const __u8 *numa_dist_table;
+	const __u8 *form2_distances;
 	const __be32 *numa_lookup_index;
-	int numa_dist_table_length;
+	int form2_distances_length;
 	int max_numa_index, distance_index;
 
 	if (firmware_has_feature(FW_FEATURE_OPAL))
@@ -392,45 +392,41 @@ static void initialize_form2_numa_distance_lookup_table(void)
 	max_numa_index = of_read_number(&numa_lookup_index[0], 1);
 
 	/* first element of the array is the size and is encode-int */
-	numa_dist_table = of_get_property(root, "ibm,numa-distance-table", NULL);
-	numa_dist_table_length = of_read_number((const __be32 *)&numa_dist_table[0], 1);
+	form2_distances = of_get_property(root, "ibm,numa-distance-table", NULL);
+	form2_distances_length = of_read_number((const __be32 *)&form2_distances[0], 1);
 	/* Skip the size which is encoded int */
-	numa_dist_table += sizeof(__be32);
+	form2_distances += sizeof(__be32);
 
-	pr_debug("numa_dist_table_len = %d, numa_dist_indexes_len = %d\n",
-		 numa_dist_table_length, max_numa_index);
+	pr_debug("form2_distances_len = %d, numa_dist_indexes_len = %d\n",
+		 form2_distances_length, max_numa_index);
 
 	for (i = 0; i < max_numa_index; i++)
 		/* +1 skip the max_numa_index in the property */
 		numa_id_index_table[i] = of_read_number(&numa_lookup_index[i + 1], 1);
 
-
-	if (numa_dist_table_length != max_numa_index * max_numa_index) {
+	if (form2_distances_length != max_numa_index * max_numa_index) {
 		WARN(1, "Wrong NUMA distance information\n");
-		/* consider everybody else just remote. */
-		for (i = 0; i < max_numa_index; i++) {
-			for (j = 0; j < max_numa_index; j++) {
-				int nodeA = numa_id_index_table[i];
-				int nodeB = numa_id_index_table[j];
-
-				if (nodeA == nodeB)
-					numa_distance_table[nodeA][nodeB] = LOCAL_DISTANCE;
-				else
-					numa_distance_table[nodeA][nodeB] = REMOTE_DISTANCE;
-			}
-		}
+		form2_distances = NULL; // don't use it
 	}
-
 	distance_index = 0;
 	for (i = 0; i < max_numa_index; i++) {
 		for (j = 0; j < max_numa_index; j++) {
 			int nodeA = numa_id_index_table[i];
 			int nodeB = numa_id_index_table[j];
+			int dist;
 
-			numa_distance_table[nodeA][nodeB] = numa_dist_table[distance_index++];
-			pr_debug("dist[%d][%d]=%d ", nodeA, nodeB, numa_distance_table[nodeA][nodeB]);
+			if (form2_distances)
+				dist = form2_distances[distance_index++];
+			else if (nodeA == nodeB)
+				dist = LOCAL_DISTANCE;
+			else
+				dist = REMOTE_DISTANCE;
+			numa_distance_table[nodeA][nodeB] = dist;
+			pr_debug("dist[%d][%d]=%d ", nodeA, nodeB, dist);
 		}
 	}
 
 	of_node_put(root);
 }
@@ -186,7 +186,6 @@ err:
 static int mcu_remove(struct i2c_client *client)
 {
 	struct mcu *mcu = i2c_get_clientdata(client);
-	int ret;
 
 	kthread_stop(shutdown_thread);
 
@@ -1094,15 +1094,6 @@ static phys_addr_t ddw_memory_hotplug_max(void)
 	phys_addr_t max_addr = memory_hotplug_max();
 	struct device_node *memory;
 
-	/*
-	 * The "ibm,pmemory" can appear anywhere in the address space.
-	 * Assuming it is still backed by page structs, set the upper limit
-	 * for the huge DMA window as MAX_PHYSMEM_BITS.
-	 */
-	if (of_find_node_by_type(NULL, "ibm,pmemory"))
-		return (sizeof(phys_addr_t) * 8 <= MAX_PHYSMEM_BITS) ?
-			(phys_addr_t) -1 : (1ULL << MAX_PHYSMEM_BITS);
-
 	for_each_node_by_type(memory, "memory") {
 		unsigned long start, size;
 		int n_mem_addr_cells, n_mem_size_cells, len;
@@ -1238,7 +1229,6 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
 	u32 ddw_avail[DDW_APPLICABLE_SIZE];
 	struct dma_win *window;
 	struct property *win64;
-	bool ddw_enabled = false;
 	struct failed_ddw_pdn *fpdn;
 	bool default_win_removed = false, direct_mapping = false;
 	bool pmem_present;
@@ -1253,7 +1243,6 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
 
 	if (find_existing_ddw(pdn, &dev->dev.archdata.dma_offset, &len)) {
 		direct_mapping = (len >= max_ram_len);
-		ddw_enabled = true;
 		goto out_unlock;
 	}
 
@@ -1367,8 +1356,10 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
 		len = order_base_2(query.largest_available_block << page_shift);
 		win_name = DMA64_PROPNAME;
 	} else {
-		direct_mapping = true;
-		win_name = DIRECT64_PROPNAME;
+		direct_mapping = !default_win_removed ||
+			(len == MAX_PHYSMEM_BITS) ||
+			(!pmem_present && (len == max_ram_len));
+		win_name = direct_mapping ? DIRECT64_PROPNAME : DMA64_PROPNAME;
 	}
 
 	ret = create_ddw(dev, ddw_avail, &create, page_shift, len);
@@ -1454,7 +1445,6 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
 	spin_unlock(&dma_win_list_lock);
 
 	dev->dev.archdata.dma_offset = win_addr;
-	ddw_enabled = true;
 	goto out_unlock;
 
 out_del_list:
@@ -1490,10 +1480,10 @@ out_unlock:
 	 * as RAM, then we failed to create a window to cover persistent
 	 * memory and need to set the DMA limit.
 	 */
-	if (pmem_present && ddw_enabled && direct_mapping && len == max_ram_len)
+	if (pmem_present && direct_mapping && len == max_ram_len)
 		dev->dev.bus_dma_limit = dev->dev.archdata.dma_offset + (1ULL << len);
 
-	return ddw_enabled && direct_mapping;
+	return direct_mapping;
 }
 
 static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
@ -3,7 +3,6 @@ config PPC_XIVE
|
|||
bool
|
||||
select PPC_SMP_MUXED_IPI
|
||||
select HARDIRQS_SW_RESEND
|
||||
select IRQ_DOMAIN_NOMAP
|
||||
|
||||
config PPC_XIVE_NATIVE
|
||||
bool
|
||||
|
|
|
@ -1443,8 +1443,7 @@ static const struct irq_domain_ops xive_irq_domain_ops = {
|
|||
|
||||
static void __init xive_init_host(struct device_node *np)
|
||||
{
|
||||
xive_irq_domain = irq_domain_add_nomap(np, XIVE_MAX_IRQ,
|
||||
&xive_irq_domain_ops, NULL);
|
||||
xive_irq_domain = irq_domain_add_tree(np, &xive_irq_domain_ops, NULL);
|
||||
if (WARN_ON(xive_irq_domain == NULL))
|
||||
return;
|
||||
irq_set_default_host(xive_irq_domain);
|
||||
|
|
|
@ -107,11 +107,13 @@ PHONY += vdso_install
|
|||
vdso_install:
|
||||
$(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso $@
|
||||
|
||||
ifeq ($(KBUILD_EXTMOD),)
|
||||
ifeq ($(CONFIG_MMU),y)
|
||||
prepare: vdso_prepare
|
||||
vdso_prepare: prepare0
|
||||
$(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso include/generated/vdso-offsets.h
|
||||
endif
|
||||
endif
|
||||
|
||||
ifneq ($(CONFIG_XIP_KERNEL),y)
|
||||
ifeq ($(CONFIG_RISCV_M_MODE)$(CONFIG_SOC_CANAAN),yy)
|
||||
|
|
|
@ -19,6 +19,8 @@ CONFIG_SOC_VIRT=y
|
|||
CONFIG_SOC_MICROCHIP_POLARFIRE=y
|
||||
CONFIG_SMP=y
|
||||
CONFIG_HOTPLUG_CPU=y
|
||||
CONFIG_VIRTUALIZATION=y
|
||||
CONFIG_KVM=m
|
||||
CONFIG_JUMP_LABEL=y
|
||||
CONFIG_MODULES=y
|
||||
CONFIG_MODULE_UNLOAD=y
|
||||
|
|
|
@ -19,6 +19,8 @@ CONFIG_SOC_VIRT=y
|
|||
CONFIG_ARCH_RV32I=y
|
||||
CONFIG_SMP=y
|
||||
CONFIG_HOTPLUG_CPU=y
|
||||
CONFIG_VIRTUALIZATION=y
|
||||
CONFIG_KVM=m
|
||||
CONFIG_JUMP_LABEL=y
|
||||
CONFIG_MODULES=y
|
||||
CONFIG_MODULE_UNLOAD=y
|
||||
|
|
|
@ -47,7 +47,7 @@ config ARCH_SUPPORTS_UPROBES
|
|||
config KASAN_SHADOW_OFFSET
|
||||
hex
|
||||
depends on KASAN
|
||||
default 0x18000000000000
|
||||
default 0x1C000000000000
|
||||
|
||||
config S390
|
||||
def_bool y
|
||||
|
@ -194,6 +194,7 @@ config S390
|
|||
select HAVE_RELIABLE_STACKTRACE
|
||||
select HAVE_RSEQ
|
||||
select HAVE_SAMPLE_FTRACE_DIRECT
|
||||
select HAVE_SAMPLE_FTRACE_DIRECT_MULTI
|
||||
select HAVE_SOFTIRQ_ON_OWN_STACK
|
||||
select HAVE_SYSCALL_TRACEPOINTS
|
||||
select HAVE_VIRT_CPU_ACCOUNTING
|
||||
|
|
|
@ -77,10 +77,12 @@ KBUILD_AFLAGS_DECOMPRESSOR += $(aflags-y)
|
|||
KBUILD_CFLAGS_DECOMPRESSOR += $(cflags-y)
|
||||
|
||||
ifneq ($(call cc-option,-mstack-size=8192 -mstack-guard=128),)
|
||||
cflags-$(CONFIG_CHECK_STACK) += -mstack-size=$(STACK_SIZE)
|
||||
ifeq ($(call cc-option,-mstack-size=8192),)
|
||||
cflags-$(CONFIG_CHECK_STACK) += -mstack-guard=$(CONFIG_STACK_GUARD)
|
||||
endif
|
||||
CC_FLAGS_CHECK_STACK := -mstack-size=$(STACK_SIZE)
|
||||
ifeq ($(call cc-option,-mstack-size=8192),)
|
||||
CC_FLAGS_CHECK_STACK += -mstack-guard=$(CONFIG_STACK_GUARD)
|
||||
endif
|
||||
export CC_FLAGS_CHECK_STACK
|
||||
cflags-$(CONFIG_CHECK_STACK) += $(CC_FLAGS_CHECK_STACK)
|
||||
endif
|
||||
|
||||
ifdef CONFIG_EXPOLINE
|
||||
|
|
|
@ -149,82 +149,56 @@ static void setup_ident_map_size(unsigned long max_physmem_end)
|
|||
|
||||
static void setup_kernel_memory_layout(void)
|
||||
{
|
||||
bool vmalloc_size_verified = false;
|
||||
unsigned long vmemmap_off;
|
||||
unsigned long vspace_left;
|
||||
unsigned long vmemmap_start;
|
||||
unsigned long rte_size;
|
||||
unsigned long pages;
|
||||
unsigned long vmax;
|
||||
|
||||
pages = ident_map_size / PAGE_SIZE;
|
||||
/* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
|
||||
vmemmap_size = SECTION_ALIGN_UP(pages) * sizeof(struct page);
|
||||
|
||||
/* choose kernel address space layout: 4 or 3 levels. */
|
||||
vmemmap_off = round_up(ident_map_size, _REGION3_SIZE);
|
||||
vmemmap_start = round_up(ident_map_size, _REGION3_SIZE);
|
||||
if (IS_ENABLED(CONFIG_KASAN) ||
|
||||
vmalloc_size > _REGION2_SIZE ||
|
||||
vmemmap_off + vmemmap_size + vmalloc_size + MODULES_LEN > _REGION2_SIZE)
|
||||
vmax = _REGION1_SIZE;
|
||||
else
|
||||
vmax = _REGION2_SIZE;
|
||||
|
||||
/* keep vmemmap_off aligned to a top level region table entry */
|
||||
rte_size = vmax == _REGION1_SIZE ? _REGION2_SIZE : _REGION3_SIZE;
|
||||
MODULES_END = vmax;
|
||||
if (is_prot_virt_host()) {
|
||||
vmemmap_start + vmemmap_size + vmalloc_size + MODULES_LEN >
|
||||
_REGION2_SIZE) {
|
||||
MODULES_END = _REGION1_SIZE;
|
||||
rte_size = _REGION2_SIZE;
|
||||
} else {
|
||||
MODULES_END = _REGION2_SIZE;
|
||||
rte_size = _REGION3_SIZE;
|
||||
}
|
||||
/*
|
||||
* forcing modules and vmalloc area under the ultravisor
|
||||
* secure storage limit, so that any vmalloc allocation
|
||||
* we do could be used to back secure guest storage.
|
||||
*/
|
||||
adjust_to_uv_max(&MODULES_END);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_KASAN
|
||||
if (MODULES_END < vmax) {
|
||||
/* force vmalloc and modules below kasan shadow */
|
||||
MODULES_END = min(MODULES_END, KASAN_SHADOW_START);
|
||||
} else {
|
||||
/*
|
||||
* leave vmalloc and modules above kasan shadow but make
|
||||
* sure they don't overlap with it
|
||||
*/
|
||||
vmalloc_size = min(vmalloc_size, vmax - KASAN_SHADOW_END - MODULES_LEN);
|
||||
vmalloc_size_verified = true;
|
||||
vspace_left = KASAN_SHADOW_START;
|
||||
}
|
||||
#endif
|
||||
MODULES_VADDR = MODULES_END - MODULES_LEN;
|
||||
VMALLOC_END = MODULES_VADDR;
|
||||
|
||||
if (vmalloc_size_verified) {
|
||||
/* allow vmalloc area to occupy up to about 1/2 of the rest virtual space left */
|
||||
vmalloc_size = min(vmalloc_size, round_down(VMALLOC_END / 2, _REGION3_SIZE));
|
||||
VMALLOC_START = VMALLOC_END - vmalloc_size;
|
||||
} else {
|
||||
vmemmap_off = round_up(ident_map_size, rte_size);
|
||||
|
||||
if (vmemmap_off + vmemmap_size > VMALLOC_END ||
|
||||
vmalloc_size > VMALLOC_END - vmemmap_off - vmemmap_size) {
|
||||
/*
|
||||
* allow vmalloc area to occupy up to 1/2 of
|
||||
* the rest virtual space left.
|
||||
*/
|
||||
vmalloc_size = min(vmalloc_size, VMALLOC_END / 2);
|
||||
}
|
||||
VMALLOC_START = VMALLOC_END - vmalloc_size;
|
||||
vspace_left = VMALLOC_START;
|
||||
}
|
||||
|
||||
pages = vspace_left / (PAGE_SIZE + sizeof(struct page));
|
||||
/* split remaining virtual space between 1:1 mapping & vmemmap array */
|
||||
pages = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
|
||||
pages = SECTION_ALIGN_UP(pages);
|
||||
vmemmap_off = round_up(vspace_left - pages * sizeof(struct page), rte_size);
|
||||
/* keep vmemmap left most starting from a fresh region table entry */
|
||||
vmemmap_off = min(vmemmap_off, round_up(ident_map_size, rte_size));
|
||||
/* take care that identity map is lower then vmemmap */
|
||||
ident_map_size = min(ident_map_size, vmemmap_off);
|
||||
/* keep vmemmap_start aligned to a top level region table entry */
|
||||
vmemmap_start = round_down(VMALLOC_START - pages * sizeof(struct page), rte_size);
|
||||
/* vmemmap_start is the future VMEM_MAX_PHYS, make sure it is within MAX_PHYSMEM */
|
||||
vmemmap_start = min(vmemmap_start, 1UL << MAX_PHYSMEM_BITS);
|
||||
/* make sure identity map doesn't overlay with vmemmap */
|
||||
ident_map_size = min(ident_map_size, vmemmap_start);
|
||||
vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page);
|
||||
VMALLOC_START = max(vmemmap_off + vmemmap_size, VMALLOC_START);
|
||||
vmemmap = (struct page *)vmemmap_off;
|
||||
/* make sure vmemmap doesn't overlay with vmalloc area */
|
||||
VMALLOC_START = max(vmemmap_start + vmemmap_size, VMALLOC_START);
|
||||
vmemmap = (struct page *)vmemmap_start;
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
@@ -74,6 +74,12 @@ void *kexec_file_add_components(struct kimage *image,
 int arch_kexec_do_relocs(int r_type, void *loc, unsigned long val,
 			 unsigned long addr);
 
+#define ARCH_HAS_KIMAGE_ARCH
+
+struct kimage_arch {
+	void *ipl_buf;
+};
+
 extern const struct kexec_file_ops s390_kexec_image_ops;
 extern const struct kexec_file_ops s390_kexec_elf_ops;
 
@@ -191,8 +191,8 @@ static int copy_oldmem_user(void __user *dst, void *src, size_t count)
 			return rc;
 	} else {
 		/* Check for swapped kdump oldmem areas */
-		if (oldmem_data.start && from - oldmem_data.size < oldmem_data.size) {
-			from -= oldmem_data.size;
+		if (oldmem_data.start && from - oldmem_data.start < oldmem_data.size) {
+			from -= oldmem_data.start;
 			len = min(count, oldmem_data.size - from);
 		} else if (oldmem_data.start && from < oldmem_data.size) {
 			len = min(count, oldmem_data.size - from);
@@ -2156,7 +2156,7 @@ void *ipl_report_finish(struct ipl_report *report)
 
 	buf = vzalloc(report->size);
 	if (!buf)
-		return ERR_PTR(-ENOMEM);
+		goto out;
 	ptr = buf;
 
 	memcpy(ptr, report->ipib, report->ipib->hdr.len);
@@ -2195,6 +2195,7 @@ void *ipl_report_finish(struct ipl_report *report)
 	}
 
 	BUG_ON(ptr > buf + report->size);
+out:
 	return buf;
 }
 
|
@@ -12,6 +12,7 @@
 #include <linux/kexec.h>
 #include <linux/module_signature.h>
 #include <linux/verification.h>
+#include <linux/vmalloc.h>
 #include <asm/boot_data.h>
 #include <asm/ipl.h>
 #include <asm/setup.h>
@@ -170,6 +171,7 @@ static int kexec_file_add_ipl_report(struct kimage *image,
     struct kexec_buf buf;
     unsigned long addr;
     void *ptr, *end;
+    int ret;

     buf.image = image;

@@ -199,9 +201,13 @@ static int kexec_file_add_ipl_report(struct kimage *image,
         ptr += len;
     }

+    ret = -ENOMEM;
     buf.buffer = ipl_report_finish(data->report);
+    if (!buf.buffer)
+        goto out;
     buf.bufsz = data->report->size;
     buf.memsz = buf.bufsz;
+    image->arch.ipl_buf = buf.buffer;

     data->memsz += buf.memsz;

@@ -209,7 +215,9 @@ static int kexec_file_add_ipl_report(struct kimage *image,
         data->kernel_buf + offsetof(struct lowcore, ipl_parmblock_ptr);
     *lc_ipl_parmblock_ptr = (__u32)buf.mem;

-    return kexec_add_buffer(&buf);
+    ret = kexec_add_buffer(&buf);
+out:
+    return ret;
 }

 void *kexec_file_add_components(struct kimage *image,
@@ -322,3 +330,11 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
     }
     return 0;
 }
+
+int arch_kimage_file_post_load_cleanup(struct kimage *image)
+{
+    vfree(image->arch.ipl_buf);
+    image->arch.ipl_buf = NULL;
+
+    return kexec_image_post_load_cleanup_default(image);
+}
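The leak fix above follows a common ownership pattern: the buffer created while building the kexec image is recorded on the image and released in the arch cleanup hook, which also clears the pointer so a repeated cleanup stays harmless. A generic user-space sketch of that pattern (hypothetical names, not the kexec API):

    #include <stdlib.h>

    struct image {
        void *ipl_buf;          /* owned by the image once attached */
    };

    static int image_add_report(struct image *img)
    {
        void *buf = calloc(1, 4096);    /* stands in for the report buffer */

        if (!buf)
            return -1;
        img->ipl_buf = buf;     /* record the owner so cleanup can find it */
        return 0;
    }

    static void image_cleanup(struct image *img)
    {
        free(img->ipl_buf);     /* free(NULL) is a no-op, like vfree(NULL) */
        img->ipl_buf = NULL;    /* a second cleanup stays harmless */
    }

    int main(void)
    {
        struct image img = { 0 };

        if (image_add_report(&img) == 0)
            image_cleanup(&img);
        return 0;
    }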
@@ -606,7 +606,7 @@ static void __init setup_resources(void)

 static void __init setup_memory_end(void)
 {
-    memblock_remove(ident_map_size, ULONG_MAX);
+    memblock_remove(ident_map_size, PHYS_ADDR_MAX - ident_map_size);
     max_pfn = max_low_pfn = PFN_DOWN(ident_map_size);
     pr_notice("The maximum memory size is %luMB\n", ident_map_size >> 20);
 }
@@ -637,14 +637,6 @@ static struct notifier_block kdump_mem_nb = {

 #endif

-/*
- * Make sure that the area above identity mapping is protected
- */
-static void __init reserve_above_ident_map(void)
-{
-    memblock_reserve(ident_map_size, ULONG_MAX);
-}
-
 /*
  * Reserve memory for kdump kernel to be loaded with kexec
  */
@@ -785,7 +777,6 @@ static void __init memblock_add_mem_detect_info(void)
     }
     memblock_set_bottom_up(false);
-    memblock_set_node(0, ULONG_MAX, &memblock.memory, 0);
     memblock_dump_all();
 }

 /*
@@ -826,9 +817,6 @@ static void __init setup_memory(void)
         storage_key_init_range(start, end);

     psw_set_key(PAGE_DEFAULT_KEY);
-
-    /* Only cosmetics */
-    memblock_enforce_memory_limit(memblock_end_of_DRAM());
 }

 static void __init relocate_amode31_section(void)
@@ -999,24 +987,24 @@ void __init setup_arch(char **cmdline_p)
     setup_control_program_code();

     /* Do some memory reservations *before* memory is added to memblock */
-    reserve_above_ident_map();
     reserve_kernel();
     reserve_initrd();
     reserve_certificate_list();
     reserve_mem_detect_info();
+    memblock_set_current_limit(ident_map_size);
     memblock_allow_resize();

     /* Get information about *all* installed memory */
     memblock_add_mem_detect_info();

     free_mem_detect_info();
+    setup_memory_end();
+    memblock_dump_all();
+    setup_memory();

     relocate_amode31_section();
     setup_cr();
-
     setup_uv();
-    setup_memory_end();
-    setup_memory();
     dma_contiguous_reserve(ident_map_size);
     vmcp_cma_reserve();
     if (MACHINE_HAS_EDAT2)
@@ -451,3 +451,4 @@
 446  common  landlock_restrict_self  sys_landlock_restrict_self  sys_landlock_restrict_self
 # 447 reserved for memfd_secret
 448  common  process_mrelease        sys_process_mrelease        sys_process_mrelease
+449  common  futex_waitv             sys_futex_waitv             sys_futex_waitv
@@ -84,7 +84,7 @@ static void default_trap_handler(struct pt_regs *regs)
 {
     if (user_mode(regs)) {
         report_user_fault(regs, SIGSEGV, 0);
-        force_fatal_sig(SIGSEGV);
+        force_exit_sig(SIGSEGV);
     } else
         die(regs, "Unknown program exception");
 }
@@ -22,7 +22,7 @@ KBUILD_AFLAGS_32 += -m31 -s
 KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS))
 KBUILD_CFLAGS_32 += -m31 -fPIC -shared -fno-common -fno-builtin

-LDFLAGS_vdso32.so.dbg += -fPIC -shared -nostdlib -soname=linux-vdso32.so.1 \
+LDFLAGS_vdso32.so.dbg += -fPIC -shared -soname=linux-vdso32.so.1 \
     --hash-style=both --build-id=sha1 -melf_s390 -T

 $(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
@@ -8,8 +8,9 @@ ARCH_REL_TYPE_ABS += R_390_GOT|R_390_PLT
 include $(srctree)/lib/vdso/Makefile
 obj-vdso64 = vdso_user_wrapper.o note.o
 obj-cvdso64 = vdso64_generic.o getcpu.o
-CFLAGS_REMOVE_getcpu.o = -pg $(CC_FLAGS_FTRACE) $(CC_FLAGS_EXPOLINE)
-CFLAGS_REMOVE_vdso64_generic.o = -pg $(CC_FLAGS_FTRACE) $(CC_FLAGS_EXPOLINE)
+VDSO_CFLAGS_REMOVE := -pg $(CC_FLAGS_FTRACE) $(CC_FLAGS_EXPOLINE) $(CC_FLAGS_CHECK_STACK)
+CFLAGS_REMOVE_getcpu.o = $(VDSO_CFLAGS_REMOVE)
+CFLAGS_REMOVE_vdso64_generic.o = $(VDSO_CFLAGS_REMOVE)

 # Build rules

@@ -25,7 +26,7 @@ KBUILD_AFLAGS_64 += -m64 -s

 KBUILD_CFLAGS_64 := $(filter-out -m64,$(KBUILD_CFLAGS))
 KBUILD_CFLAGS_64 += -m64 -fPIC -shared -fno-common -fno-builtin
-ldflags-y := -fPIC -shared -nostdlib -soname=linux-vdso64.so.1 \
+ldflags-y := -fPIC -shared -soname=linux-vdso64.so.1 \
     --hash-style=both --build-id=sha1 -T

 $(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_64)
@@ -43,7 +43,6 @@ extern void flush_cache_range(struct vm_area_struct *vma,
         unsigned long start, unsigned long end);
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 void flush_dcache_page(struct page *page);
-void flush_dcache_folio(struct folio *folio);
 extern void flush_icache_range(unsigned long start, unsigned long end);
 #define flush_icache_user_range flush_icache_range
 extern void flush_icache_page(struct vm_area_struct *vma,
@@ -451,3 +451,4 @@
 446  common  landlock_restrict_self  sys_landlock_restrict_self
 # 447 reserved for memfd_secret
 448  common  process_mrelease        sys_process_mrelease
+449  common  futex_waitv             sys_futex_waitv
@@ -244,7 +244,7 @@ static int setup_frame(struct ksignal *ksig, struct pt_regs *regs,
         get_sigframe(ksig, regs, sigframe_size);

     if (invalid_frame_pointer(sf, sigframe_size)) {
-        force_fatal_sig(SIGILL);
+        force_exit_sig(SIGILL);
         return -EINVAL;
     }

|
@ -336,7 +336,7 @@ static int setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs,
|
|||
sf = (struct rt_signal_frame __user *)
|
||||
get_sigframe(ksig, regs, sigframe_size);
|
||||
if (invalid_frame_pointer(sf, sigframe_size)) {
|
||||
force_fatal_sig(SIGILL);
|
||||
force_exit_sig(SIGILL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
|
|
@@ -494,3 +494,4 @@
 446  common  landlock_restrict_self  sys_landlock_restrict_self
 # 447 reserved for memfd_secret
 448  common  process_mrelease        sys_process_mrelease
+449  common  futex_waitv             sys_futex_waitv
@@ -122,7 +122,7 @@ void try_to_clear_window_buffer(struct pt_regs *regs, int who)
         if ((sp & 7) ||
             copy_to_user((char __user *) sp, &tp->reg_window[window],
                          sizeof(struct reg_window32))) {
-            force_fatal_sig(SIGILL);
+            force_exit_sig(SIGILL);
             return;
         }
     }
@@ -193,7 +193,7 @@ config X86
     select HAVE_DYNAMIC_FTRACE_WITH_ARGS    if X86_64
     select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
     select HAVE_SAMPLE_FTRACE_DIRECT        if X86_64
-    select HAVE_SAMPLE_FTRACE_MULTI_DIRECT  if X86_64
+    select HAVE_SAMPLE_FTRACE_DIRECT_MULTI  if X86_64
     select HAVE_EBPF_JIT
     select HAVE_EFFICIENT_UNALIGNED_ACCESS
     select HAVE_EISA
@@ -226,7 +226,7 @@ bool emulate_vsyscall(unsigned long error_code,
     if ((!tmp && regs->orig_ax != syscall_nr) || regs->ip != address) {
         warn_bad_vsyscall(KERN_DEBUG, regs,
                           "seccomp tried to change syscall nr or ip");
-        force_fatal_sig(SIGSYS);
+        force_exit_sig(SIGSYS);
         return true;
     }
     regs->orig_ax = -1;
@@ -2211,7 +2211,6 @@ intel_pmu_snapshot_branch_stack(struct perf_branch_entry *entries, unsigned int
     /* must not have branches... */
     local_irq_save(flags);
     __intel_pmu_disable_all(false); /* we don't care about BTS */
     __intel_pmu_pebs_disable_all();
-    __intel_pmu_lbr_disable();
     /* ... until here */
     return __intel_pmu_snapshot_branch_stack(entries, cnt, flags);
@@ -2225,7 +2224,6 @@ intel_pmu_snapshot_arch_branch_stack(struct perf_branch_entry *entries, unsigned
     /* must not have branches... */
     local_irq_save(flags);
     __intel_pmu_disable_all(false); /* we don't care about BTS */
     __intel_pmu_pebs_disable_all();
-    __intel_pmu_arch_lbr_disable();
     /* ... until here */
     return __intel_pmu_snapshot_branch_stack(entries, cnt, flags);
@@ -3608,6 +3608,9 @@ static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *ev
     struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
     struct extra_reg *er;
     int idx = 0;
+    /* Any of the CHA events may be filtered by Thread/Core-ID.*/
+    if (event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN)
+        idx = SKX_CHA_MSR_PMON_BOX_FILTER_TID;

     for (er = skx_uncore_cha_extra_regs; er->msr; er++) {
         if (er->event != (event->hw.config & er->config_mask))
@@ -3675,6 +3678,7 @@ static struct event_constraint skx_uncore_iio_constraints[] = {
     UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
     UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
     UNCORE_EVENT_CONSTRAINT(0xd4, 0xc),
+    UNCORE_EVENT_CONSTRAINT(0xd5, 0xc),
     EVENT_CONSTRAINT_END
 };

@@ -4525,6 +4529,13 @@ static void snr_iio_cleanup_mapping(struct intel_uncore_type *type)
     pmu_iio_cleanup_mapping(type, &snr_iio_mapping_group);
 }

+static struct event_constraint snr_uncore_iio_constraints[] = {
+    UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
+    UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
+    UNCORE_EVENT_CONSTRAINT(0xd5, 0xc),
+    EVENT_CONSTRAINT_END
+};
+
 static struct intel_uncore_type snr_uncore_iio = {
     .name           = "iio",
     .num_counters   = 4,
@@ -4536,6 +4547,7 @@ static struct intel_uncore_type snr_uncore_iio = {
     .event_mask_ext = SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
     .box_ctl        = SNR_IIO_MSR_PMON_BOX_CTL,
     .msr_offset     = SNR_IIO_MSR_OFFSET,
+    .constraints    = snr_uncore_iio_constraints,
     .ops            = &ivbep_uncore_msr_ops,
     .format_group   = &snr_uncore_iio_format_group,
     .attr_update    = snr_iio_attr_update,
@@ -281,13 +281,13 @@ HYPERVISOR_callback_op(int cmd, void *arg)
     return _hypercall2(int, callback_op, cmd, arg);
 }

-static inline int
+static __always_inline int
 HYPERVISOR_set_debugreg(int reg, unsigned long value)
 {
     return _hypercall2(int, set_debugreg, reg, value);
 }

-static inline unsigned long
+static __always_inline unsigned long
 HYPERVISOR_get_debugreg(int reg)
 {
     return _hypercall1(unsigned long, get_debugreg, reg);
@@ -64,6 +64,7 @@ void xen_arch_unregister_cpu(int num);

 #ifdef CONFIG_PVH
 void __init xen_pvh_init(struct boot_params *boot_params);
+void __init mem_map_via_hcall(struct boot_params *boot_params_p);
 #endif

 #endif /* _ASM_X86_XEN_HYPERVISOR_H */
@@ -28,8 +28,7 @@ static DECLARE_WAIT_QUEUE_HEAD(ksgxd_waitq);
 static LIST_HEAD(sgx_active_page_list);
 static DEFINE_SPINLOCK(sgx_reclaimer_lock);

-/* The free page list lock protected variables prepend the lock. */
-static unsigned long sgx_nr_free_pages;
+static atomic_long_t sgx_nr_free_pages = ATOMIC_LONG_INIT(0);

 /* Nodes with one or more EPC sections. */
 static nodemask_t sgx_numa_mask;
@@ -403,14 +402,15 @@ skip:

         spin_lock(&node->lock);
         list_add_tail(&epc_page->list, &node->free_page_list);
-        sgx_nr_free_pages++;
         spin_unlock(&node->lock);
+        atomic_long_inc(&sgx_nr_free_pages);
     }
 }

 static bool sgx_should_reclaim(unsigned long watermark)
 {
-    return sgx_nr_free_pages < watermark && !list_empty(&sgx_active_page_list);
+    return atomic_long_read(&sgx_nr_free_pages) < watermark &&
+           !list_empty(&sgx_active_page_list);
 }

 static int ksgxd(void *p)
@@ -471,9 +471,9 @@ static struct sgx_epc_page *__sgx_alloc_epc_page_from_node(int nid)

     page = list_first_entry(&node->free_page_list, struct sgx_epc_page, list);
     list_del_init(&page->list);
-    sgx_nr_free_pages--;

     spin_unlock(&node->lock);
+    atomic_long_dec(&sgx_nr_free_pages);

     return page;
 }
@@ -625,9 +625,9 @@ void sgx_free_epc_page(struct sgx_epc_page *page)
     spin_lock(&node->lock);

     list_add_tail(&page->list, &node->free_page_list);
-    sgx_nr_free_pages++;

     spin_unlock(&node->lock);
+    atomic_long_inc(&sgx_nr_free_pages);
 }

 static bool __init sgx_setup_epc_section(u64 phys_addr, u64 size,
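The three SGX hunks above make one change: the free-page counter leaves the per-node lock's protection and becomes an atomic, so the reclaimer can read it without taking any lock. A C11 sketch of the pattern (not the kernel's atomic_long_t API):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_long nr_free_pages;   /* was: unsigned long under a spinlock */

    static void free_one_page(void)
    {
        /* list manipulation still happens under the per-node lock ... */
        /* ... but the global counter is updated outside of it: */
        atomic_fetch_add_explicit(&nr_free_pages, 1, memory_order_relaxed);
    }

    static int should_reclaim(long watermark)
    {
        /* lock-free read; relaxed ordering suffices for a heuristic */
        return atomic_load_explicit(&nr_free_pages,
                                    memory_order_relaxed) < watermark;
    }

    int main(void)
    {
        free_one_page();
        printf("reclaim below 8 free pages? %d\n", should_reclaim(8));
        return 0;
    }

The counter becomes slightly stale relative to the lists, which is fine here: it only gates a wakeup heuristic, not correctness.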
@@ -964,6 +964,9 @@ unsigned long __get_wchan(struct task_struct *p)
     struct unwind_state state;
     unsigned long addr = 0;

+    if (!try_get_task_stack(p))
+        return 0;
+
     for (unwind_start(&state, p, NULL, NULL); !unwind_done(&state);
          unwind_next_frame(&state)) {
         addr = unwind_get_return_address(&state);
@@ -974,6 +977,8 @@ unsigned long __get_wchan(struct task_struct *p)
             break;
     }

+    put_task_stack(p);
+
     return addr;
 }

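The fix pins the task stack before unwinding it, since the task could exit and free its stack concurrently. The general take-a-reference, bail-if-gone, drop-on-every-path shape, with toy stand-ins for try_get_task_stack() and put_task_stack():

    #include <stdbool.h>
    #include <stdio.h>

    struct stack { int refs; };

    static bool stack_tryget(struct stack *s)
    {
        if (s->refs == 0)       /* already freed under us */
            return false;
        s->refs++;
        return true;
    }

    static void stack_put(struct stack *s) { s->refs--; }

    static unsigned long walk(struct stack *s)
    {
        (void)s;
        return 0xdeadbeefUL;    /* pretend: a sleeping caller's address */
    }

    static unsigned long get_wchan(struct stack *s)
    {
        unsigned long addr = 0;

        if (!stack_tryget(s))   /* pin the stack before touching it */
            return 0;
        addr = walk(s);         /* safe: the stack cannot go away here */
        stack_put(s);           /* dropped on every path */
        return addr;
    }

    int main(void)
    {
        struct stack s = { .refs = 1 };

        printf("wchan: %#lx\n", get_wchan(&s));
        return 0;
    }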
@@ -742,6 +742,28 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
     return 0;
 }

+static char *prepare_command_line(void)
+{
+#ifdef CONFIG_CMDLINE_BOOL
+#ifdef CONFIG_CMDLINE_OVERRIDE
+    strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
+#else
+    if (builtin_cmdline[0]) {
+        /* append boot loader cmdline to builtin */
+        strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
+        strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
+        strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
+    }
+#endif
+#endif
+
+    strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
+
+    parse_early_param();
+
+    return command_line;
+}
+
 /*
  * Determine if we were loaded by an EFI loader. If so, then we have also been
  * passed the efi memmap, systab, etc., so we should use these data structures
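What the CONFIG_CMDLINE_BOOL branch of prepare_command_line() does, in isolation: append the bootloader string to the built-in one and make the result the boot command line. A user-space sketch where strncat and snprintf stand in for the kernel's strlcat/strlcpy:

    #include <stdio.h>
    #include <string.h>

    #define COMMAND_LINE_SIZE 256

    int main(void)
    {
        char builtin[COMMAND_LINE_SIZE] = "console=ttyS0 quiet";
        char bootloader[COMMAND_LINE_SIZE] = "root=/dev/sda1";
        char boot_command_line[COMMAND_LINE_SIZE];

        /* append boot loader cmdline to builtin, then adopt the result */
        strncat(builtin, " ", sizeof(builtin) - strlen(builtin) - 1);
        strncat(builtin, bootloader, sizeof(builtin) - strlen(builtin) - 1);
        snprintf(boot_command_line, sizeof(boot_command_line), "%s", builtin);

        puts(boot_command_line);  /* console=ttyS0 quiet root=/dev/sda1 */
        return 0;
    }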
@@ -830,6 +852,23 @@ void __init setup_arch(char **cmdline_p)

     x86_init.oem.arch_setup();

+    /*
+     * x86_configure_nx() is called before parse_early_param() (called by
+     * prepare_command_line()) to detect whether hardware doesn't support
+     * NX (so that the early EHCI debug console setup can safely call
+     * set_fixmap()). It may then be called again from within noexec_setup()
+     * during parsing early parameters to honor the respective command line
+     * option.
+     */
+    x86_configure_nx();
+
+    /*
+     * This parses early params and it needs to run before
+     * early_reserve_memory() because latter relies on such settings
+     * supplied as early params.
+     */
+    *cmdline_p = prepare_command_line();
+
     /*
      * Do some memory reservations *before* memory is added to memblock, so
      * memblock allocations won't overwrite it.
@@ -863,33 +902,6 @@ void __init setup_arch(char **cmdline_p)
     bss_resource.start = __pa_symbol(__bss_start);
     bss_resource.end = __pa_symbol(__bss_stop)-1;

-#ifdef CONFIG_CMDLINE_BOOL
-#ifdef CONFIG_CMDLINE_OVERRIDE
-    strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
-#else
-    if (builtin_cmdline[0]) {
-        /* append boot loader cmdline to builtin */
-        strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
-        strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
-        strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
-    }
-#endif
-#endif
-
-    strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
-    *cmdline_p = command_line;
-
-    /*
-     * x86_configure_nx() is called before parse_early_param() to detect
-     * whether hardware doesn't support NX (so that the early EHCI debug
-     * console setup can safely call set_fixmap()). It may then be called
-     * again from within noexec_setup() during parsing early parameters
-     * to honor the respective command line option.
-     */
-    x86_configure_nx();
-
-    parse_early_param();
-
 #ifdef CONFIG_MEMORY_HOTPLUG
     /*
      * Memory used by the kernel cannot be hot-removed because Linux
@@ -160,7 +160,7 @@ Efault_end:
     user_access_end();
 Efault:
     pr_alert("could not access userspace vm86 info\n");
-    force_fatal_sig(SIGSEGV);
+    force_exit_sig(SIGSEGV);
     goto exit_vm86;
 }

@@ -121,7 +121,6 @@ void flush_cache_page(struct vm_area_struct*,

 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 void flush_dcache_page(struct page *);
-void flush_dcache_folio(struct folio *);

 void local_flush_cache_range(struct vm_area_struct *vma,
         unsigned long start, unsigned long end);
@@ -138,9 +137,7 @@ void local_flush_cache_page(struct vm_area_struct *vma,
 #define flush_cache_vunmap(start,end)           do { } while (0)

 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO
 #define flush_dcache_page(page)                 do { } while (0)
-static inline void flush_dcache_folio(struct folio *folio) { }

 #define flush_icache_range local_flush_icache_range
 #define flush_cache_page(vma, addr, pfn)        do { } while (0)
@@ -419,3 +419,4 @@
 446  common  landlock_restrict_self  sys_landlock_restrict_self
 # 447 reserved for memfd_secret
 448  common  process_mrelease        sys_process_mrelease
+449  common  futex_waitv             sys_futex_waitv
block/bdev.c
@@ -753,8 +753,7 @@ struct block_device *blkdev_get_no_open(dev_t dev)

     if (!bdev)
         return NULL;
-    if ((bdev->bd_disk->flags & GENHD_FL_HIDDEN) ||
-        !try_module_get(bdev->bd_disk->fops->owner)) {
+    if ((bdev->bd_disk->flags & GENHD_FL_HIDDEN)) {
         put_device(&bdev->bd_device);
         return NULL;
     }
@@ -764,7 +763,6 @@

 void blkdev_put_no_open(struct block_device *bdev)
 {
-    module_put(bdev->bd_disk->fops->owner);
     put_device(&bdev->bd_device);
 }

@@ -820,12 +818,14 @@ struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
     ret = -ENXIO;
     if (!disk_live(disk))
         goto abort_claiming;
+    if (!try_module_get(disk->fops->owner))
+        goto abort_claiming;
     if (bdev_is_partition(bdev))
         ret = blkdev_get_part(bdev, mode);
     else
         ret = blkdev_get_whole(bdev, mode);
     if (ret)
-        goto abort_claiming;
+        goto put_module;
     if (mode & FMODE_EXCL) {
         bd_finish_claiming(bdev, holder);

@@ -847,7 +847,8 @@ struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
     if (unblock_events)
         disk_unblock_events(disk);
     return bdev;
-
+put_module:
+    module_put(disk->fops->owner);
 abort_claiming:
     if (mode & FMODE_EXCL)
         bd_abort_claiming(bdev, holder);
@@ -956,6 +957,7 @@ void blkdev_put(struct block_device *bdev, fmode_t mode)
         blkdev_put_whole(bdev, mode);
     mutex_unlock(&disk->open_mutex);

+    module_put(disk->fops->owner);
     blkdev_put_no_open(bdev);
 }
 EXPORT_SYMBOL(blkdev_put);
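The bdev hunks above move the driver-module reference so it is acquired around the actual open and dropped only after release, keeping the module pinned exactly for as long as its code can run. A minimal sketch of the pairing (toy counter, not the real module refcount API):

    #include <stdbool.h>
    #include <stdio.h>

    static int module_refs;   /* stand-in for the driver module refcount */

    static bool try_module_get(void) { module_refs++; return true; }
    static void module_put(void) { module_refs--; }

    static int dev_open(void)
    {
        if (!try_module_get())  /* pin the driver before calling into it */
            return -1;
        /* ... driver ->open() runs here ... */
        return 0;
    }

    static void dev_close(void)
    {
        /* ... driver ->release() runs here ... */
        module_put();           /* only now may the module be unloaded */
    }

    int main(void)
    {
        if (dev_open() == 0)
            dev_close();
        printf("refs after close: %d\n", module_refs);  /* 0: balanced */
        return 0;
    }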
@@ -640,7 +640,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
      */
     ret = blk_queue_enter(q, 0);
     if (ret)
-        return ret;
+        goto fail;

     rcu_read_lock();
     spin_lock_irq(&q->queue_lock);
@@ -676,13 +676,13 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
         new_blkg = blkg_alloc(pos, q, GFP_KERNEL);
         if (unlikely(!new_blkg)) {
             ret = -ENOMEM;
-            goto fail;
+            goto fail_exit_queue;
         }

         if (radix_tree_preload(GFP_KERNEL)) {
             blkg_free(new_blkg);
             ret = -ENOMEM;
-            goto fail;
+            goto fail_exit_queue;
         }

         rcu_read_lock();
@@ -722,9 +722,10 @@ fail_preloaded:
 fail_unlock:
     spin_unlock_irq(&q->queue_lock);
     rcu_read_unlock();
+fail_exit_queue:
+    blk_queue_exit(q);
 fail:
     blkdev_put_no_open(bdev);
-    blk_queue_exit(q);
     /*
      * If queue was bypassing, we should retry.  Do so after a
      * short msleep().  It isn't strictly necessary but queue
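The new fail_exit_queue label keeps teardown in reverse order of setup: each label undoes one acquisition and falls through to the undo of the earlier ones. The generic shape of such a goto ladder, not the kernel code:

    #include <stdio.h>

    static int step(int ok, const char *name)
    {
        printf("%s: %s\n", name, ok ? "ok" : "failed");
        return ok;
    }

    static int setup(void)
    {
        if (!step(1, "get_bdev"))
            return -1;
        if (!step(1, "queue_enter"))
            goto fail;
        if (!step(0, "alloc"))          /* fails here on purpose */
            goto fail_exit_queue;
        return 0;

    fail_exit_queue:
        step(1, "queue_exit");          /* undo queue_enter ... */
    fail:
        step(1, "put_bdev");            /* ... then undo get_bdev */
        return -1;
    }

    int main(void)
    {
        return setup() ? 1 : 0;
    }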
@@ -363,8 +363,10 @@ void blk_cleanup_queue(struct request_queue *q)
     blk_queue_flag_set(QUEUE_FLAG_DEAD, q);

     blk_sync_queue(q);
-    if (queue_is_mq(q))
+    if (queue_is_mq(q)) {
+        blk_mq_cancel_work_sync(q);
         blk_mq_exit_queue(q);
+    }

     /*
      * In theory, request pool of sched_tags belongs to request queue.
@@ -1015,6 +1017,7 @@ EXPORT_SYMBOL(submit_bio);
 /**
  * bio_poll - poll for BIO completions
  * @bio: bio to poll for
+ * @iob: batches of IO
  * @flags: BLK_POLL_* flags that control the behavior
  *
  * Poll for completions on queue associated with the bio. Returns number of
@@ -379,7 +379,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
  * @rq is being submitted.  Analyze what needs to be done and put it on the
  * right queue.
  */
-bool blk_insert_flush(struct request *rq)
+void blk_insert_flush(struct request *rq)
 {
     struct request_queue *q = rq->q;
     unsigned long fflags = q->queue_flags;  /* may change, cache */
@@ -409,7 +409,7 @@ bool blk_insert_flush(struct request *rq)
      */
     if (!policy) {
         blk_mq_end_request(rq, 0);
-        return true;
+        return;
     }

     BUG_ON(rq->bio != rq->biotail); /*assumes zero or single bio rq */
@@ -420,8 +420,10 @@ bool blk_insert_flush(struct request *rq)
      * for normal execution.
      */
     if ((policy & REQ_FSEQ_DATA) &&
-        !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH)))
-        return false;
+        !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
+        blk_mq_request_bypass_insert(rq, false, true);
+        return;
+    }

     /*
      * @rq should go through flush machinery. Mark it part of flush
@@ -437,8 +439,6 @@ bool blk_insert_flush(struct request *rq)
     spin_lock_irq(&fq->mq_flush_lock);
     blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
     spin_unlock_irq(&fq->mq_flush_lock);
-
-    return true;
 }

 /**
@@ -2543,8 +2543,7 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
     return NULL;
 }

-static inline bool blk_mq_can_use_cached_rq(struct request *rq,
-        struct bio *bio)
+static inline bool blk_mq_can_use_cached_rq(struct request *rq, struct bio *bio)
 {
     if (blk_mq_get_hctx_type(bio->bi_opf) != rq->mq_hctx->type)
         return false;
@@ -2565,7 +2564,6 @@ static inline struct request *blk_mq_get_request(struct request_queue *q,
     bool checked = false;

     if (plug) {
-
         rq = rq_list_peek(&plug->cached_rq);
         if (rq && rq->q == q) {
             if (unlikely(!submit_bio_checks(bio)))
@@ -2587,12 +2585,14 @@
 fallback:
     if (unlikely(bio_queue_enter(bio)))
         return NULL;
-    if (!checked && !submit_bio_checks(bio))
-        return NULL;
+    if (unlikely(!checked && !submit_bio_checks(bio)))
+        goto out_put;
     rq = blk_mq_get_new_requests(q, plug, bio, nsegs, same_queue_rq);
-    if (!rq)
-        blk_queue_exit(q);
-    return rq;
+    if (rq)
+        return rq;
+out_put:
+    blk_queue_exit(q);
+    return NULL;
 }

 /**
@@ -2647,8 +2647,10 @@ void blk_mq_submit_bio(struct bio *bio)
         return;
     }

-    if (op_is_flush(bio->bi_opf) && blk_insert_flush(rq))
+    if (op_is_flush(bio->bi_opf)) {
+        blk_insert_flush(rq);
         return;
+    }

     if (plug && (q->nr_hw_queues == 1 ||
         blk_mq_is_shared_tags(rq->mq_hctx->flags) ||
@@ -4417,6 +4419,19 @@ unsigned int blk_mq_rq_cpu(struct request *rq)
 }
 EXPORT_SYMBOL(blk_mq_rq_cpu);

+void blk_mq_cancel_work_sync(struct request_queue *q)
+{
+    if (queue_is_mq(q)) {
+        struct blk_mq_hw_ctx *hctx;
+        int i;
+
+        cancel_delayed_work_sync(&q->requeue_work);
+
+        queue_for_each_hw_ctx(q, hctx, i)
+            cancel_delayed_work_sync(&hctx->run_work);
+    }
+}
+
 static int __init blk_mq_init(void)
 {
     int i;
@@ -128,6 +128,8 @@ extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
 void blk_mq_free_plug_rqs(struct blk_plug *plug);
 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

+void blk_mq_cancel_work_sync(struct request_queue *q);
+
 void blk_mq_release(struct request_queue *q);

 static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
@@ -791,16 +791,6 @@ static void blk_release_queue(struct kobject *kobj)

     blk_free_queue_stats(q->stats);

-    if (queue_is_mq(q)) {
-        struct blk_mq_hw_ctx *hctx;
-        int i;
-
-        cancel_delayed_work_sync(&q->requeue_work);
-
-        queue_for_each_hw_ctx(q, hctx, i)
-            cancel_delayed_work_sync(&hctx->run_work);
-    }
-
     blk_exit_queue(q);

     blk_queue_free_zone_bitmaps(q);
@@ -271,7 +271,7 @@ void __blk_account_io_done(struct request *req, u64 now);
  */
 #define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

-bool blk_insert_flush(struct request *rq);
+void blk_insert_flush(struct request *rq);

 int elevator_switch_mq(struct request_queue *q,
                        struct elevator_type *new_e);
@@ -694,12 +694,18 @@ void elevator_init_mq(struct request_queue *q)
     if (!e)
         return;

+    /*
+     * We are called before adding disk, when there isn't any FS I/O,
+     * so freezing queue plus canceling dispatch work is enough to
+     * drain any dispatch activities originated from passthrough
+     * requests, then no need to quiesce queue which may add long boot
+     * latency, especially when lots of disks are involved.
+     */
     blk_mq_freeze_queue(q);
-    blk_mq_quiesce_queue(q);
+    blk_mq_cancel_work_sync(q);

     err = blk_mq_init_sched(q, e);

-    blk_mq_unquiesce_queue(q);
     blk_mq_unfreeze_queue(q);

     if (err) {
@@ -1111,6 +1111,8 @@ static void disk_release(struct device *dev)
     might_sleep();
     WARN_ON_ONCE(disk_live(disk));

+    blk_mq_cancel_work_sync(disk->queue);
+
     disk_release_events(disk);
     kfree(disk->random);
     xa_destroy(&disk->part_tbl);
@@ -69,7 +69,14 @@ int ioprio_check_cap(int ioprio)

     switch (class) {
         case IOPRIO_CLASS_RT:
-            if (!capable(CAP_SYS_NICE) && !capable(CAP_SYS_ADMIN))
+            /*
+             * Originally this only checked for CAP_SYS_ADMIN,
+             * which was implicitly allowed for pid 0 by security
+             * modules such as SELinux. Make sure we check
+             * CAP_SYS_ADMIN first to avoid a denial/avc for
+             * possibly missing CAP_SYS_NICE permission.
+             */
+            if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_NICE))
                 return -EPERM;
             fallthrough;
             /* rt has prio field too */
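The reordering matters because a failed capable() query can be audited as a security-module denial; testing CAP_SYS_ADMIN first lets tasks that hold it short-circuit past the CAP_SYS_NICE query entirely, so nothing is logged. A sketch with a stubbed capability check:

    #include <stdbool.h>
    #include <stdio.h>

    static bool has_admin = true;   /* assumed: task holds CAP_SYS_ADMIN */
    static bool has_nice = false;

    static bool capable(const char *name, bool held)
    {
        if (!held)
            printf("avc: denied { %s }\n", name);  /* audit side effect */
        return held;
    }

    static int ioprio_check(void)
    {
        /* CAP_SYS_ADMIN first: when it is held, && short-circuits and
         * the CAP_SYS_NICE query is never made, never audited. */
        if (!capable("cap_sys_admin", has_admin) &&
            !capable("cap_sys_nice", has_nice))
            return -1;  /* -EPERM */
        return 0;
    }

    int main(void)
    {
        return ioprio_check() ? 1 : 0;
    }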
@@ -998,7 +998,14 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
 static int cppc_get_perf(int cpunum, enum cppc_regs reg_idx, u64 *perf)
 {
     struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
-    struct cpc_register_resource *reg = &cpc_desc->cpc_regs[reg_idx];
+    struct cpc_register_resource *reg;
+
+    if (!cpc_desc) {
+        pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
+        return -ENODEV;
+    }
+
+    reg = &cpc_desc->cpc_regs[reg_idx];

     if (CPC_IN_PCC(reg)) {
         int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
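The fix defers the dereference until after the descriptor is validated; initializing reg at its declaration dereferenced a possibly NULL cpc_desc. The before/after shape in miniature:

    #include <stdio.h>

    struct desc {
        int regs[4];
    };

    static int get_perf(const struct desc *d, int idx, int *out)
    {
        const int *reg;

        if (!d) {               /* validate before any dereference */
            fprintf(stderr, "no descriptor\n");
            return -1;          /* maps to -ENODEV in the kernel code */
        }
        reg = &d->regs[idx];    /* safe only after the NULL check */
        *out = *reg;
        return 0;
    }

    int main(void)
    {
        int v = 0;

        if (get_perf(NULL, 0, &v))  /* fails cleanly instead of crashing */
            return 1;
        printf("%d\n", v);
        return 0;
    }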
Some files were not shown because too many files have changed in this diff.