commit 51d3da3439

Merge branch '6.6.54'

Conflicts:
	drivers/md/dm.c
	kernel/sched/fair.c

Signed-off-by: Jianping Liu <frankjpliu@tencent.com>
@@ -135,7 +135,6 @@ GTAGS
 # id-utils files
 ID
 *.orig
 *~
 \#*#
@@ -3,7 +3,7 @@ KernelVersion:
 Contact: linux-iio@vger.kernel.org
 Description:
 Reading this returns the valid values that can be written to the
-on_altvoltage0_mode attribute:
+filter_mode attribute:
 
 - auto -> Adjust bandpass filter to track changes in input clock rate.
 - manual -> disable/unregister the clock rate notifier / input clock tracking.
@@ -56,6 +56,8 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | Ampere         | AmpereOne       | AC03_CPU_38     | AMPERE_ERRATUM_AC03_CPU_38  |
 +----------------+-----------------+-----------------+-----------------------------+
+| Ampere         | AmpereOne AC04  | AC04_CPU_10     | AMPERE_ERRATUM_AC03_CPU_38  |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A510     | #2457168        | ARM64_ERRATUM_2457168       |
 +----------------+-----------------+-----------------+-----------------------------+
@@ -23,7 +23,6 @@ properties:
 - ak8963
 - ak09911
 - ak09912
 - ak09916
 deprecated: true
 
 reg:
@@ -15,12 +15,19 @@ allOf:
 
 properties:
   compatible:
-    enum:
-      - nxp,imx8dxl-fspi
-      - nxp,imx8mm-fspi
-      - nxp,imx8mp-fspi
-      - nxp,imx8qxp-fspi
-      - nxp,lx2160a-fspi
+    oneOf:
+      - enum:
+          - nxp,imx8dxl-fspi
+          - nxp,imx8mm-fspi
+          - nxp,imx8mp-fspi
+          - nxp,imx8qxp-fspi
+          - nxp,imx8ulp-fspi
+          - nxp,lx2160a-fspi
+      - items:
+          - enum:
+              - nxp,imx93-fspi
+              - nxp,imx95-fspi
+          - const: nxp,imx8mm-fspi
 
   reg:
     items:
@@ -540,7 +540,7 @@ at module load time (for a module) with::
 alerts_broken
 
 The addresses are normal I2C addresses. The adapter is the string
-name of the adapter, as shown in /sys/class/i2c-adapter/i2c-<n>/name.
+name of the adapter, as shown in /sys/bus/i2c/devices/i2c-<n>/name.
 It is *NOT* i2c-<n> itself. Also, the comparison is done ignoring
 spaces, so if the name is "This is an I2C chip" you can say
 adapter_name=ThisisanI2cchip. This is because it's hard to pass in
@@ -9,7 +9,7 @@ KVM Lock Overview
 
 The acquisition orders for mutexes are as follows:
 
-- cpus_read_lock() is taken outside kvm_lock
+- cpus_read_lock() is taken outside kvm_lock and kvm_usage_lock
 
 - kvm->lock is taken outside vcpu->mutex
@@ -24,6 +24,13 @@ The acquisition orders for mutexes are as follows:
 are taken on the waiting side when modifying memslots, so MMU notifiers
 must not take either kvm->slots_lock or kvm->slots_arch_lock.
 
+cpus_read_lock() vs kvm_lock:
+
+- Taking cpus_read_lock() outside of kvm_lock is problematic, despite that
+  being the official ordering, as it is quite easy to unknowingly trigger
+  cpus_read_lock() while holding kvm_lock.  Use caution when walking vm_list,
+  e.g. avoid complex operations when possible.
+
 For SRCU:
 
 - ``synchronize_srcu(&kvm->srcu)`` is called inside critical sections
@@ -228,10 +235,17 @@ time it will be set using the Dirty tracking mechanism described above.
 :Type: mutex
 :Arch: any
 :Protects: - vm_list
-           - kvm_usage_count
-           - hardware virtualization enable/disable
-:Comment: KVM also disables CPU hotplug via cpus_read_lock() during
-          enable/disable.
+
+``kvm_usage_lock``
+^^^^^^^^^^^^^^^^^^
+
+:Type: mutex
+:Arch: any
+:Protects: - kvm_usage_count
+           - hardware virtualization enable/disable
+:Comment: Exists because using kvm_lock leads to deadlock (see earlier comment
+          on cpus_read_lock() vs kvm_lock). Note, KVM also disables CPU hotplug via
+          cpus_read_lock() when enabling/disabling virtualization.
 
 ``kvm->mn_invalidate_lock``
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -291,11 +305,12 @@ time it will be set using the Dirty tracking mechanism described above.
 wakeup.
 
 ``vendor_module_lock``
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^^^^^^
 :Type: mutex
 :Arch: x86
 :Protects: loading a vendor module (kvm_amd or kvm_intel)
-:Comment: Exists because using kvm_lock leads to deadlock. cpu_hotplug_lock is
-  taken outside of kvm_lock, e.g. in KVM's CPU online/offline callbacks, and
-  many operations need to take cpu_hotplug_lock when loading a vendor module,
-  e.g. updating static calls.
+:Comment: Exists because using kvm_lock leads to deadlock. kvm_lock is taken
+  in notifiers, e.g. __kvmclock_cpufreq_notifier(), that may be invoked while
+  cpu_hotplug_lock is held, e.g. from cpufreq_boost_trigger_state(), and many
+  operations need to take cpu_hotplug_lock when loading a vendor module, e.g.
+  updating static calls.
Makefile

@@ -8,7 +8,7 @@ else
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 6
 PATCHLEVEL = 6
-SUBLEVEL = 53
+SUBLEVEL = 54
 EXTRAVERSION =
 NAME = Hurr durr I'ma ninja sloth
@@ -1312,7 +1312,7 @@
 compatible = "microchip,sam9x60-rtt", "atmel,at91sam9260-rtt";
 reg = <0xfffffe20 0x20>;
 interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
-clocks = <&clk32k 0>;
+clocks = <&clk32k 1>;
 };
 
 pit: timer@fffffe40 {
@@ -1338,7 +1338,7 @@
 compatible = "microchip,sam9x60-rtc", "atmel,at91sam9x5-rtc";
 reg = <0xfffffea8 0x100>;
 interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
-clocks = <&clk32k 0>;
+clocks = <&clk32k 1>;
 };
 
 watchdog: watchdog@ffffff80 {
@@ -272,7 +272,7 @@
 compatible = "microchip,sama7g5-rtt", "microchip,sam9x60-rtt", "atmel,at91sam9260-rtt";
 reg = <0xe001d020 0x30>;
 interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
-clocks = <&clk32k 0>;
+clocks = <&clk32k 1>;
 };
 
 clk32k: clock-controller@e001d050 {
@@ -366,7 +366,7 @@
 };
 
 pinctrl_tsc: tscgrp {
-fsl,pin = <
+fsl,pins = <
 MX6UL_PAD_GPIO1_IO01__GPIO1_IO01 0xb0
 MX6UL_PAD_GPIO1_IO02__GPIO1_IO02 0xb0
 MX6UL_PAD_GPIO1_IO03__GPIO1_IO03 0xb0
@@ -350,7 +350,7 @@
 
 &iomuxc_lpsr {
 pinctrl_enet1_phy_interrupt: enet1phyinterruptgrp {
-fsl,phy = <
+fsl,pins = <
 MX7D_PAD_LPSR_GPIO1_IO02__GPIO1_IO2 0x08
 >;
 };
@@ -359,7 +359,7 @@ static unsigned long ep93xx_div_recalc_rate(struct clk_hw *hw,
 u32 val = __raw_readl(psc->reg);
 u8 index = (val & psc->mask) >> psc->shift;
 
-if (index > psc->num_div)
+if (index >= psc->num_div)
 return 0;
 
 return DIV_ROUND_UP_ULL(parent_rate, psc->div[index]);
@@ -66,6 +66,7 @@ static void __init realview_smp_prepare_cpus(unsigned int max_cpus)
 return;
 }
 map = syscon_node_to_regmap(np);
+of_node_put(np);
 if (IS_ERR(map)) {
 pr_err("PLATSMP: No syscon regmap\n");
 return;
@@ -64,33 +64,37 @@
 
 #ifdef CONFIG_AS_VFP_VMRS_FPINST
 
-#define fmrx(_vfp_) ({ \
-u32 __v; \
-asm(".fpu vfpv2\n" \
-"vmrs %0, " #_vfp_ \
-: "=r" (__v) : : "cc"); \
-__v; \
-})
+#define fmrx(_vfp_) ({ \
+u32 __v; \
+asm volatile (".fpu vfpv2\n" \
+"vmrs %0, " #_vfp_ \
+: "=r" (__v) : : "cc"); \
+__v; \
+})
 
-#define fmxr(_vfp_,_var_) \
-asm(".fpu vfpv2\n" \
-"vmsr " #_vfp_ ", %0" \
-: : "r" (_var_) : "cc")
+#define fmxr(_vfp_, _var_) ({ \
+asm volatile (".fpu vfpv2\n" \
+"vmsr " #_vfp_ ", %0" \
+: : "r" (_var_) : "cc"); \
+})
 
 #else
 
 #define vfpreg(_vfp_) #_vfp_
 
-#define fmrx(_vfp_) ({ \
-u32 __v; \
-asm("mrc p10, 7, %0, " vfpreg(_vfp_) ", cr0, 0 @ fmrx %0, " #_vfp_ \
-: "=r" (__v) : : "cc"); \
-__v; \
-})
+#define fmrx(_vfp_) ({ \
+u32 __v; \
+asm volatile ("mrc p10, 7, %0, " vfpreg(_vfp_) "," \
+"cr0, 0 @ fmrx %0, " #_vfp_ \
+: "=r" (__v) : : "cc"); \
+__v; \
+})
 
-#define fmxr(_vfp_,_var_) \
-asm("mcr p10, 7, %0, " vfpreg(_vfp_) ", cr0, 0 @ fmxr " #_vfp_ ", %0" \
-: : "r" (_var_) : "cc")
+#define fmxr(_vfp_, _var_) ({ \
+asm volatile ("mcr p10, 7, %0, " vfpreg(_vfp_) "," \
+"cr0, 0 @ fmxr " #_vfp_ ", %0" \
+: : "r" (_var_) : "cc"); \
+})
 
 #endif
@@ -422,7 +422,7 @@ config AMPERE_ERRATUM_AC03_CPU_38
 default y
 help
 This option adds an alternative code sequence to work around Ampere
-erratum AC03_CPU_38 on AmpereOne.
+errata AC03_CPU_38 and AC04_CPU_10 on AmpereOne.
 
 The affected design reports FEAT_HAFDBS as not implemented in
 ID_AA64MMFR1_EL1.HAFDBS, but (V)TCR_ELx.{HA,HD} are not RES0
@@ -32,7 +32,7 @@
 device_type = "memory";
 reg = <0x0 0x80000000 0x3da00000>,
 <0x0 0xc0000000 0x40000000>,
-<0x8 0x80000000 0x40000000>;
+<0x8 0x80000000 0x80000000>;
 };
 
 gpio-keys {
@@ -731,7 +731,7 @@
 opp-900000000-3 {
 opp-hz = /bits/ 64 <900000000>;
 opp-microvolt = <850000>;
-opp-supported-hw = <0x8>;
+opp-supported-hw = <0xcf>;
 };
 
 opp-900000000-4 {
@@ -743,13 +743,13 @@
 opp-900000000-5 {
 opp-hz = /bits/ 64 <900000000>;
 opp-microvolt = <825000>;
-opp-supported-hw = <0x30>;
+opp-supported-hw = <0x20>;
 };
 
 opp-950000000-3 {
 opp-hz = /bits/ 64 <950000000>;
 opp-microvolt = <900000>;
-opp-supported-hw = <0x8>;
+opp-supported-hw = <0xcf>;
 };
 
 opp-950000000-4 {
@@ -761,13 +761,13 @@
 opp-950000000-5 {
 opp-hz = /bits/ 64 <950000000>;
 opp-microvolt = <850000>;
-opp-supported-hw = <0x30>;
+opp-supported-hw = <0x20>;
 };
 
 opp-1000000000-3 {
 opp-hz = /bits/ 64 <1000000000>;
 opp-microvolt = <950000>;
-opp-supported-hw = <0x8>;
+opp-supported-hw = <0xcf>;
 };
 
 opp-1000000000-4 {
@@ -779,7 +779,7 @@
 opp-1000000000-5 {
 opp-hz = /bits/ 64 <1000000000>;
 opp-microvolt = <875000>;
-opp-supported-hw = <0x30>;
+opp-supported-hw = <0x20>;
 };
 };
@@ -1312,6 +1312,7 @@
 usb2-lpm-disable;
 vusb33-supply = <&mt6359_vusb_ldo_reg>;
 vbus-supply = <&usb_vbus>;
+mediatek,u3p-dis-msk = <1>;
 };
 
 #include <arm/cros-ec-keyboard.dtsi>
@@ -2766,10 +2766,10 @@
 compatible = "mediatek,mt8195-dp-intf";
 reg = <0 0x1c015000 0 0x1000>;
 interrupts = <GIC_SPI 657 IRQ_TYPE_LEVEL_HIGH 0>;
-clocks = <&vdosys0 CLK_VDO0_DP_INTF0>,
-<&vdosys0 CLK_VDO0_DP_INTF0_DP_INTF>,
+clocks = <&vdosys0 CLK_VDO0_DP_INTF0_DP_INTF>,
+<&vdosys0 CLK_VDO0_DP_INTF0>,
 <&apmixedsys CLK_APMIXED_TVDPLL1>;
-clock-names = "engine", "pixel", "pll";
+clock-names = "pixel", "engine", "pll";
 status = "disabled";
 };
@@ -3036,10 +3036,10 @@
 reg = <0 0x1c113000 0 0x1000>;
 interrupts = <GIC_SPI 513 IRQ_TYPE_LEVEL_HIGH 0>;
 power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>;
-clocks = <&vdosys1 CLK_VDO1_DP_INTF0_MM>,
-<&vdosys1 CLK_VDO1_DPINTF>,
+clocks = <&vdosys1 CLK_VDO1_DPINTF>,
+<&vdosys1 CLK_VDO1_DP_INTF0_MM>,
 <&apmixedsys CLK_APMIXED_TVDPLL2>;
-clock-names = "engine", "pixel", "pll";
+clock-names = "pixel", "engine", "pll";
 status = "disabled";
 };
@@ -1951,6 +1951,7 @@
 reg = <0x0 0x15000000 0x0 0x100000>;
 #iommu-cells = <2>;
 #global-interrupts = <2>;
+dma-coherent;
 
 interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>,
 <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
@@ -2089,6 +2090,7 @@
 reg = <0x0 0x15200000 0x0 0x80000>;
 #iommu-cells = <2>;
 #global-interrupts = <2>;
+dma-coherent;
 
 interrupts = <GIC_SPI 920 IRQ_TYPE_LEVEL_HIGH>,
 <GIC_SPI 921 IRQ_TYPE_LEVEL_HIGH>,
@@ -145,8 +145,8 @@
 #interrupt-cells = <3>;
 #address-cells = <0>;
 interrupt-controller;
-reg = <0x0 0x11900000 0 0x40000>,
-<0x0 0x11940000 0 0x60000>;
+reg = <0x0 0x11900000 0 0x20000>,
+<0x0 0x11940000 0 0x40000>;
 interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_LOW>;
 };
 };
@@ -997,8 +997,8 @@
 #interrupt-cells = <3>;
 #address-cells = <0>;
 interrupt-controller;
-reg = <0x0 0x11900000 0 0x40000>,
-<0x0 0x11940000 0 0x60000>;
+reg = <0x0 0x11900000 0 0x20000>,
+<0x0 0x11940000 0 0x40000>;
 interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_LOW>;
 };
@@ -1004,8 +1004,8 @@
 #interrupt-cells = <3>;
 #address-cells = <0>;
 interrupt-controller;
-reg = <0x0 0x11900000 0 0x40000>,
-<0x0 0x11940000 0 0x60000>;
+reg = <0x0 0x11900000 0 0x20000>,
+<0x0 0x11940000 0 0x40000>;
 interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_LOW>;
 };
@@ -32,12 +32,12 @@
 backlight: edp-backlight {
 compatible = "pwm-backlight";
 power-supply = <&vcc_12v>;
-pwms = <&pwm0 0 740740 0>;
+pwms = <&pwm0 0 125000 0>;
 };
 
 bat: battery {
 compatible = "simple-battery";
-charge-full-design-microamp-hours = <9800000>;
+charge-full-design-microamp-hours = <10000000>;
 voltage-max-design-microvolt = <4350000>;
 voltage-min-design-microvolt = <3000000>;
 };
@@ -13,7 +13,7 @@
 
 / {
 model = "Hardkernel ODROID-M1";
-compatible = "rockchip,rk3568-odroid-m1", "rockchip,rk3568";
+compatible = "hardkernel,odroid-m1", "rockchip,rk3568";
 
 aliases {
 ethernet0 = &gmac0;
@@ -123,7 +123,7 @@
 no-map;
 };
 
-c66_1_dma_memory_region: c66-dma-memory@a6000000 {
+c66_0_dma_memory_region: c66-dma-memory@a6000000 {
 compatible = "shared-dma-pool";
 reg = <0x00 0xa6000000 0x00 0x100000>;
 no-map;
@@ -135,7 +135,7 @@
 no-map;
 };
 
-c66_0_dma_memory_region: c66-dma-memory@a7000000 {
+c66_1_dma_memory_region: c66-dma-memory@a7000000 {
 compatible = "shared-dma-pool";
 reg = <0x00 0xa7000000 0x00 0x100000>;
 no-map;
@@ -119,7 +119,7 @@
 no-map;
 };
 
-c66_1_dma_memory_region: c66-dma-memory@a6000000 {
+c66_0_dma_memory_region: c66-dma-memory@a6000000 {
 compatible = "shared-dma-pool";
 reg = <0x00 0xa6000000 0x00 0x100000>;
 no-map;
@@ -131,7 +131,7 @@
 no-map;
 };
 
-c66_0_dma_memory_region: c66-dma-memory@a7000000 {
+c66_1_dma_memory_region: c66-dma-memory@a7000000 {
 compatible = "shared-dma-pool";
 reg = <0x00 0xa7000000 0x00 0x100000>;
 no-map;
@@ -150,6 +150,7 @@
 #define APPLE_CPU_PART_M2_AVALANCHE_MAX 0x039
 
 #define AMPERE_CPU_PART_AMPERE1 0xAC3
+#define AMPERE_CPU_PART_AMPERE1A 0xAC4
 
 #define MICROSOFT_CPU_PART_AZURE_COBALT_100 0xD49 /* Based on r0p0 of ARM Neoverse N2 */
@@ -226,6 +227,7 @@
 #define MIDR_APPLE_M2_BLIZZARD_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_BLIZZARD_MAX)
 #define MIDR_APPLE_M2_AVALANCHE_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_AVALANCHE_MAX)
 #define MIDR_AMPERE1 MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1)
+#define MIDR_AMPERE1A MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1A)
 #define MIDR_MICROSOFT_AZURE_COBALT_100 MIDR_CPU_MODEL(ARM_CPU_IMP_MICROSOFT, MICROSOFT_CPU_PART_AZURE_COBALT_100)
 
 /* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */
@@ -10,63 +10,63 @@
 #include <asm/memory.h>
 #include <asm/sysreg.h>
 
-#define ESR_ELx_EC_UNKNOWN (0x00)
-#define ESR_ELx_EC_WFx (0x01)
+#define ESR_ELx_EC_UNKNOWN UL(0x00)
+#define ESR_ELx_EC_WFx UL(0x01)
 /* Unallocated EC: 0x02 */
-#define ESR_ELx_EC_CP15_32 (0x03)
-#define ESR_ELx_EC_CP15_64 (0x04)
-#define ESR_ELx_EC_CP14_MR (0x05)
-#define ESR_ELx_EC_CP14_LS (0x06)
-#define ESR_ELx_EC_FP_ASIMD (0x07)
-#define ESR_ELx_EC_CP10_ID (0x08) /* EL2 only */
-#define ESR_ELx_EC_PAC (0x09) /* EL2 and above */
+#define ESR_ELx_EC_CP15_32 UL(0x03)
+#define ESR_ELx_EC_CP15_64 UL(0x04)
+#define ESR_ELx_EC_CP14_MR UL(0x05)
+#define ESR_ELx_EC_CP14_LS UL(0x06)
+#define ESR_ELx_EC_FP_ASIMD UL(0x07)
+#define ESR_ELx_EC_CP10_ID UL(0x08) /* EL2 only */
+#define ESR_ELx_EC_PAC UL(0x09) /* EL2 and above */
 /* Unallocated EC: 0x0A - 0x0B */
-#define ESR_ELx_EC_CP14_64 (0x0C)
-#define ESR_ELx_EC_BTI (0x0D)
-#define ESR_ELx_EC_ILL (0x0E)
+#define ESR_ELx_EC_CP14_64 UL(0x0C)
+#define ESR_ELx_EC_BTI UL(0x0D)
+#define ESR_ELx_EC_ILL UL(0x0E)
 /* Unallocated EC: 0x0F - 0x10 */
-#define ESR_ELx_EC_SVC32 (0x11)
-#define ESR_ELx_EC_HVC32 (0x12) /* EL2 only */
-#define ESR_ELx_EC_SMC32 (0x13) /* EL2 and above */
+#define ESR_ELx_EC_SVC32 UL(0x11)
+#define ESR_ELx_EC_HVC32 UL(0x12) /* EL2 only */
+#define ESR_ELx_EC_SMC32 UL(0x13) /* EL2 and above */
 /* Unallocated EC: 0x14 */
-#define ESR_ELx_EC_SVC64 (0x15)
-#define ESR_ELx_EC_HVC64 (0x16) /* EL2 and above */
-#define ESR_ELx_EC_SMC64 (0x17) /* EL2 and above */
-#define ESR_ELx_EC_SYS64 (0x18)
-#define ESR_ELx_EC_SVE (0x19)
-#define ESR_ELx_EC_ERET (0x1a) /* EL2 only */
+#define ESR_ELx_EC_SVC64 UL(0x15)
+#define ESR_ELx_EC_HVC64 UL(0x16) /* EL2 and above */
+#define ESR_ELx_EC_SMC64 UL(0x17) /* EL2 and above */
+#define ESR_ELx_EC_SYS64 UL(0x18)
+#define ESR_ELx_EC_SVE UL(0x19)
+#define ESR_ELx_EC_ERET UL(0x1a) /* EL2 only */
 /* Unallocated EC: 0x1B */
-#define ESR_ELx_EC_FPAC (0x1C) /* EL1 and above */
-#define ESR_ELx_EC_SME (0x1D)
+#define ESR_ELx_EC_FPAC UL(0x1C) /* EL1 and above */
+#define ESR_ELx_EC_SME UL(0x1D)
 /* Unallocated EC: 0x1E */
-#define ESR_ELx_EC_IMP_DEF (0x1f) /* EL3 only */
-#define ESR_ELx_EC_IABT_LOW (0x20)
-#define ESR_ELx_EC_IABT_CUR (0x21)
-#define ESR_ELx_EC_PC_ALIGN (0x22)
+#define ESR_ELx_EC_IMP_DEF UL(0x1f) /* EL3 only */
+#define ESR_ELx_EC_IABT_LOW UL(0x20)
+#define ESR_ELx_EC_IABT_CUR UL(0x21)
+#define ESR_ELx_EC_PC_ALIGN UL(0x22)
 /* Unallocated EC: 0x23 */
-#define ESR_ELx_EC_DABT_LOW (0x24)
-#define ESR_ELx_EC_DABT_CUR (0x25)
-#define ESR_ELx_EC_SP_ALIGN (0x26)
-#define ESR_ELx_EC_MOPS (0x27)
-#define ESR_ELx_EC_FP_EXC32 (0x28)
+#define ESR_ELx_EC_DABT_LOW UL(0x24)
+#define ESR_ELx_EC_DABT_CUR UL(0x25)
+#define ESR_ELx_EC_SP_ALIGN UL(0x26)
+#define ESR_ELx_EC_MOPS UL(0x27)
+#define ESR_ELx_EC_FP_EXC32 UL(0x28)
 /* Unallocated EC: 0x29 - 0x2B */
-#define ESR_ELx_EC_FP_EXC64 (0x2C)
+#define ESR_ELx_EC_FP_EXC64 UL(0x2C)
 /* Unallocated EC: 0x2D - 0x2E */
-#define ESR_ELx_EC_SERROR (0x2F)
-#define ESR_ELx_EC_BREAKPT_LOW (0x30)
-#define ESR_ELx_EC_BREAKPT_CUR (0x31)
-#define ESR_ELx_EC_SOFTSTP_LOW (0x32)
-#define ESR_ELx_EC_SOFTSTP_CUR (0x33)
-#define ESR_ELx_EC_WATCHPT_LOW (0x34)
-#define ESR_ELx_EC_WATCHPT_CUR (0x35)
+#define ESR_ELx_EC_SERROR UL(0x2F)
+#define ESR_ELx_EC_BREAKPT_LOW UL(0x30)
+#define ESR_ELx_EC_BREAKPT_CUR UL(0x31)
+#define ESR_ELx_EC_SOFTSTP_LOW UL(0x32)
+#define ESR_ELx_EC_SOFTSTP_CUR UL(0x33)
+#define ESR_ELx_EC_WATCHPT_LOW UL(0x34)
+#define ESR_ELx_EC_WATCHPT_CUR UL(0x35)
 /* Unallocated EC: 0x36 - 0x37 */
-#define ESR_ELx_EC_BKPT32 (0x38)
+#define ESR_ELx_EC_BKPT32 UL(0x38)
 /* Unallocated EC: 0x39 */
-#define ESR_ELx_EC_VECTOR32 (0x3A) /* EL2 only */
+#define ESR_ELx_EC_VECTOR32 UL(0x3A) /* EL2 only */
 /* Unallocated EC: 0x3B */
-#define ESR_ELx_EC_BRK64 (0x3C)
+#define ESR_ELx_EC_BRK64 UL(0x3C)
 /* Unallocated EC: 0x3D - 0x3F */
-#define ESR_ELx_EC_MAX (0x3F)
+#define ESR_ELx_EC_MAX UL(0x3F)
 
 #define ESR_ELx_EC_SHIFT (26)
 #define ESR_ELx_EC_WIDTH (6)
@@ -312,10 +312,10 @@ struct zt_context {
 ((sizeof(struct za_context) + (__SVE_VQ_BYTES - 1)) \
 / __SVE_VQ_BYTES * __SVE_VQ_BYTES)
 
-#define ZA_SIG_REGS_SIZE(vq) ((vq * __SVE_VQ_BYTES) * (vq * __SVE_VQ_BYTES))
+#define ZA_SIG_REGS_SIZE(vq) (((vq) * __SVE_VQ_BYTES) * ((vq) * __SVE_VQ_BYTES))
 
 #define ZA_SIG_ZAV_OFFSET(vq, n) (ZA_SIG_REGS_OFFSET + \
-(SVE_SIG_ZREG_SIZE(vq) * n))
+(SVE_SIG_ZREG_SIZE(vq) * (n)))
 
 #define ZA_SIG_CONTEXT_SIZE(vq) \
 (ZA_SIG_REGS_OFFSET + ZA_SIG_REGS_SIZE(vq))
@@ -326,7 +326,7 @@ struct zt_context {
 
 #define ZT_SIG_REGS_OFFSET sizeof(struct zt_context)
 
-#define ZT_SIG_REGS_SIZE(n) (ZT_SIG_REG_BYTES * n)
+#define ZT_SIG_REGS_SIZE(n) (ZT_SIG_REG_BYTES * (n))
 
 #define ZT_SIG_CONTEXT_SIZE(n) \
 (sizeof(struct zt_context) + ZT_SIG_REGS_SIZE(n))
@@ -472,6 +472,14 @@ static const struct midr_range erratum_spec_ssbs_list[] = {
 };
 #endif
 
+#ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38
+static const struct midr_range erratum_ac03_cpu_38_list[] = {
+MIDR_ALL_VERSIONS(MIDR_AMPERE1),
+MIDR_ALL_VERSIONS(MIDR_AMPERE1A),
+{},
+};
+#endif
+
 const struct arm64_cpu_capabilities arm64_errata[] = {
 #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
 {
@@ -789,7 +797,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 {
 .desc = "AmpereOne erratum AC03_CPU_38",
 .capability = ARM64_WORKAROUND_AMPERE_AC03_CPU_38,
-ERRATA_MIDR_ALL_VERSIONS(MIDR_AMPERE1),
+ERRATA_MIDR_RANGE_LIST(erratum_ac03_cpu_38_list),
 },
 #endif
 {
@@ -415,9 +415,9 @@ out:
 return;
 }
 
-static __always_inline void do_ffa_mem_xfer(const u64 func_id,
-struct arm_smccc_res *res,
-struct kvm_cpu_context *ctxt)
+static void __do_ffa_mem_xfer(const u64 func_id,
+struct arm_smccc_res *res,
+struct kvm_cpu_context *ctxt)
 {
 DECLARE_REG(u32, len, ctxt, 1);
 DECLARE_REG(u32, fraglen, ctxt, 2);
@@ -428,9 +428,6 @@ static __always_inline void do_ffa_mem_xfer(const u64 func_id,
 u32 offset, nr_ranges;
 int ret = 0;
 
-BUILD_BUG_ON(func_id != FFA_FN64_MEM_SHARE &&
-func_id != FFA_FN64_MEM_LEND);
-
 if (addr_mbz || npages_mbz || fraglen > len ||
 fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) {
 ret = FFA_RET_INVALID_PARAMETERS;
@@ -449,6 +446,11 @@ static __always_inline void do_ffa_mem_xfer(const u64 func_id,
 goto out_unlock;
 }
 
+if (len > ffa_desc_buf.len) {
+ret = FFA_RET_NO_MEMORY;
+goto out_unlock;
+}
+
 buf = hyp_buffers.tx;
 memcpy(buf, host_buffers.tx, fraglen);
 
@@ -498,6 +500,13 @@ err_unshare:
 goto out_unlock;
 }
 
+#define do_ffa_mem_xfer(fid, res, ctxt) \
+do { \
+BUILD_BUG_ON((fid) != FFA_FN64_MEM_SHARE && \
+(fid) != FFA_FN64_MEM_LEND); \
+__do_ffa_mem_xfer((fid), (res), (ctxt)); \
+} while (0);
+
 static void do_ffa_mem_reclaim(struct arm_smccc_res *res,
 struct kvm_cpu_context *ctxt)
 {
@@ -116,7 +116,7 @@ asmlinkage int m68k_clone(struct pt_regs *regs)
 {
 /* regs will be equal to current_pt_regs() */
 struct kernel_clone_args args = {
-.flags = regs->d1 & ~CSIGNAL,
+.flags = (u32)(regs->d1) & ~CSIGNAL,
 .pidfd = (int __user *)regs->d3,
 .child_tid = (int __user *)regs->d4,
 .parent_tid = (int __user *)regs->d3,
@@ -96,6 +96,7 @@ config CRYPTO_AES_PPC_SPE
 
 config CRYPTO_AES_GCM_P10
 tristate "Stitched AES/GCM acceleration support on P10 or later CPU (PPC)"
+depends on BROKEN
 depends on PPC64 && CPU_LITTLE_ENDIAN && VSX
 select CRYPTO_LIB_AES
 select CRYPTO_ALGAPI
@@ -39,6 +39,12 @@
 #define STDX_BE stringify_in_c(stdbrx)
 #endif
 
+#ifdef CONFIG_CC_IS_CLANG
+#define DS_FORM_CONSTRAINT "Z<>"
+#else
+#define DS_FORM_CONSTRAINT "YZ<>"
+#endif
+
 #else /* 32-bit */
 
 /* operations for longs and pointers */
@@ -11,6 +11,7 @@
 #include <asm/cmpxchg.h>
 #include <asm/barrier.h>
 #include <asm/asm-const.h>
+#include <asm/asm-compat.h>
 
 /*
 * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
@@ -197,7 +198,7 @@ static __inline__ s64 arch_atomic64_read(const atomic64_t *v)
 if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
 __asm__ __volatile__("ld %0,0(%1)" : "=r"(t) : "b"(&v->counter));
 else
-__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));
+__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : DS_FORM_CONSTRAINT (v->counter));
 
 return t;
 }
@@ -208,7 +209,7 @@ static __inline__ void arch_atomic64_set(atomic64_t *v, s64 i)
 if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
 __asm__ __volatile__("std %1,0(%2)" : "=m"(v->counter) : "r"(i), "b"(&v->counter));
 else
-__asm__ __volatile__("std%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
+__asm__ __volatile__("std%U0%X0 %1,%0" : "=" DS_FORM_CONSTRAINT (v->counter) : "r"(i));
 }
 
 #define ATOMIC64_OP(op, asm_op) \
@@ -6,6 +6,7 @@
 #include <asm/page.h>
 #include <asm/extable.h>
 #include <asm/kup.h>
+#include <asm/asm-compat.h>
 
 #ifdef __powerpc64__
 /* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
@@ -92,12 +93,6 @@ __pu_failed: \
 : label)
 #endif
 
-#ifdef CONFIG_CC_IS_CLANG
-#define DS_FORM_CONSTRAINT "Z<>"
-#else
-#define DS_FORM_CONSTRAINT "YZ<>"
-#endif
-
 #ifdef __powerpc64__
 #ifdef CONFIG_PPC_KERNEL_PREFIXED
 #define __put_user_asm2_goto(x, ptr, label) \
@@ -41,12 +41,12 @@
 #include "head_32.h"
 
 .macro compare_to_kernel_boundary scratch, addr
-#if CONFIG_TASK_SIZE <= 0x80000000 && CONFIG_PAGE_OFFSET >= 0x80000000
+#if CONFIG_TASK_SIZE <= 0x80000000 && MODULES_VADDR >= 0x80000000
 /* By simply checking Address >= 0x80000000, we know if its a kernel address */
 not. \scratch, \addr
 #else
 rlwinm \scratch, \addr, 16, 0xfff8
-cmpli cr0, \scratch, PAGE_OFFSET@h
+cmpli cr0, \scratch, TASK_SIZE@h
 #endif
 .endm
 
@@ -404,7 +404,7 @@ FixupDAR:/* Entry point for dcbx workaround. */
 mfspr r10, SPRN_SRR0
 mtspr SPRN_MD_EPN, r10
 rlwinm r11, r10, 16, 0xfff8
-cmpli cr1, r11, PAGE_OFFSET@h
+cmpli cr1, r11, TASK_SIZE@h
 mfspr r11, SPRN_M_TWB /* Get level 1 table */
 blt+ cr1, 3f
@@ -38,11 +38,7 @@
 .else
 addi r4, r5, VDSO_DATA_OFFSET
 .endif
-#ifdef __powerpc64__
 bl CFUNC(DOTSYM(\funct))
-#else
-bl \funct
-#endif
 PPC_LL r0, PPC_MIN_STKFRM + PPC_LR_STKOFF(r1)
 #ifdef __powerpc64__
 PPC_LL r2, PPC_MIN_STKFRM + STK_GOT(r1)
@@ -149,11 +149,11 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
 
 mmu_mapin_immr();
 
-mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, true);
+mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_X, true);
 if (debug_pagealloc_enabled_or_kfence()) {
 top = boundary;
 } else {
-mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL_TEXT, true);
+mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL_X, true);
 mmu_mapin_ram_chunk(einittext8, top, PAGE_KERNEL, true);
 }
@@ -10,6 +10,7 @@
 #define __KVM_VCPU_RISCV_PMU_H
 
 #include <linux/perf/riscv_pmu.h>
+#include <asm/kvm_vcpu_insn.h>
 #include <asm/sbi.h>
 
 #ifdef CONFIG_RISCV_PMU_SBI
@@ -57,11 +58,11 @@ struct kvm_pmu {
 
 #if defined(CONFIG_32BIT)
 #define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
-{.base = CSR_CYCLEH, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm }, \
-{.base = CSR_CYCLE, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm },
+{.base = CSR_CYCLEH, .count = 32, .func = kvm_riscv_vcpu_pmu_read_hpm }, \
+{.base = CSR_CYCLE, .count = 32, .func = kvm_riscv_vcpu_pmu_read_hpm },
 #else
 #define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
-{.base = CSR_CYCLE, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm },
+{.base = CSR_CYCLE, .count = 32, .func = kvm_riscv_vcpu_pmu_read_hpm },
 #endif
 
 int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid);
@@ -92,8 +93,20 @@ void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu);
 struct kvm_pmu {
 };
 
+static inline int kvm_riscv_vcpu_pmu_read_legacy(struct kvm_vcpu *vcpu, unsigned int csr_num,
+unsigned long *val, unsigned long new_val,
+unsigned long wr_mask)
+{
+if (csr_num == CSR_CYCLE || csr_num == CSR_INSTRET) {
+*val = 0;
+return KVM_INSN_CONTINUE_NEXT_SEPC;
+} else {
+return KVM_INSN_ILLEGAL_TRAP;
+}
+}
+
 #define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
-{.base = 0, .count = 0, .func = NULL },
+{.base = CSR_CYCLE, .count = 3, .func = kvm_riscv_vcpu_pmu_read_legacy },
 
 static inline void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu) {}
 static inline int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid)
@@ -62,7 +62,7 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
 perf_callchain_store(entry, regs->epc);
 
 fp = user_backtrace(entry, fp, regs->ra);
-while (fp && !(fp & 0x3) && entry->nr < entry->max_stack)
+while (fp && !(fp & 0x7) && entry->nr < entry->max_stack)
 fp = user_backtrace(entry, fp, 0);
 }
@@ -91,8 +91,8 @@ void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run)
 run->riscv_sbi.args[3] = cp->a3;
 run->riscv_sbi.args[4] = cp->a4;
 run->riscv_sbi.args[5] = cp->a5;
-run->riscv_sbi.ret[0] = cp->a0;
-run->riscv_sbi.ret[1] = cp->a1;
+run->riscv_sbi.ret[0] = SBI_ERR_NOT_SUPPORTED;
+run->riscv_sbi.ret[1] = 0;
 }
 
 void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
@@ -14,6 +14,7 @@
 #include <asm/insn.h>
 #include <asm/insn-eval.h>
+#include <asm/pgtable.h>
 #include <asm/traps.h>
 
 /* MMIO direction */
 #define EPT_READ 0
@@ -405,6 +406,11 @@ static int handle_mmio(struct pt_regs *regs, struct ve_info *ve)
 return -EINVAL;
 }
 
+if (!fault_in_kernel_space(ve->gla)) {
+WARN_ONCE(1, "Access to userspace address is not supported");
+return -EINVAL;
+}
+
 /*
 * Reject EPT violation #VEs that split pages.
 *
@@ -1602,6 +1602,7 @@ static void pt_event_stop(struct perf_event *event, int mode)
 * see comment in intel_pt_interrupt().
 */
 WRITE_ONCE(pt->handle_nmi, 0);
+barrier();
 
 pt_config_stop(event);
 
@@ -1653,11 +1654,10 @@ static long pt_event_snapshot_aux(struct perf_event *event,
 return 0;
 
 /*
-* Here, handle_nmi tells us if the tracing is on
+* There is no PT interrupt in this mode, so stop the trace and it will
+* remain stopped while the buffer is copied.
 */
-if (READ_ONCE(pt->handle_nmi))
-pt_config_stop(event);
-
+pt_config_stop(event);
 pt_read_offset(buf);
 pt_update_head(pt);
 
@@ -1669,11 +1669,10 @@ static long pt_event_snapshot_aux(struct perf_event *event,
 ret = perf_output_copy_aux(&pt->handle, handle, from, to);
 
 /*
-* If the tracing was on when we turned up, restart it.
-* Compiler barrier not needed as we couldn't have been
-* preempted by anything that touches pt->handle_nmi.
+* Here, handle_nmi tells us if the tracing was on.
+* If the tracing was on, restart it.
 */
-if (pt->handle_nmi)
+if (READ_ONCE(pt->handle_nmi))
 pt_config_start(event);
 
 return ret;
@@ -166,6 +166,14 @@ void acpi_generic_reduced_hw_init(void);
 void x86_default_set_root_pointer(u64 addr);
 u64 x86_default_get_root_pointer(void);
 
+#ifdef CONFIG_XEN_PV
+/* A Xen PV domain needs a special acpi_os_ioremap() handling. */
+extern void __iomem * (*acpi_os_ioremap)(acpi_physical_address phys,
+acpi_size size);
+void __iomem *x86_acpi_os_ioremap(acpi_physical_address phys, acpi_size size);
+#define acpi_os_ioremap acpi_os_ioremap
+#endif
+
 #else /* !CONFIG_ACPI */
 
 #define acpi_lapic 0
@@ -63,7 +63,11 @@ extern u64 arch_irq_stat(void);
 #define local_softirq_pending_ref pcpu_hot.softirq_pending
 
 #if IS_ENABLED(CONFIG_KVM_INTEL)
-static inline void kvm_set_cpu_l1tf_flush_l1d(void)
+/*
+ * This function is called from noinstr interrupt contexts
+ * and must be inlined to not get instrumentation.
+ */
+static __always_inline void kvm_set_cpu_l1tf_flush_l1d(void)
 {
 __this_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 1);
 }
@@ -78,7 +82,7 @@ static __always_inline bool kvm_get_cpu_l1tf_flush_l1d(void)
 return __this_cpu_read(irq_stat.kvm_cpu_l1tf_flush_l1d);
 }
 #else /* !IS_ENABLED(CONFIG_KVM_INTEL) */
-static inline void kvm_set_cpu_l1tf_flush_l1d(void) { }
+static __always_inline void kvm_set_cpu_l1tf_flush_l1d(void) { }
 #endif /* IS_ENABLED(CONFIG_KVM_INTEL) */
 
 #endif /* _ASM_X86_HARDIRQ_H */
@@ -13,15 +13,18 @@
 
 #include <asm/irq_stack.h>
 
+typedef void (*idtentry_t)(struct pt_regs *regs);
+
 /**
 * DECLARE_IDTENTRY - Declare functions for simple IDT entry points
 * No error code pushed by hardware
 * @vector: Vector number (ignored for C)
 * @func: Function name of the entry point
 *
-* Declares three functions:
+* Declares four functions:
 * - The ASM entry point: asm_##func
 * - The XEN PV trap entry point: xen_##func (maybe unused)
+* - The C handler called from the FRED event dispatcher (maybe unused)
 * - The C handler called from the ASM entry point
 *
 * Note: This is the C variant of DECLARE_IDTENTRY(). As the name says it
@@ -31,6 +34,7 @@
 #define DECLARE_IDTENTRY(vector, func) \
 asmlinkage void asm_##func(void); \
 asmlinkage void xen_asm_##func(void); \
+void fred_##func(struct pt_regs *regs); \
 __visible void func(struct pt_regs *regs)
 
 /**
@@ -137,6 +141,17 @@ static __always_inline void __##func(struct pt_regs *regs, \
 #define DEFINE_IDTENTRY_RAW(func) \
 __visible noinstr void func(struct pt_regs *regs)
 
+/**
+* DEFINE_FREDENTRY_RAW - Emit code for raw FRED entry points
+* @func: Function name of the entry point
+*
+* @func is called from the FRED event dispatcher with interrupts disabled.
+*
+* See @DEFINE_IDTENTRY_RAW for further details.
+*/
+#define DEFINE_FREDENTRY_RAW(func) \
+noinstr void fred_##func(struct pt_regs *regs)
+
 /**
 * DECLARE_IDTENTRY_RAW_ERRORCODE - Declare functions for raw IDT entry points
 * Error code pushed by hardware
@@ -197,8 +212,8 @@ __visible noinstr void func(struct pt_regs *regs, \
 irqentry_state_t state = irqentry_enter(regs); \
 u32 vector = (u32)(u8)error_code; \
 \
-kvm_set_cpu_l1tf_flush_l1d(); \
 instrumentation_begin(); \
+kvm_set_cpu_l1tf_flush_l1d(); \
 run_irq_on_irqstack_cond(__##func, regs, vector); \
 instrumentation_end(); \
 irqentry_exit(regs, state); \
@@ -233,17 +248,27 @@ static noinline void __##func(struct pt_regs *regs, u32 vector)
 #define DEFINE_IDTENTRY_SYSVEC(func) \
 static void __##func(struct pt_regs *regs); \
 \
+static __always_inline void instr_##func(struct pt_regs *regs) \
+{ \
+run_sysvec_on_irqstack_cond(__##func, regs); \
+} \
+\
 __visible noinstr void func(struct pt_regs *regs) \
 { \
 irqentry_state_t state = irqentry_enter(regs); \
 \
-kvm_set_cpu_l1tf_flush_l1d(); \
 instrumentation_begin(); \
+kvm_set_cpu_l1tf_flush_l1d(); \
-run_sysvec_on_irqstack_cond(__##func, regs); \
+instr_##func (regs); \
 instrumentation_end(); \
 irqentry_exit(regs, state); \
 } \
 \
+void fred_##func(struct pt_regs *regs) \
+{ \
+instr_##func (regs); \
+} \
+\
 static noinline void __##func(struct pt_regs *regs)
@@ -260,19 +285,29 @@ static noinline void __##func(struct pt_regs *regs)
 #define DEFINE_IDTENTRY_SYSVEC_SIMPLE(func) \
 static __always_inline void __##func(struct pt_regs *regs); \
 \
+static __always_inline void instr_##func(struct pt_regs *regs) \
+{ \
+__irq_enter_raw(); \
+__##func (regs); \
+__irq_exit_raw(); \
+} \
+\
 __visible noinstr void func(struct pt_regs *regs) \
 { \
 irqentry_state_t state = irqentry_enter(regs); \
 \
-kvm_set_cpu_l1tf_flush_l1d(); \
 instrumentation_begin(); \
-__irq_enter_raw(); \
+kvm_set_cpu_l1tf_flush_l1d(); \
-__##func (regs); \
-__irq_exit_raw(); \
+instr_##func (regs); \
 instrumentation_end(); \
 irqentry_exit(regs, state); \
 } \
 \
+void fred_##func(struct pt_regs *regs) \
+{ \
+instr_##func (regs); \
+} \
+\
 static __always_inline void __##func(struct pt_regs *regs)
@@ -410,15 +445,18 @@ __visible noinstr void func(struct pt_regs *regs, \
 /* C-Code mapping */
 #define DECLARE_IDTENTRY_NMI DECLARE_IDTENTRY_RAW
 #define DEFINE_IDTENTRY_NMI DEFINE_IDTENTRY_RAW
+#define DEFINE_FREDENTRY_NMI DEFINE_FREDENTRY_RAW
 
 #ifdef CONFIG_X86_64
 #define DECLARE_IDTENTRY_MCE DECLARE_IDTENTRY_IST
 #define DEFINE_IDTENTRY_MCE DEFINE_IDTENTRY_IST
 #define DEFINE_IDTENTRY_MCE_USER DEFINE_IDTENTRY_NOIST
+#define DEFINE_FREDENTRY_MCE DEFINE_FREDENTRY_RAW
 
 #define DECLARE_IDTENTRY_DEBUG DECLARE_IDTENTRY_IST
 #define DEFINE_IDTENTRY_DEBUG DEFINE_IDTENTRY_IST
 #define DEFINE_IDTENTRY_DEBUG_USER DEFINE_IDTENTRY_NOIST
+#define DEFINE_FREDENTRY_DEBUG DEFINE_FREDENTRY_RAW
 #endif
 
 #else /* !__ASSEMBLY__ */
@@ -655,23 +693,36 @@ DECLARE_IDTENTRY(RESCHEDULE_VECTOR, sysvec_reschedule_ipi);
 DECLARE_IDTENTRY_SYSVEC(REBOOT_VECTOR, sysvec_reboot);
 DECLARE_IDTENTRY_SYSVEC(CALL_FUNCTION_SINGLE_VECTOR, sysvec_call_function_single);
 DECLARE_IDTENTRY_SYSVEC(CALL_FUNCTION_VECTOR, sysvec_call_function);
+#else
+# define fred_sysvec_reschedule_ipi NULL
+# define fred_sysvec_reboot NULL
+# define fred_sysvec_call_function_single NULL
+# define fred_sysvec_call_function NULL
 #endif
 
 #ifdef CONFIG_X86_LOCAL_APIC
 # ifdef CONFIG_X86_MCE_THRESHOLD
 DECLARE_IDTENTRY_SYSVEC(THRESHOLD_APIC_VECTOR, sysvec_threshold);
+# else
+# define fred_sysvec_threshold NULL
 # endif
 
 # ifdef CONFIG_X86_MCE_AMD
 DECLARE_IDTENTRY_SYSVEC(DEFERRED_ERROR_VECTOR, sysvec_deferred_error);
+# else
+# define fred_sysvec_deferred_error NULL
 # endif
 
 # ifdef CONFIG_X86_THERMAL_VECTOR
 DECLARE_IDTENTRY_SYSVEC(THERMAL_APIC_VECTOR, sysvec_thermal);
+# else
+# define fred_sysvec_thermal NULL
 # endif
 
 # ifdef CONFIG_IRQ_WORK
 DECLARE_IDTENTRY_SYSVEC(IRQ_WORK_VECTOR, sysvec_irq_work);
+# else
+# define fred_sysvec_irq_work NULL
 # endif
 #endif
 
@@ -679,12 +730,16 @@ DECLARE_IDTENTRY_SYSVEC(IRQ_WORK_VECTOR, sysvec_irq_work);
 DECLARE_IDTENTRY_SYSVEC(POSTED_INTR_VECTOR, sysvec_kvm_posted_intr_ipi);
 DECLARE_IDTENTRY_SYSVEC(POSTED_INTR_WAKEUP_VECTOR, sysvec_kvm_posted_intr_wakeup_ipi);
 DECLARE_IDTENTRY_SYSVEC(POSTED_INTR_NESTED_VECTOR, sysvec_kvm_posted_intr_nested_ipi);
+#else
+# define fred_sysvec_kvm_posted_intr_ipi NULL
+# define fred_sysvec_kvm_posted_intr_wakeup_ipi NULL
+# define fred_sysvec_kvm_posted_intr_nested_ipi NULL
 #endif
 
 #if IS_ENABLED(CONFIG_HYPERV)
 DECLARE_IDTENTRY_SYSVEC(HYPERVISOR_CALLBACK_VECTOR, sysvec_hyperv_callback);
 DECLARE_IDTENTRY_SYSVEC(HYPERV_REENLIGHTENMENT_VECTOR, sysvec_hyperv_reenlightenment);
 DECLARE_IDTENTRY_SYSVEC(HYPERV_STIMER0_VECTOR, sysvec_hyperv_stimer0);
 #endif
 
 #if IS_ENABLED(CONFIG_ACRN_GUEST)
@@ -1901,3 +1901,14 @@ u64 x86_default_get_root_pointer(void)
 {
 return boot_params.acpi_rsdp_addr;
 }
+
+#ifdef CONFIG_XEN_PV
+void __iomem *x86_acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
+{
+return ioremap_cache(phys, size);
+}
+
+void __iomem * (*acpi_os_ioremap)(acpi_physical_address phys, acpi_size size) =
+x86_acpi_os_ioremap;
+EXPORT_SYMBOL_GPL(acpi_os_ioremap);
+#endif
@@ -474,24 +474,25 @@ struct sgx_epc_page *__sgx_alloc_epc_page(void)
 {
 struct sgx_epc_page *page;
 int nid_of_current = numa_node_id();
-int nid = nid_of_current;
+int nid_start, nid;
 
-if (node_isset(nid_of_current, sgx_numa_mask)) {
-page = __sgx_alloc_epc_page_from_node(nid_of_current);
-if (page)
-return page;
-}
-
-/* Fall back to the non-local NUMA nodes: */
-while (true) {
-nid = next_node_in(nid, sgx_numa_mask);
-if (nid == nid_of_current)
-break;
+/*
+* Try local node first. If it doesn't have an EPC section,
+* fall back to the non-local NUMA nodes.
+*/
+if (node_isset(nid_of_current, sgx_numa_mask))
+nid_start = nid_of_current;
+else
+nid_start = next_node_in(nid_of_current, sgx_numa_mask);
+
+nid = nid_start;
+do {
 page = __sgx_alloc_epc_page_from_node(nid);
 if (page)
 return page;
-}
 
+nid = next_node_in(nid, sgx_numa_mask);
+} while (nid != nid_start);
 
 return ERR_PTR(-ENOMEM);
 }
@@ -12,6 +12,7 @@
 #include <linux/kernel.h>
 #include <linux/reboot.h>
 #include <linux/serial_8250.h>
+#include <linux/acpi.h>
 #include <asm/apic.h>
 #include <asm/io_apic.h>
 #include <asm/acpi.h>
@@ -9,6 +9,7 @@
 #include <linux/pci.h>
 #include <linux/dmi.h>
 #include <linux/range.h>
+#include <linux/acpi.h>
 
 #include <asm/pci-direct.h>
 #include <linux/sort.h>
@@ -750,6 +750,27 @@ static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr)
 
 #define LAM_U57_BITS 6
 
+static void enable_lam_func(void *__mm)
+{
+struct mm_struct *mm = __mm;
+
+if (this_cpu_read(cpu_tlbstate.loaded_mm) == mm) {
+write_cr3(__read_cr3() | mm->context.lam_cr3_mask);
+set_tlbstate_lam_mode(mm);
+}
+}
+
+static void mm_enable_lam(struct mm_struct *mm)
+{
+/*
+* Even though the process must still be single-threaded at this
+* point, kernel threads may be using the mm. IPI those kernel
+* threads if they exist.
+*/
+on_each_cpu_mask(mm_cpumask(mm), enable_lam_func, mm, true);
+set_bit(MM_CONTEXT_LOCK_LAM, &mm->context.flags);
+}
+
 static int prctl_enable_tagged_addr(struct mm_struct *mm, unsigned long nr_bits)
 {
 if (!cpu_feature_enabled(X86_FEATURE_LAM))
@@ -766,6 +787,10 @@ static int prctl_enable_tagged_addr(struct mm_struct *mm, unsigned long nr_bits)
 if (mmap_write_lock_killable(mm))
 return -EINTR;
 
+/*
+* MM_CONTEXT_LOCK_LAM is set on clone. Prevent LAM from
+* being enabled unless the process is single threaded:
+*/
 if (test_bit(MM_CONTEXT_LOCK_LAM, &mm->context.flags)) {
 mmap_write_unlock(mm);
 return -EBUSY;
@@ -782,9 +807,7 @@ static int prctl_enable_tagged_addr(struct mm_struct *mm, unsigned long nr_bits)
 return -EINVAL;
 }
 
-write_cr3(__read_cr3() | mm->context.lam_cr3_mask);
-set_tlbstate_lam_mode(mm);
-set_bit(MM_CONTEXT_LOCK_LAM, &mm->context.flags);
+mm_enable_lam(mm);
 
 mmap_write_unlock(mm);
@@ -60,6 +60,7 @@
 #include <linux/stackprotector.h>
 #include <linux/cpuhotplug.h>
 #include <linux/mc146818rtc.h>
+#include <linux/acpi.h>
 
 #include <asm/acpi.h>
 #include <asm/cacheinfo.h>
@@ -8,6 +8,7 @@
 #include <linux/ioport.h>
 #include <linux/export.h>
 #include <linux/pci.h>
+#include <linux/acpi.h>
 
 #include <asm/acpi.h>
 #include <asm/bios_ebda.h>
@@ -2441,6 +2441,29 @@ void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);
 
+#define X2APIC_ICR_RESERVED_BITS (GENMASK_ULL(31, 20) | GENMASK_ULL(17, 16) | BIT(13))
+
+int kvm_x2apic_icr_write(struct kvm_lapic *apic, u64 data)
+{
+if (data & X2APIC_ICR_RESERVED_BITS)
+return 1;
+
+/*
+* The BUSY bit is reserved on both Intel and AMD in x2APIC mode, but
+* only AMD requires it to be zero, Intel essentially just ignores the
+* bit. And if IPI virtualization (Intel) or x2AVIC (AMD) is enabled,
+* the CPU performs the reserved bits checks, i.e. the underlying CPU
+* behavior will "win". Arbitrarily clear the BUSY bit, as there is no
+* sane way to provide consistent behavior with respect to hardware.
+*/
+data &= ~APIC_ICR_BUSY;
+
+kvm_apic_send_ipi(apic, (u32)data, (u32)(data >> 32));
+kvm_lapic_set_reg64(apic, APIC_ICR, data);
+trace_kvm_apic_write(APIC_ICR, data);
+return 0;
+}
+
 /* emulate APIC access in a trap manner */
 void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
 {
@@ -2458,7 +2481,7 @@ void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
 * maybe-unecessary write, and both are in the noise anyways.
 */
 if (apic_x2apic_mode(apic) && offset == APIC_ICR)
-kvm_x2apic_icr_write(apic, kvm_lapic_get_reg64(apic, APIC_ICR));
+WARN_ON_ONCE(kvm_x2apic_icr_write(apic, kvm_lapic_get_reg64(apic, APIC_ICR)));
 else
 kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset));
 }
@@ -3161,16 +3184,6 @@ int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
 return 0;
 }
 
-int kvm_x2apic_icr_write(struct kvm_lapic *apic, u64 data)
-{
-data &= ~APIC_ICR_BUSY;
-
-kvm_apic_send_ipi(apic, (u32)data, (u32)(data >> 32));
-kvm_lapic_set_reg64(apic, APIC_ICR, data);
-trace_kvm_apic_write(APIC_ICR, data);
-return 0;
-}
-
 static int kvm_lapic_msr_read(struct kvm_lapic *apic, u32 reg, u64 *data)
 {
 u32 low;
@@ -497,9 +497,9 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 {
 struct mm_struct *real_prev = this_cpu_read(cpu_tlbstate.loaded_mm);
 u16 prev_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
-unsigned long new_lam = mm_lam_cr3_mask(next);
 bool was_lazy = this_cpu_read(cpu_tlbstate_shared.is_lazy);
 unsigned cpu = smp_processor_id();
+unsigned long new_lam;
 u64 next_tlb_gen;
 bool need_flush;
 u16 new_asid;
@@ -622,9 +622,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 cpumask_clear_cpu(cpu, mm_cpumask(real_prev));
 }
 
-/*
-* Start remote flushes and then read tlb_gen.
-*/
+/* Start receiving IPIs and then read tlb_gen (and LAM below) */
 if (next != &init_mm)
 cpumask_set_cpu(cpu, mm_cpumask(next));
 next_tlb_gen = atomic64_read(&next->context.tlb_gen);
@@ -636,6 +634,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 barrier();
 }
 
+new_lam = mm_lam_cr3_mask(next);
 set_tlbstate_lam_mode(next);
 if (need_flush) {
 this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
@@ -980,7 +980,7 @@ static void amd_rp_pme_suspend(struct pci_dev *dev)
 return;
 
 rp = pcie_find_root_port(dev);
-if (!rp->pm_cap)
+if (!rp || !rp->pm_cap)
 return;
 
 rp->pme_support &= ~((PCI_PM_CAP_PME_D3hot|PCI_PM_CAP_PME_D3cold) >>
@@ -994,7 +994,7 @@ static void amd_rp_pme_resume(struct pci_dev *dev)
 u16 pmc;
 
 rp = pcie_find_root_port(dev);
-if (!rp->pm_cap)
+if (!rp || !rp->pm_cap)
 return;
 
 pci_read_config_word(rp, rp->pm_cap + PCI_PM_PMC, &pmc);
@@ -2019,10 +2019,7 @@ void __init xen_reserve_special_pages(void)
 
 void __init xen_pt_check_e820(void)
 {
-if (xen_is_e820_reserved(xen_pt_base, xen_pt_size)) {
-xen_raw_console_write("Xen hypervisor allocated page table memory conflicts with E820 map\n");
-BUG();
-}
+xen_chk_is_e820_usable(xen_pt_base, xen_pt_size, "page table");
 }
 
 static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
@@ -70,6 +70,7 @@
 #include <linux/memblock.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
+#include <linux/acpi.h>
 
 #include <asm/cache.h>
 #include <asm/setup.h>
@@ -80,6 +81,7 @@
 #include <asm/xen/hypervisor.h>
 #include <xen/balloon.h>
 #include <xen/grant_table.h>
+#include <xen/hvc-console.h>
 
 #include "multicalls.h"
 #include "xen-ops.h"
@@ -794,6 +796,102 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
 return ret;
 }
 
+/* Remapped non-RAM areas */
+#define NR_NONRAM_REMAP 4
+static struct nonram_remap {
+phys_addr_t maddr;
+phys_addr_t paddr;
+size_t size;
+} xen_nonram_remap[NR_NONRAM_REMAP] __ro_after_init;
+static unsigned int nr_nonram_remap __ro_after_init;
+
+/*
+* Do the real remapping of non-RAM regions as specified in the
+* xen_nonram_remap[] array.
+* In case of an error just crash the system.
+*/
+void __init xen_do_remap_nonram(void)
+{
+unsigned int i;
+unsigned int remapped = 0;
+const struct nonram_remap *remap = xen_nonram_remap;
+unsigned long pfn, mfn, end_pfn;
+
+for (i = 0; i < nr_nonram_remap; i++) {
+end_pfn = PFN_UP(remap->paddr + remap->size);
+pfn = PFN_DOWN(remap->paddr);
+mfn = PFN_DOWN(remap->maddr);
+while (pfn < end_pfn) {
+if (!set_phys_to_machine(pfn, mfn))
+panic("Failed to set p2m mapping for pfn=%lx mfn=%lx\n",
+pfn, mfn);
+
+pfn++;
+mfn++;
+remapped++;
+}
+
+remap++;
+}
+
+pr_info("Remapped %u non-RAM page(s)\n", remapped);
+}
+
+#ifdef CONFIG_ACPI
+/*
+* Xen variant of acpi_os_ioremap() taking potentially remapped non-RAM
+* regions into account.
+* Any attempt to map an area crossing a remap boundary will produce a
+* WARN() splat.
+* phys is related to remap->maddr on input and will be rebased to remap->paddr.
+*/
+static void __iomem *xen_acpi_os_ioremap(acpi_physical_address phys,
+acpi_size size)
+{
+unsigned int i;
+const struct nonram_remap *remap = xen_nonram_remap;
+
+for (i = 0; i < nr_nonram_remap; i++) {
+if (phys + size > remap->maddr &&
+phys < remap->maddr + remap->size) {
+WARN_ON(phys < remap->maddr ||
+phys + size > remap->maddr + remap->size);
+phys += remap->paddr - remap->maddr;
+break;
+}
+}
+
+return x86_acpi_os_ioremap(phys, size);
+}
+#endif /* CONFIG_ACPI */
+
+/*
+* Add a new non-RAM remap entry.
+* In case of no free entry found, just crash the system.
+*/
+void __init xen_add_remap_nonram(phys_addr_t maddr, phys_addr_t paddr,
+unsigned long size)
+{
+BUG_ON((maddr & ~PAGE_MASK) != (paddr & ~PAGE_MASK));
+
+if (nr_nonram_remap == NR_NONRAM_REMAP) {
+xen_raw_console_write("Number of required E820 entry remapping actions exceed maximum value\n");
+BUG();
+}
+
+#ifdef CONFIG_ACPI
+/* Switch to the Xen acpi_os_ioremap() variant. */
+if (nr_nonram_remap == 0)
+acpi_os_ioremap = xen_acpi_os_ioremap;
+#endif
+
+xen_nonram_remap[nr_nonram_remap].maddr = maddr;
+xen_nonram_remap[nr_nonram_remap].paddr = paddr;
+xen_nonram_remap[nr_nonram_remap].size = size;
+
+nr_nonram_remap++;
+}
+
 #ifdef CONFIG_XEN_DEBUG_FS
 #include <linux/debugfs.h>
 #include "debugfs.h"
@@ -15,12 +15,12 @@
 #include <linux/cpuidle.h>
 #include <linux/cpufreq.h>
 #include <linux/memory_hotplug.h>
+#include <linux/acpi.h>
 
 #include <asm/elf.h>
 #include <asm/vdso.h>
 #include <asm/e820/api.h>
 #include <asm/setup.h>
-#include <asm/acpi.h>
 #include <asm/numa.h>
 #include <asm/idtentry.h>
 #include <asm/xen/hypervisor.h>
@@ -47,6 +47,9 @@ bool xen_pv_pci_possible;
 /* E820 map used during setting up memory. */
 static struct e820_table xen_e820_table __initdata;
 
+/* Number of initially usable memory pages. */
+static unsigned long ini_nr_pages __initdata;
+
 /*
 * Buffer used to remap identity mapped pages. We only need the virtual space.
 * The physical page behind this address is remapped as needed to different
@@ -213,7 +216,7 @@ static int __init xen_free_mfn(unsigned long mfn)
 * as a fallback if the remapping fails.
 */
 static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
-unsigned long end_pfn, unsigned long nr_pages)
+unsigned long end_pfn)
 {
 unsigned long pfn, end;
 int ret;
@@ -221,7 +224,7 @@ static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
 WARN_ON(start_pfn > end_pfn);
 
 /* Release pages first. */
-end = min(end_pfn, nr_pages);
+end = min(end_pfn, ini_nr_pages);
 for (pfn = start_pfn; pfn < end; pfn++) {
 unsigned long mfn = pfn_to_mfn(pfn);
 
@@ -342,15 +345,14 @@ static void __init xen_do_set_identity_and_remap_chunk(
 * to Xen and not remapped.
 */
 static unsigned long __init xen_set_identity_and_remap_chunk(
-unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
-unsigned long remap_pfn)
+unsigned long start_pfn, unsigned long end_pfn, unsigned long remap_pfn)
 {
 unsigned long pfn;
 unsigned long i = 0;
 unsigned long n = end_pfn - start_pfn;
 
 if (remap_pfn == 0)
-remap_pfn = nr_pages;
+remap_pfn = ini_nr_pages;
 
 while (i < n) {
 unsigned long cur_pfn = start_pfn + i;
@@ -359,19 +361,19 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
 unsigned long remap_range_size;
 
 /* Do not remap pages beyond the current allocation */
-if (cur_pfn >= nr_pages) {
+if (cur_pfn >= ini_nr_pages) {
 /* Identity map remaining pages */
 set_phys_range_identity(cur_pfn, cur_pfn + size);
 break;
 }
-if (cur_pfn + size > nr_pages)
-size = nr_pages - cur_pfn;
+if (cur_pfn + size > ini_nr_pages)
+size = ini_nr_pages - cur_pfn;
 
 remap_range_size = xen_find_pfn_range(&remap_pfn);
 if (!remap_range_size) {
 pr_warn("Unable to find available pfn range, not remapping identity pages\n");
 xen_set_identity_and_release_chunk(cur_pfn,
-cur_pfn + left, nr_pages);
+cur_pfn + left);
 break;
 }
 /* Adjust size to fit in current e820 RAM region */
@@ -398,18 +400,18 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
 }
 
 static unsigned long __init xen_count_remap_pages(
-unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
+unsigned long start_pfn, unsigned long end_pfn,
 unsigned long remap_pages)
 {
-if (start_pfn >= nr_pages)
+if (start_pfn >= ini_nr_pages)
 return remap_pages;
 
-return remap_pages + min(end_pfn, nr_pages) - start_pfn;
+return remap_pages + min(end_pfn, ini_nr_pages) - start_pfn;
 }
 
-static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
+static unsigned long __init xen_foreach_remap_area(
 unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn,
-unsigned long nr_pages, unsigned long last_val))
+unsigned long last_val))
 {
 phys_addr_t start = 0;
 unsigned long ret_val = 0;
@@ -437,8 +439,7 @@ static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
 end_pfn = PFN_UP(entry->addr);
 
 if (start_pfn < end_pfn)
-ret_val = func(start_pfn, end_pfn, nr_pages,
-ret_val);
+ret_val = func(start_pfn, end_pfn, ret_val);
 start = end;
 }
 }
@@ -495,6 +496,8 @@ void __init xen_remap_memory(void)
 set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
 
 pr_info("Remapped %ld page(s)\n", remapped);
+
+xen_do_remap_nonram();
 }
 
 static unsigned long __init xen_get_pages_limit(void)
@@ -568,7 +571,7 @@ static void __init xen_ignore_unusable(void)
 }
 }
 
 bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
|
||||
static bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
|
||||
{
|
||||
struct e820_entry *entry;
|
||||
unsigned mapcnt;
|
||||
|
@ -625,6 +628,111 @@ phys_addr_t __init xen_find_free_area(phys_addr_t size)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Swap a non-RAM E820 map entry with RAM above ini_nr_pages.
|
||||
* Note that the E820 map is modified accordingly, but the P2M map isn't yet.
|
||||
* The adaption of the P2M must be deferred until page allocation is possible.
|
||||
*/
|
||||
static void __init xen_e820_swap_entry_with_ram(struct e820_entry *swap_entry)
|
||||
{
|
||||
struct e820_entry *entry;
|
||||
unsigned int mapcnt;
|
||||
phys_addr_t mem_end = PFN_PHYS(ini_nr_pages);
|
||||
phys_addr_t swap_addr, swap_size, entry_end;
|
||||
|
||||
swap_addr = PAGE_ALIGN_DOWN(swap_entry->addr);
|
||||
swap_size = PAGE_ALIGN(swap_entry->addr - swap_addr + swap_entry->size);
|
||||
entry = xen_e820_table.entries;
|
||||
|
||||
for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++) {
|
||||
entry_end = entry->addr + entry->size;
|
||||
if (entry->type == E820_TYPE_RAM && entry->size >= swap_size &&
|
||||
entry_end - swap_size >= mem_end) {
|
||||
/* Reduce RAM entry by needed space (whole pages). */
|
||||
entry->size -= swap_size;
|
||||
|
||||
/* Add new entry at the end of E820 map. */
|
||||
entry = xen_e820_table.entries +
|
||||
xen_e820_table.nr_entries;
|
||||
xen_e820_table.nr_entries++;
|
||||
|
||||
/* Fill new entry (keep size and page offset). */
|
||||
entry->type = swap_entry->type;
|
||||
entry->addr = entry_end - swap_size +
|
||||
swap_addr - swap_entry->addr;
|
||||
entry->size = swap_entry->size;
|
||||
|
||||
/* Convert old entry to RAM, align to pages. */
|
||||
swap_entry->type = E820_TYPE_RAM;
|
||||
swap_entry->addr = swap_addr;
|
||||
swap_entry->size = swap_size;
|
||||
|
||||
/* Remember PFN<->MFN relation for P2M update. */
|
||||
xen_add_remap_nonram(swap_addr, entry_end - swap_size,
|
||||
swap_size);
|
||||
|
||||
/* Order E820 table and merge entries. */
|
||||
e820__update_table(&xen_e820_table);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
entry++;
|
||||
}
|
||||
|
||||
xen_raw_console_write("No suitable area found for required E820 entry remapping action\n");
|
||||
BUG();
|
||||
}
|
||||
|
||||
/*
|
||||
* Look for non-RAM memory types in a specific guest physical area and move
|
||||
* those away if possible (ACPI NVS only for now).
|
||||
*/
|
||||
static void __init xen_e820_resolve_conflicts(phys_addr_t start,
|
||||
phys_addr_t size)
|
||||
{
|
||||
struct e820_entry *entry;
|
||||
unsigned int mapcnt;
|
||||
phys_addr_t end;
|
||||
|
||||
if (!size)
|
||||
return;
|
||||
|
||||
end = start + size;
|
||||
entry = xen_e820_table.entries;
|
||||
|
||||
for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++) {
|
||||
if (entry->addr >= end)
|
||||
return;
|
||||
|
||||
if (entry->addr + entry->size > start &&
|
||||
entry->type == E820_TYPE_NVS)
|
||||
xen_e820_swap_entry_with_ram(entry);
|
||||
|
||||
entry++;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Check for an area in physical memory to be usable for non-movable purposes.
|
||||
* An area is considered to usable if the used E820 map lists it to be RAM or
|
||||
* some other type which can be moved to higher PFNs while keeping the MFNs.
|
||||
* In case the area is not usable, crash the system with an error message.
|
||||
*/
|
||||
void __init xen_chk_is_e820_usable(phys_addr_t start, phys_addr_t size,
|
||||
const char *component)
|
||||
{
|
||||
xen_e820_resolve_conflicts(start, size);
|
||||
|
||||
if (!xen_is_e820_reserved(start, size))
|
||||
return;
|
||||
|
||||
xen_raw_console_write("Xen hypervisor allocated ");
|
||||
xen_raw_console_write(component);
|
||||
xen_raw_console_write(" memory conflicts with E820 map\n");
|
||||
BUG();
|
||||
}
|
||||
|
||||
/*
|
||||
* Like memcpy, but with physical addresses for dest and src.
|
||||
*/
|
||||
|
@ -684,20 +792,20 @@ static void __init xen_reserve_xen_mfnlist(void)
|
|||
**/
|
||||
char * __init xen_memory_setup(void)
|
||||
{
|
||||
unsigned long max_pfn, pfn_s, n_pfns;
|
||||
unsigned long pfn_s, n_pfns;
|
||||
phys_addr_t mem_end, addr, size, chunk_size;
|
||||
u32 type;
|
||||
int rc;
|
||||
struct xen_memory_map memmap;
|
||||
unsigned long max_pages;
|
||||
unsigned long extra_pages = 0;
|
||||
unsigned long maxmem_pages;
|
||||
int i;
|
||||
int op;
|
||||
|
||||
xen_parse_512gb();
|
||||
max_pfn = xen_get_pages_limit();
|
||||
max_pfn = min(max_pfn, xen_start_info->nr_pages);
|
||||
mem_end = PFN_PHYS(max_pfn);
|
||||
ini_nr_pages = min(xen_get_pages_limit(), xen_start_info->nr_pages);
|
||||
mem_end = PFN_PHYS(ini_nr_pages);
|
||||
|
||||
memmap.nr_entries = ARRAY_SIZE(xen_e820_table.entries);
|
||||
set_xen_guest_handle(memmap.buffer, xen_e820_table.entries);
|
||||
|
@ -747,13 +855,35 @@ char * __init xen_memory_setup(void)
|
|||
/* Make sure the Xen-supplied memory map is well-ordered. */
|
||||
e820__update_table(&xen_e820_table);
|
||||
|
||||
/*
|
||||
* Check whether the kernel itself conflicts with the target E820 map.
|
||||
* Failing now is better than running into weird problems later due
|
||||
* to relocating (and even reusing) pages with kernel text or data.
|
||||
*/
|
||||
xen_chk_is_e820_usable(__pa_symbol(_text),
|
||||
__pa_symbol(_end) - __pa_symbol(_text),
|
||||
"kernel");
|
||||
|
||||
/*
|
||||
* Check for a conflict of the xen_start_info memory with the target
|
||||
* E820 map.
|
||||
*/
|
||||
xen_chk_is_e820_usable(__pa(xen_start_info), sizeof(*xen_start_info),
|
||||
"xen_start_info");
|
||||
|
||||
/*
|
||||
* Check for a conflict of the hypervisor supplied page tables with
|
||||
* the target E820 map.
|
||||
*/
|
||||
xen_pt_check_e820();
|
||||
|
||||
max_pages = xen_get_max_pages();
|
||||
|
||||
/* How many extra pages do we need due to remapping? */
|
||||
max_pages += xen_foreach_remap_area(max_pfn, xen_count_remap_pages);
|
||||
max_pages += xen_foreach_remap_area(xen_count_remap_pages);
|
||||
|
||||
if (max_pages > max_pfn)
|
||||
extra_pages += max_pages - max_pfn;
|
||||
if (max_pages > ini_nr_pages)
|
||||
extra_pages += max_pages - ini_nr_pages;
|
||||
|
||||
/*
|
||||
* Clamp the amount of extra memory to a EXTRA_MEM_RATIO
|
||||
|
@ -762,8 +892,8 @@ char * __init xen_memory_setup(void)
|
|||
* Make sure we have no memory above max_pages, as this area
|
||||
* isn't handled by the p2m management.
|
||||
*/
|
||||
extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
|
||||
extra_pages, max_pages - max_pfn);
|
||||
maxmem_pages = EXTRA_MEM_RATIO * min(ini_nr_pages, PFN_DOWN(MAXMEM));
|
||||
extra_pages = min3(maxmem_pages, extra_pages, max_pages - ini_nr_pages);
|
||||
i = 0;
|
||||
addr = xen_e820_table.entries[0].addr;
|
||||
size = xen_e820_table.entries[0].size;
|
||||
|
@ -819,23 +949,6 @@ char * __init xen_memory_setup(void)
|
|||
|
||||
e820__update_table(e820_table);
|
||||
|
||||
/*
|
||||
* Check whether the kernel itself conflicts with the target E820 map.
|
||||
* Failing now is better than running into weird problems later due
|
||||
* to relocating (and even reusing) pages with kernel text or data.
|
||||
*/
|
||||
if (xen_is_e820_reserved(__pa_symbol(_text),
|
||||
__pa_symbol(__bss_stop) - __pa_symbol(_text))) {
|
||||
xen_raw_console_write("Xen hypervisor allocated kernel memory conflicts with E820 map\n");
|
||||
BUG();
|
||||
}
|
||||
|
||||
/*
|
||||
* Check for a conflict of the hypervisor supplied page tables with
|
||||
* the target E820 map.
|
||||
*/
|
||||
xen_pt_check_e820();
|
||||
|
||||
xen_reserve_xen_mfnlist();
|
||||
|
||||
/* Check for a conflict of the initrd with the target E820 map. */
|
||||
|
@ -863,7 +976,7 @@ char * __init xen_memory_setup(void)
|
|||
* Set identity map on non-RAM pages and prepare remapping the
|
||||
* underlying RAM.
|
||||
*/
|
||||
xen_foreach_remap_area(max_pfn, xen_set_identity_and_remap_chunk);
|
||||
xen_foreach_remap_area(xen_set_identity_and_remap_chunk);
|
||||
|
||||
pr_info("Released %ld page(s)\n", xen_released_pages);
|
||||
|
||||
|
|
|
@@ -43,8 +43,12 @@ void xen_mm_unpin_all(void);
 #ifdef CONFIG_X86_64
 void __init xen_relocate_p2m(void);
 #endif
+void __init xen_do_remap_nonram(void);
+void __init xen_add_remap_nonram(phys_addr_t maddr, phys_addr_t paddr,
+				 unsigned long size);

-bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size);
+void __init xen_chk_is_e820_usable(phys_addr_t start, phys_addr_t size,
+				   const char *component);
 unsigned long __ref xen_chk_extra_mem(unsigned long pfn);
 void __init xen_inv_extra_mem(void);
 void __init xen_remap_memory(void);
@@ -2911,8 +2911,12 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 	struct bfq_iocq_bfqq_data *bfqq_data = &bic->bfqq_data[a_idx];

 	/* if a merge has already been setup, then proceed with that first */
-	if (bfqq->new_bfqq)
-		return bfqq->new_bfqq;
+	new_bfqq = bfqq->new_bfqq;
+	if (new_bfqq) {
+		while (new_bfqq->new_bfqq)
+			new_bfqq = new_bfqq->new_bfqq;
+		return new_bfqq;
+	}

 	/*
 	 * Check delayed stable merge for rotational or non-queueing
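The point of the new loop is that bfqq->new_bfqq links can form a chain once queues are merged repeatedly, so only the tail of the chain is a valid merge target. A toy sketch of the same walk (toy types, not BFQ itself):

    /* Follow a merge chain to its tail, mirroring the hunk above. */
    #include <stdio.h>
    #include <stddef.h>

    struct queue {
        const char *name;
        struct queue *new_q;    /* stand-in for bfqq->new_bfqq */
    };

    static struct queue *chain_tail(struct queue *q)
    {
        while (q->new_q)
            q = q->new_q;
        return q;
    }

    int main(void)
    {
        struct queue c = { "c", NULL };
        struct queue b = { "b", &c };
        struct queue a = { "a", &b };

        printf("merge target of %s is %s\n", a.name, chain_tail(&a)->name);
        return 0;
    }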
@@ -3125,10 +3129,12 @@ void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq)
 	bfq_put_queue(bfqq);
 }

-static void
-bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
-		struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
+static struct bfq_queue *bfq_merge_bfqqs(struct bfq_data *bfqd,
+					 struct bfq_io_cq *bic,
+					 struct bfq_queue *bfqq)
 {
+	struct bfq_queue *new_bfqq = bfqq->new_bfqq;
+
 	bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
 		(unsigned long)new_bfqq->pid);
 	/* Save weight raising and idle window of the merged queues */

@@ -3222,6 +3228,8 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
 	bfq_reassign_last_bfqq(bfqq, new_bfqq);

 	bfq_release_process_ref(bfqd, bfqq);
+
+	return new_bfqq;
 }

 static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,

@@ -3257,14 +3265,8 @@ static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
 		 * fulfilled, i.e., bic can be redirected to new_bfqq
 		 * and bfqq can be put.
 		 */
-		bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq,
-				new_bfqq);
-		/*
-		 * If we get here, bio will be queued into new_queue,
-		 * so use new_bfqq to decide whether bio and rq can be
-		 * merged.
-		 */
-		bfqq = new_bfqq;
+		while (bfqq != new_bfqq)
+			bfqq = bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq);

 		/*
 		 * Change also bfqd->bio_bfqq, as

@@ -5699,9 +5701,7 @@ bfq_do_early_stable_merge(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 	 * state before killing it.
 	 */
 	bfqq->bic = bic;
-	bfq_merge_bfqqs(bfqd, bic, bfqq, new_bfqq);
-
-	return new_bfqq;
+	return bfq_merge_bfqqs(bfqd, bic, bfqq);
 }

 /*

@@ -6156,6 +6156,7 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
 	bool waiting, idle_timer_disabled = false;

 	if (new_bfqq) {
+		struct bfq_queue *old_bfqq = bfqq;
 		/*
 		 * Release the request's reference to the old bfqq
 		 * and make sure one is taken to the shared queue.

@@ -6172,18 +6173,18 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
 		 * new_bfqq.
 		 */
 		if (bic_to_bfqq(RQ_BIC(rq), true,
-				bfq_actuator_index(bfqd, rq->bio)) == bfqq)
-			bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
-					bfqq, new_bfqq);
+				bfq_actuator_index(bfqd, rq->bio)) == bfqq) {
+			while (bfqq != new_bfqq)
+				bfqq = bfq_merge_bfqqs(bfqd, RQ_BIC(rq), bfqq);
+		}

-		bfq_clear_bfqq_just_created(bfqq);
+		bfq_clear_bfqq_just_created(old_bfqq);
 		/*
 		 * rq is about to be enqueued into new_bfqq,
 		 * release rq reference on bfqq
 		 */
-		bfq_put_queue(bfqq);
+		bfq_put_queue(old_bfqq);
 		rq->elv.priv[1] = new_bfqq;
 		bfqq = new_bfqq;
 	}

 	bfq_update_io_thinktime(bfqd, bfqq);

@@ -6721,7 +6722,7 @@ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
 {
 	bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");

-	if (bfqq_process_refs(bfqq) == 1) {
+	if (bfqq_process_refs(bfqq) == 1 && !bfqq->new_bfqq) {
 		bfqq->pid = current->pid;
 		bfq_clear_bfqq_coop(bfqq);
 		bfq_clear_bfqq_split_coop(bfqq);

@@ -6819,6 +6820,31 @@ static void bfq_prepare_request(struct request *rq)
 	rq->elv.priv[0] = rq->elv.priv[1] = NULL;
 }

+static struct bfq_queue *bfq_waker_bfqq(struct bfq_queue *bfqq)
+{
+	struct bfq_queue *new_bfqq = bfqq->new_bfqq;
+	struct bfq_queue *waker_bfqq = bfqq->waker_bfqq;
+
+	if (!waker_bfqq)
+		return NULL;
+
+	while (new_bfqq) {
+		if (new_bfqq == waker_bfqq) {
+			/*
+			 * If waker_bfqq is in the merge chain and current
+			 * is the only process referencing it, waker_bfqq
+			 * will go away and must not be used.
+			 */
+			if (bfqq_process_refs(waker_bfqq) == 1)
+				return NULL;
+			break;
+		}
+
+		new_bfqq = new_bfqq->new_bfqq;
+	}
+
+	return waker_bfqq;
+}
+
 /*
  * If needed, init rq, allocate bfq data structures associated with
  * rq, and increment reference counters in the destination bfq_queue

@@ -6880,7 +6906,7 @@ static struct bfq_queue *bfq_init_rq(struct request *rq)
 	/* If the queue was seeky for too long, break it apart. */
 	if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq) &&
 	    !bic->bfqq_data[a_idx].stably_merged) {
-		struct bfq_queue *old_bfqq = bfqq;
+		struct bfq_queue *waker_bfqq = bfq_waker_bfqq(bfqq);

 		/* Update bic before losing reference to bfqq */
 		if (bfq_bfqq_in_large_burst(bfqq))

@@ -6900,7 +6926,7 @@ static struct bfq_queue *bfq_init_rq(struct request *rq)
 			bfqq_already_existing = true;

 		if (!bfqq_already_existing) {
-			bfqq->waker_bfqq = old_bfqq->waker_bfqq;
+			bfqq->waker_bfqq = waker_bfqq;
 			bfqq->tentative_waker_bfqq = NULL;

 			/*

@@ -6910,7 +6936,7 @@ static struct bfq_queue *bfq_init_rq(struct request *rq)
 			 * woken_list of the waker. See
 			 * bfq_check_waker for details.
 			 */
-			if (bfqq->waker_bfqq)
+			if (waker_bfqq)
 				hlist_add_head(&bfqq->woken_list_node,
 					       &bfqq->waker_bfqq->woken_list);
 		}

@@ -6932,7 +6958,8 @@ static struct bfq_queue *bfq_init_rq(struct request *rq)
 	 * addition, if the queue has also just been split, we have to
 	 * resume its state.
 	 */
-	if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) {
+	if (likely(bfqq != &bfqd->oom_bfqq) && !bfqq->new_bfqq &&
+	    bfqq_process_refs(bfqq) == 1) {
 		bfqq->bic = bic;
 		if (split) {
 			/*
@@ -577,9 +577,11 @@ static bool blk_add_partition(struct gendisk *disk,

 	part = add_partition(disk, p, from, size, state->parts[p].flags,
 			     &state->parts[p].info);
-	if (IS_ERR(part) && PTR_ERR(part) != -ENXIO) {
-		printk(KERN_ERR " %s: p%d could not be added: %ld\n",
-		       disk->disk_name, p, -PTR_ERR(part));
+	if (IS_ERR(part)) {
+		if (PTR_ERR(part) != -ENXIO) {
+			printk(KERN_ERR " %s: p%d could not be added: %pe\n",
+			       disk->disk_name, p, part);
+		}
 		return true;
 	}
@@ -60,17 +60,18 @@ struct key *find_asymmetric_key(struct key *keyring,
 	char *req, *p;
 	int len;

-	WARN_ON(!id_0 && !id_1 && !id_2);
-
 	if (id_0) {
 		lookup = id_0->data;
 		len = id_0->len;
 	} else if (id_1) {
 		lookup = id_1->data;
 		len = id_1->len;
-	} else {
+	} else if (id_2) {
 		lookup = id_2->data;
 		len = id_2->len;
+	} else {
+		WARN_ON(1);
+		return ERR_PTR(-EINVAL);
 	}

 	/* Construct an identifier "id:<keyid>". */
crypto/xor.c
@@ -83,33 +83,30 @@ static void __init
 do_xor_speed(struct xor_block_template *tmpl, void *b1, void *b2)
 {
 	int speed;
-	int i, j;
-	ktime_t min, start, diff;
+	unsigned long reps;
+	ktime_t min, start, t0;

 	tmpl->next = template_list;
 	template_list = tmpl;

 	preempt_disable();

-	min = (ktime_t)S64_MAX;
-	for (i = 0; i < 3; i++) {
-		start = ktime_get();
-		for (j = 0; j < REPS; j++) {
-			mb(); /* prevent loop optimization */
-			tmpl->do_2(BENCH_SIZE, b1, b2);
-			mb();
-		}
-		diff = ktime_sub(ktime_get(), start);
-		if (diff < min)
-			min = diff;
-	}
+	reps = 0;
+	t0 = ktime_get();
+	/* delay start until time has advanced */
+	while ((start = ktime_get()) == t0)
+		cpu_relax();
+	do {
+		mb(); /* prevent loop optimization */
+		tmpl->do_2(BENCH_SIZE, b1, b2);
+		mb();
+	} while (reps++ < REPS || (t0 = ktime_get()) == start);
+	min = ktime_sub(t0, start);

 	preempt_enable();

 	// bytes/ns == GB/s, multiply by 1000 to get MB/s [not MiB/s]
 	if (!min)
 		min = 1;
-	speed = (1000 * REPS * BENCH_SIZE) / (unsigned int)ktime_to_ns(min);
+	speed = (1000 * reps * BENCH_SIZE) / (unsigned int)ktime_to_ns(min);
 	tmpl->speed = speed;

 	pr_info("   %-16s: %5d MB/sec\n", tmpl->name, speed);
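The rework replaces fixed-iteration timing with an edge-aligned window: spin until the clock ticks so the start sits on a clock edge, run at least REPS iterations, then keep going until the next observed tick, so both ends of the window are clock edges. A userspace sketch of the same pattern (constants assumed, not the kernel code):

    /* Edge-aligned timing window, as in the do_xor_speed() rework above. */
    #include <stdio.h>
    #include <time.h>

    #define MIN_REPS 800    /* assumed stand-in for REPS */

    static long long now_ns(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000000LL + ts.tv_nsec;
    }

    int main(void)
    {
        volatile unsigned long sink = 0;
        unsigned long reps = 0;
        long long t0, start, end = 0;

        /* Spin until the clock advances so 'start' sits on an edge. */
        t0 = now_ns();
        while ((start = now_ns()) == t0)
            ;

        /* At least MIN_REPS iterations, then stop on the next tick. */
        do {
            sink += reps;    /* stand-in for the measured work */
        } while (reps++ < MIN_REPS || (end = now_ns()) == start);

        printf("%lu reps in %lld ns\n", reps, end - start);
        return 0;
    }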
@@ -167,8 +167,11 @@ show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
 #define GET_BIT_WIDTH(reg) ((reg)->access_width ? (8 << ((reg)->access_width - 1)) : (reg)->bit_width)

 /* Shift and apply the mask for CPC reads/writes */
-#define MASK_VAL(reg, val) (((val) >> (reg)->bit_offset) & \
+#define MASK_VAL_READ(reg, val) (((val) >> (reg)->bit_offset) & \
 					GENMASK(((reg)->bit_width) - 1, 0))
+#define MASK_VAL_WRITE(reg, prev_val, val) \
+	((((val) & GENMASK(((reg)->bit_width) - 1, 0)) << (reg)->bit_offset) | \
+	 ((prev_val) & ~(GENMASK(((reg)->bit_width) - 1, 0) << (reg)->bit_offset)))

 static ssize_t show_feedback_ctrs(struct kobject *kobj,
 		struct kobj_attribute *attr, char *buf)
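The two macros are standard field extract/insert: MASK_VAL_READ shifts the register value down and masks off bit_width bits; MASK_VAL_WRITE masks and shifts the new field up, then merges it with the untouched bits of the previous value, which is why cpc_write() now has to read the register first. A worked example with a hypothetical 4-bit field at offset 8:

    /* Field extract/insert arithmetic behind MASK_VAL_READ/MASK_VAL_WRITE. */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        const unsigned int bit_offset = 8, bit_width = 4;   /* made up */
        const uint64_t mask = (1ULL << bit_width) - 1;
        uint64_t reg = 0xABCD;   /* previous register contents */
        uint64_t val = 0x5;      /* new field value */

        /* Read: shift down, then mask -> 0xB */
        uint64_t field = (reg >> bit_offset) & mask;

        /* Write: insert the new field, keep surrounding bits -> 0xA5CD */
        uint64_t out = ((val & mask) << bit_offset) |
                       (reg & ~(mask << bit_offset));

        printf("field=%#llx out=%#llx\n",
               (unsigned long long)field, (unsigned long long)out);
        return 0;
    }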
@@ -852,6 +855,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)

 	/* Store CPU Logical ID */
 	cpc_ptr->cpu_id = pr->id;
+	spin_lock_init(&cpc_ptr->rmw_lock);

 	/* Parse PSD data for this CPU */
 	ret = acpi_get_psd(cpc_ptr, handle);

@@ -1057,7 +1061,7 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
 	}

 	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
-		*val = MASK_VAL(reg, *val);
+		*val = MASK_VAL_READ(reg, *val);

 	return 0;
 }

@@ -1066,9 +1070,11 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
 {
 	int ret_val = 0;
 	int size;
+	u64 prev_val;
 	void __iomem *vaddr = NULL;
 	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
 	struct cpc_reg *reg = &reg_res->cpc_entry.reg;
+	struct cpc_desc *cpc_desc;

 	size = GET_BIT_WIDTH(reg);

@@ -1101,8 +1107,34 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
 		return acpi_os_write_memory((acpi_physical_address)reg->address,
 				val, size);

-	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
-		val = MASK_VAL(reg, val);
+	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+		cpc_desc = per_cpu(cpc_desc_ptr, cpu);
+		if (!cpc_desc) {
+			pr_debug("No CPC descriptor for CPU:%d\n", cpu);
+			return -ENODEV;
+		}
+
+		spin_lock(&cpc_desc->rmw_lock);
+		switch (size) {
+		case 8:
+			prev_val = readb_relaxed(vaddr);
+			break;
+		case 16:
+			prev_val = readw_relaxed(vaddr);
+			break;
+		case 32:
+			prev_val = readl_relaxed(vaddr);
+			break;
+		case 64:
+			prev_val = readq_relaxed(vaddr);
+			break;
+		default:
+			spin_unlock(&cpc_desc->rmw_lock);
+			return -EFAULT;
+		}
+		val = MASK_VAL_WRITE(reg, prev_val, val);
+		val |= prev_val;
+	}

 	switch (size) {
 	case 8:

@@ -1129,6 +1161,9 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
 		break;
 	}

+	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
+		spin_unlock(&cpc_desc->rmw_lock);
+
 	return ret_val;
 }
@@ -544,8 +544,9 @@ int acpi_device_setup_files(struct acpi_device *dev)
 	 * If device has _STR, 'description' file is created
 	 */
 	if (acpi_has_method(dev->handle, "_STR")) {
-		status = acpi_evaluate_object(dev->handle, "_STR",
-					      NULL, &buffer);
+		status = acpi_evaluate_object_typed(dev->handle, "_STR",
+						    NULL, &buffer,
+						    ACPI_TYPE_BUFFER);
 		if (ACPI_FAILURE(status))
 			buffer.pointer = NULL;
 		dev->pnp.str_obj = buffer.pointer;

@@ -376,10 +376,8 @@ static int tps68470_pmic_opregion_probe(struct platform_device *pdev)
 	struct tps68470_pmic_opregion *opregion;
 	acpi_status status;

-	if (!dev || !tps68470_regmap) {
-		dev_warn(dev, "dev or regmap is NULL\n");
-		return -EINVAL;
-	}
+	if (!tps68470_regmap)
+		return dev_err_probe(dev, -EINVAL, "regmap is missing\n");

 	if (!handle) {
 		dev_warn(dev, "acpi handle is NULL\n");

@@ -508,6 +508,12 @@ static const struct dmi_system_id maingear_laptop[] = {
 			DMI_MATCH(DMI_BOARD_NAME, "GMxXGxx"),
 		},
 	},
+	{
+		/* TongFang GMxXGxX/TUXEDO Polaris 15 Gen5 AMD */
+		.matches = {
+			DMI_MATCH(DMI_BOARD_NAME, "GMxXGxX"),
+		},
+	},
 	{
 		/* TongFang GMxXGxx sold as Eluktronics Inc. RP-15 */
 		.matches = {
@@ -618,6 +618,14 @@ void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
 		list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
 			struct ata_queued_cmd *qc;

+			/*
+			 * If the scmd was added to EH, via ata_qc_schedule_eh() ->
+			 * scsi_timeout() -> scsi_eh_scmd_add(), scsi_timeout() will
+			 * have set DID_TIME_OUT (since libata does not have an abort
+			 * handler). Thus, to clear DID_TIME_OUT, clear the host byte.
+			 */
+			set_host_byte(scmd, DID_OK);
+
 			ata_qc_for_each_raw(ap, qc, i) {
 				if (qc->flags & ATA_QCFLAG_ACTIVE &&
 				    qc->scsicmd == scmd)

@@ -1725,9 +1725,6 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
 		set_status_byte(qc->scsicmd, SAM_STAT_CHECK_CONDITION);
 	} else if (is_error && !have_sense) {
 		ata_gen_ata_sense(qc);
-	} else {
-		/* Keep the SCSI ML and status byte, clear host byte. */
-		cmd->result &= 0x0000ffff;
 	}

 	ata_qc_done(qc);

@@ -2393,7 +2390,7 @@ static unsigned int ata_msense_control(struct ata_device *dev, u8 *buf,
 	case ALL_SUB_MPAGES:
 		n = ata_msense_control_spg0(dev, buf, changeable);
 		n += ata_msense_control_spgt2(dev, buf + n, CDL_T2A_SUB_MPAGE);
-		n += ata_msense_control_spgt2(dev, buf + n, CDL_T2A_SUB_MPAGE);
+		n += ata_msense_control_spgt2(dev, buf + n, CDL_T2B_SUB_MPAGE);
 		n += ata_msense_control_ata_feature(dev, buf + n);
 		return n;
 	default:
@@ -4550,9 +4550,11 @@ EXPORT_SYMBOL_GPL(device_destroy);
 */
 int device_rename(struct device *dev, const char *new_name)
 {
+	struct subsys_private *sp = NULL;
 	struct kobject *kobj = &dev->kobj;
 	char *old_device_name = NULL;
 	int error;
+	bool is_link_renamed = false;

 	dev = get_device(dev);
 	if (!dev)

@@ -4567,7 +4569,7 @@ int device_rename(struct device *dev, const char *new_name)
 	}

 	if (dev->class) {
-		struct subsys_private *sp = class_to_subsys(dev->class);
+		sp = class_to_subsys(dev->class);

 		if (!sp) {
 			error = -EINVAL;

@@ -4576,16 +4578,19 @@ int device_rename(struct device *dev, const char *new_name)

 		error = sysfs_rename_link_ns(&sp->subsys.kobj, kobj, old_device_name,
 					     new_name, kobject_namespace(kobj));
-		subsys_put(sp);
 		if (error)
 			goto out;
+
+		is_link_renamed = true;
 	}

 	error = kobject_rename(kobj, new_name);
-	if (error)
-		goto out;

 out:
+	if (error && is_link_renamed)
+		sysfs_rename_link_ns(&sp->subsys.kobj, kobj, new_name,
+				     old_device_name, kobject_namespace(kobj));
+	subsys_put(sp);

 	put_device(dev);

 	kfree(old_device_name);
@@ -844,6 +844,26 @@ static void fw_log_firmware_info(const struct firmware *fw, const char *name,
 {}
 #endif

+/*
+ * Reject firmware file names with ".." path components.
+ * There are drivers that construct firmware file names from device-supplied
+ * strings, and we don't want some device to be able to tell us "I would like to
+ * be sent my firmware from ../../../etc/shadow, please".
+ *
+ * Search for ".." surrounded by either '/' or start/end of string.
+ *
+ * This intentionally only looks at the firmware name, not at the firmware base
+ * directory or at symlink contents.
+ */
+static bool name_contains_dotdot(const char *name)
+{
+	size_t name_len = strlen(name);
+
+	return strcmp(name, "..") == 0 || strncmp(name, "../", 3) == 0 ||
+	       strstr(name, "/../") != NULL ||
+	       (name_len >= 3 && strcmp(name + name_len - 3, "/..") == 0);
+}
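Because the check is purely lexical, it can be exercised in isolation. A userspace copy of the function above with a few probes (the sample names are made up):

    /* Userspace replica of name_contains_dotdot() plus a few probes. */
    #include <stdio.h>
    #include <string.h>
    #include <stdbool.h>

    static bool name_contains_dotdot(const char *name)
    {
        size_t name_len = strlen(name);

        return strcmp(name, "..") == 0 || strncmp(name, "../", 3) == 0 ||
               strstr(name, "/../") != NULL ||
               (name_len >= 3 && strcmp(name + name_len - 3, "/..") == 0);
    }

    int main(void)
    {
        const char *names[] = {
            "foo/bar..bin",          /* ".." not a path component: allowed */
            "foo/../bar.bin",        /* rejected */
            "../../../etc/shadow",   /* rejected */
            "wifi/fw-2.bin",         /* allowed */
        };

        for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); i++)
            printf("%-22s %s\n", names[i],
                   name_contains_dotdot(names[i]) ? "rejected" : "allowed");
        return 0;
    }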
 /* called from request_firmware() and request_firmware_work_func() */
 static int
 _request_firmware(const struct firmware **firmware_p, const char *name,

@@ -864,6 +884,14 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
 		goto out;
 	}

+	if (name_contains_dotdot(name)) {
+		dev_warn(device,
+			 "Firmware load for '%s' refused, path contains '..' component\n",
+			 name);
+		ret = -EINVAL;
+		goto out;
+	}
+
 	ret = _request_firmware_prepare(&fw, name, device, buf, size,
 					offset, opt_flags);
 	if (ret <= 0) /* error or already assigned */

@@ -941,6 +969,8 @@ out:
 * @name will be used as $FIRMWARE in the uevent environment and
 * should be distinctive enough not to be confused with any other
 * firmware image for this or any other device.
+* It must not contain any ".." path components - "foo/bar..bin" is
+* allowed, but "foo/../bar.bin" is not.
 *
 * Caller must hold the reference count of @device.
 *
@@ -66,27 +66,31 @@ int module_add_driver(struct module *mod, struct device_driver *drv)
 	driver_name = make_driver_name(drv);
 	if (!driver_name) {
 		ret = -ENOMEM;
-		goto out;
+		goto out_remove_kobj;
 	}

 	module_create_drivers_dir(mk);
 	if (!mk->drivers_dir) {
 		ret = -EINVAL;
-		goto out;
+		goto out_free_driver_name;
 	}

 	ret = sysfs_create_link(mk->drivers_dir, &drv->p->kobj, driver_name);
 	if (ret)
-		goto out;
+		goto out_remove_drivers_dir;

 	kfree(driver_name);

 	return 0;
-out:
-	sysfs_remove_link(&drv->p->kobj, "module");

+out_remove_drivers_dir:
 	sysfs_remove_link(mk->drivers_dir, driver_name);
+
+out_free_driver_name:
 	kfree(driver_name);
+
+out_remove_kobj:
+	sysfs_remove_link(&drv->p->kobj, "module");
 	return ret;
 }
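The hunk replaces a single catch-all label, which unwound steps that may never have run, with one label per completed step, so each failure path releases exactly what was acquired. The idiom in miniature (resources and names are illustrative only):

    /* Labeled unwind: each label undoes only the steps that succeeded. */
    #include <stdio.h>
    #include <stdlib.h>

    static int setup(char **a_out, char **b_out)
    {
        char *a, *b;
        int ret;

        a = malloc(16);
        if (!a) {
            ret = -1;
            goto out;
        }

        b = malloc(16);
        if (!b) {
            ret = -1;
            goto out_free_a;    /* only 'a' exists at this point */
        }

        *a_out = a;             /* success: caller owns both buffers */
        *b_out = b;
        return 0;

    out_free_a:
        free(a);
    out:
        return ret;
    }

    int main(void)
    {
        char *a, *b;

        if (!setup(&a, &b)) {
            puts("setup ok");
            free(b);
            free(a);
        }
        return 0;
    }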
@@ -3135,7 +3135,7 @@ static int genpd_summary_one(struct seq_file *s,
 	else
 		snprintf(state, sizeof(state), "%s",
 			 status_lookup[genpd->status]);
-	seq_printf(s, "%-30s  %-50s %u", genpd->name, state, genpd->performance_state);
+	seq_printf(s, "%-30s  %-49s %u", genpd->name, state, genpd->performance_state);

 	/*
 	 * Modifications on the list require holding locks on both

@@ -3392,10 +3392,12 @@ void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local)
 void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local)
 {
 	unsigned long flags;
-	if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
-		return;

 	spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
+	if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0) {
+		spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
+		return;
+	}

 	if (val == 0) {
 		drbd_uuid_move_history(device);
 		device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP];
@@ -876,7 +876,7 @@ is_valid_state(struct drbd_device *device, union drbd_state ns)
 		  ns.disk == D_OUTDATED)
 		rv = SS_CONNECTED_OUTDATES;

-	else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
+	else if (nc && (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
 		 (nc->verify_alg[0] == 0))
 		rv = SS_NO_VERIFY_ALG;

@@ -181,6 +181,17 @@ static void nbd_requeue_cmd(struct nbd_cmd *cmd)
 {
 	struct request *req = blk_mq_rq_from_pdu(cmd);

+	lockdep_assert_held(&cmd->lock);
+
+	/*
+	 * Clear INFLIGHT flag so that this cmd won't be completed in
+	 * normal completion path
+	 *
+	 * INFLIGHT flag will be set when the cmd is queued to nbd next
+	 * time.
+	 */
+	__clear_bit(NBD_CMD_INFLIGHT, &cmd->flags);
+
 	if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags))
 		blk_mq_requeue_request(req, true);
 }

@@ -462,8 +473,8 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req)
 		nbd_mark_nsock_dead(nbd, nsock, 1);
 		mutex_unlock(&nsock->tx_lock);
 	}
-	mutex_unlock(&cmd->lock);
 	nbd_requeue_cmd(cmd);
+	mutex_unlock(&cmd->lock);
 	nbd_config_put(nbd);
 	return BLK_EH_DONE;
 }
@@ -68,9 +68,6 @@ struct ublk_rq_data {
 	struct llist_node node;

 	struct kref ref;
-	__u64 sector;
-	__u32 operation;
-	__u32 nr_zones;
 };

 struct ublk_uring_cmd_pdu {

@@ -215,6 +212,33 @@ static inline bool ublk_queue_is_zoned(struct ublk_queue *ubq)

 #ifdef CONFIG_BLK_DEV_ZONED

+struct ublk_zoned_report_desc {
+	__u64 sector;
+	__u32 operation;
+	__u32 nr_zones;
+};
+
+static DEFINE_XARRAY(ublk_zoned_report_descs);
+
+static int ublk_zoned_insert_report_desc(const struct request *req,
+		struct ublk_zoned_report_desc *desc)
+{
+	return xa_insert(&ublk_zoned_report_descs, (unsigned long)req,
+			    desc, GFP_KERNEL);
+}
+
+static struct ublk_zoned_report_desc *ublk_zoned_erase_report_desc(
+		const struct request *req)
+{
+	return xa_erase(&ublk_zoned_report_descs, (unsigned long)req);
+}
+
+static struct ublk_zoned_report_desc *ublk_zoned_get_report_desc(
+		const struct request *req)
+{
+	return xa_load(&ublk_zoned_report_descs, (unsigned long)req);
+}
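The fix moves the per-request report bookkeeping out of the request pdu into a table keyed by the request pointer, looked up again when the I/O descriptor is built. The same pattern in portable C, with a fixed-size array standing in for the kernel's xarray (all names hypothetical):

    /* Side table keyed by request pointer, mimicking the xarray use above. */
    #include <stdio.h>
    #include <stddef.h>

    struct request { int tag; };

    struct report_desc {
        unsigned long long sector;
        unsigned int nr_zones;
    };

    #define TBL_SLOTS 16

    static struct {
        const struct request *key;
        struct report_desc desc;
    } tbl[TBL_SLOTS];

    static int desc_insert(const struct request *req, struct report_desc d)
    {
        for (size_t i = 0; i < TBL_SLOTS; i++) {
            if (!tbl[i].key) {
                tbl[i].key = req;
                tbl[i].desc = d;
                return 0;
            }
        }
        return -1;    /* no free slot */
    }

    static struct report_desc *desc_lookup(const struct request *req)
    {
        for (size_t i = 0; i < TBL_SLOTS; i++)
            if (tbl[i].key == req)
                return &tbl[i].desc;
        return NULL;
    }

    int main(void)
    {
        struct request rq = { 1 };
        struct report_desc d = { 2048, 4 };

        desc_insert(&rq, d);
        printf("nr_zones=%u\n", desc_lookup(&rq)->nr_zones);
        return 0;
    }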
 static int ublk_get_nr_zones(const struct ublk_device *ub)
 {
 	const struct ublk_param_basic *p = &ub->params.basic;

@@ -321,7 +345,7 @@ static int ublk_report_zones(struct gendisk *disk, sector_t sector,
 		unsigned int zones_in_request =
 			min_t(unsigned int, remaining_zones, max_zones_per_request);
 		struct request *req;
-		struct ublk_rq_data *pdu;
+		struct ublk_zoned_report_desc desc;
 		blk_status_t status;

 		memset(buffer, 0, buffer_length);

@@ -332,20 +356,23 @@ static int ublk_report_zones(struct gendisk *disk, sector_t sector,
 			goto out;
 		}

-		pdu = blk_mq_rq_to_pdu(req);
-		pdu->operation = UBLK_IO_OP_REPORT_ZONES;
-		pdu->sector = sector;
-		pdu->nr_zones = zones_in_request;
+		desc.operation = UBLK_IO_OP_REPORT_ZONES;
+		desc.sector = sector;
+		desc.nr_zones = zones_in_request;
+		ret = ublk_zoned_insert_report_desc(req, &desc);
+		if (ret)
+			goto free_req;

 		ret = blk_rq_map_kern(disk->queue, req, buffer, buffer_length,
 					GFP_KERNEL);
-		if (ret) {
-			blk_mq_free_request(req);
-			goto out;
-		}
+		if (ret)
+			goto erase_desc;

 		status = blk_execute_rq(req, 0);
 		ret = blk_status_to_errno(status);
+erase_desc:
+		ublk_zoned_erase_report_desc(req);
+free_req:
+		blk_mq_free_request(req);
 		if (ret)
 			goto out;

@@ -379,7 +406,7 @@ static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq,
 {
 	struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag);
 	struct ublk_io *io = &ubq->ios[req->tag];
-	struct ublk_rq_data *pdu = blk_mq_rq_to_pdu(req);
+	struct ublk_zoned_report_desc *desc;
 	u32 ublk_op;

 	switch (req_op(req)) {

@@ -402,12 +429,15 @@ static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq,
 		ublk_op = UBLK_IO_OP_ZONE_RESET_ALL;
 		break;
 	case REQ_OP_DRV_IN:
-		ublk_op = pdu->operation;
+		desc = ublk_zoned_get_report_desc(req);
+		if (!desc)
+			return BLK_STS_IOERR;
+		ublk_op = desc->operation;
 		switch (ublk_op) {
 		case UBLK_IO_OP_REPORT_ZONES:
 			iod->op_flags = ublk_op | ublk_req_build_flags(req);
-			iod->nr_zones = pdu->nr_zones;
-			iod->start_sector = pdu->sector;
+			iod->nr_zones = desc->nr_zones;
+			iod->start_sector = desc->sector;
 			return BLK_STS_OK;
 		default:
 			return BLK_STS_IOERR;
@@ -1352,7 +1352,10 @@ static int btusb_submit_intr_urb(struct hci_dev *hdev, gfp_t mem_flags)
 	if (!urb)
 		return -ENOMEM;

-	size = le16_to_cpu(data->intr_ep->wMaxPacketSize);
+	/* Use maximum HCI Event size so the USB stack handles
+	 * ZPL/short-transfer automatically.
+	 */
+	size = HCI_MAX_EVENT_SIZE;

 	buf = kmalloc(size, mem_flags);
 	if (!buf) {

@@ -85,6 +85,7 @@ static int integrator_ap_lm_probe(struct platform_device *pdev)
 		return -ENODEV;
 	}
 	map = syscon_node_to_regmap(syscon);
+	of_node_put(syscon);
 	if (IS_ERR(map)) {
 		dev_err(dev,
 			"could not find Integrator/AP system controller\n");

@@ -578,6 +578,15 @@ static const struct mhi_pci_dev_info mhi_telit_fn990_info = {
 	.mru_default = 32768,
 };

+static const struct mhi_pci_dev_info mhi_telit_fe990a_info = {
+	.name = "telit-fe990a",
+	.config = &modem_telit_fn990_config,
+	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+	.dma_data_width = 32,
+	.sideband_wake = false,
+	.mru_default = 32768,
+};
+
 /* Keep the list sorted based on the PID. New VID should be added as the last entry */
 static const struct pci_device_id mhi_pci_id_table[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304),

@@ -595,9 +604,9 @@ static const struct pci_device_id mhi_pci_id_table[] = {
 	/* Telit FN990 */
 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2010),
 		.driver_data = (kernel_ulong_t) &mhi_telit_fn990_info },
-	/* Telit FE990 */
+	/* Telit FE990A */
 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2015),
-		.driver_data = (kernel_ulong_t) &mhi_telit_fn990_info },
+		.driver_data = (kernel_ulong_t) &mhi_telit_fe990a_info },
 	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308),
 		.driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info },
 	{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1001), /* EM120R-GL (sdx24) */
@@ -94,8 +94,10 @@ static int bcm2835_rng_init(struct hwrng *rng)
 		return ret;

 	ret = reset_control_reset(priv->reset);
-	if (ret)
+	if (ret) {
+		clk_disable_unprepare(priv->clk);
 		return ret;
+	}

 	if (priv->mask_interrupts) {
 		/* mask the interrupt */

@@ -624,6 +624,7 @@ static int __maybe_unused cctrng_resume(struct device *dev)
 	/* wait for Cryptocell reset completion */
 	if (!cctrng_wait_for_reset_completion(drvdata)) {
 		dev_err(dev, "Cryptocell reset not completed");
+		clk_disable_unprepare(drvdata->clk);
 		return -EBUSY;
 	}

@@ -142,7 +142,7 @@ static int mtk_rng_probe(struct platform_device *pdev)
 	dev_set_drvdata(&pdev->dev, priv);
 	pm_runtime_set_autosuspend_delay(&pdev->dev, RNG_AUTOSUSPEND_TIMEOUT);
 	pm_runtime_use_autosuspend(&pdev->dev);
-	pm_runtime_enable(&pdev->dev);
+	devm_pm_runtime_enable(&pdev->dev);

 	dev_info(&pdev->dev, "registered RNG driver\n");

@@ -47,6 +47,8 @@ static ssize_t tpm_dev_transmit(struct tpm_chip *chip, struct tpm_space *space,

 	if (!ret)
 		ret = tpm2_commit_space(chip, space, buf, &len);
+	else
+		tpm2_flush_space(chip);

 out_rc:
 	return ret ? ret : len;

@@ -166,6 +166,9 @@ void tpm2_flush_space(struct tpm_chip *chip)
 	struct tpm_space *space = &chip->work_space;
 	int i;

+	if (!space)
+		return;
+
 	for (i = 0; i < ARRAY_SIZE(space->context_tbl); i++)
 		if (space->context_tbl[i] && ~space->context_tbl[i])
 			tpm2_flush_context(chip, space->context_tbl[i]);
@@ -66,6 +66,7 @@ enum pll_component_id {
 	PLL_COMPID_FRAC,
 	PLL_COMPID_DIV0,
 	PLL_COMPID_DIV1,
+	PLL_COMPID_MAX,
 };

 /*

@@ -165,7 +166,7 @@ static struct sama7g5_pll {
 	u8 t;
 	u8 eid;
 	u8 safe_div;
-} sama7g5_plls[][PLL_ID_MAX] = {
+} sama7g5_plls[][PLL_COMPID_MAX] = {
 	[PLL_ID_CPU] = {
 		[PLL_COMPID_FRAC] = {
 			.n = "cpupll_fracck",

@@ -1038,7 +1039,7 @@ static void __init sama7g5_pmc_setup(struct device_node *np)
 	sama7g5_pmc->chws[PMC_MAIN] = hw;

 	for (i = 0; i < PLL_ID_MAX; i++) {
-		for (j = 0; j < 3; j++) {
+		for (j = 0; j < PLL_COMPID_MAX; j++) {
 			struct clk_hw *parent_hw;

 			if (!sama7g5_plls[i][j].n)

@@ -14,6 +14,7 @@
 #include "../clk-fractional-divider.h"
 #include "clk.h"

+#define PCG_PR_MASK		BIT(31)
 #define PCG_PCS_SHIFT	24
 #define PCG_PCS_MASK	0x7
 #define PCG_CGC_SHIFT	30

@@ -78,6 +79,12 @@ static struct clk_hw *imx_ulp_clk_hw_composite(const char *name,
 	struct clk_hw *hw;
 	u32 val;

+	val = readl(reg);
+	if (!(val & PCG_PR_MASK)) {
+		pr_info("PCC PR is 0 for clk:%s, bypass\n", name);
+		return 0;
+	}
+
 	if (mux_present) {
 		mux = kzalloc(sizeof(*mux), GFP_KERNEL);
 		if (!mux)
@@ -204,6 +204,34 @@ static const struct clk_ops imx8m_clk_composite_mux_ops = {
 	.determine_rate = imx8m_clk_composite_mux_determine_rate,
 };

+static int imx8m_clk_composite_gate_enable(struct clk_hw *hw)
+{
+	struct clk_gate *gate = to_clk_gate(hw);
+	unsigned long flags;
+	u32 val;
+
+	spin_lock_irqsave(gate->lock, flags);
+
+	val = readl(gate->reg);
+	val |= BIT(gate->bit_idx);
+	writel(val, gate->reg);
+
+	spin_unlock_irqrestore(gate->lock, flags);
+
+	return 0;
+}
+
+static void imx8m_clk_composite_gate_disable(struct clk_hw *hw)
+{
+	/* composite clk requires the disable hook */
+}
+
+static const struct clk_ops imx8m_clk_composite_gate_ops = {
+	.enable = imx8m_clk_composite_gate_enable,
+	.disable = imx8m_clk_composite_gate_disable,
+	.is_enabled = clk_gate_is_enabled,
+};
+
 struct clk_hw *__imx8m_clk_hw_composite(const char *name,
 					const char * const *parent_names,
 					int num_parents, void __iomem *reg,

@@ -217,10 +245,11 @@ struct clk_hw *__imx8m_clk_hw_composite(const char *name,
 	struct clk_mux *mux = NULL;
 	const struct clk_ops *divider_ops;
 	const struct clk_ops *mux_ops;
+	const struct clk_ops *gate_ops;

 	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
 	if (!mux)
-		goto fail;
+		return ERR_CAST(hw);

 	mux_hw = &mux->hw;
 	mux->reg = reg;

@@ -230,7 +259,7 @@ struct clk_hw *__imx8m_clk_hw_composite(const char *name,

 	div = kzalloc(sizeof(*div), GFP_KERNEL);
 	if (!div)
-		goto fail;
+		goto free_mux;

 	div_hw = &div->hw;
 	div->reg = reg;

@@ -257,28 +286,32 @@ struct clk_hw *__imx8m_clk_hw_composite(const char *name,
 	div->flags = CLK_DIVIDER_ROUND_CLOSEST;

-	/* skip registering the gate ops if M4 is enabled */
-	if (!mcore_booted) {
-		gate = kzalloc(sizeof(*gate), GFP_KERNEL);
-		if (!gate)
-			goto fail;
+	gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+	if (!gate)
+		goto free_div;

-		gate_hw = &gate->hw;
-		gate->reg = reg;
-		gate->bit_idx = PCG_CGC_SHIFT;
-		gate->lock = &imx_ccm_lock;
-	}
+	gate_hw = &gate->hw;
+	gate->reg = reg;
+	gate->bit_idx = PCG_CGC_SHIFT;
+	gate->lock = &imx_ccm_lock;
+	if (!mcore_booted)
+		gate_ops = &clk_gate_ops;
+	else
+		gate_ops = &imx8m_clk_composite_gate_ops;

 	hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
 			mux_hw, mux_ops, div_hw,
-			divider_ops, gate_hw, &clk_gate_ops, flags);
+			divider_ops, gate_hw, gate_ops, flags);
 	if (IS_ERR(hw))
-		goto fail;
+		goto free_gate;

 	return hw;

-fail:
+free_gate:
 	kfree(gate);
+free_div:
 	kfree(div);
+free_mux:
 	kfree(mux);
 	return ERR_CAST(hw);
 }

@@ -76,6 +76,13 @@ static int imx93_clk_composite_gate_enable(struct clk_hw *hw)

 static void imx93_clk_composite_gate_disable(struct clk_hw *hw)
 {
+	/*
+	 * Skip disabling the root clock gate if the mcore is enabled.
+	 * The root clock may be used by the mcore.
+	 */
+	if (mcore_booted)
+		return;
+
 	imx93_clk_composite_gate_endisable(hw, 0);
 }

@@ -222,7 +229,7 @@ struct clk_hw *imx93_clk_composite_flags(const char *name, const char * const *p
 		hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
 					       mux_hw, &clk_mux_ro_ops, div_hw,
 					       &clk_divider_ro_ops, NULL, NULL, flags);
-	} else if (!mcore_booted) {
+	} else {
 		gate = kzalloc(sizeof(*gate), GFP_KERNEL);
 		if (!gate)
 			goto fail;

@@ -238,12 +245,6 @@ struct clk_hw *imx93_clk_composite_flags(const char *name, const char * const *p
 					       &imx93_clk_composite_divider_ops, gate_hw,
 					       &imx93_clk_composite_gate_ops,
 					       flags | CLK_SET_RATE_NO_REPARENT);
-	} else {
-		hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
-					       mux_hw, &imx93_clk_composite_mux_ops, div_hw,
-					       &imx93_clk_composite_divider_ops, NULL,
-					       &imx93_clk_composite_gate_ops,
-					       flags | CLK_SET_RATE_NO_REPARENT);
 	}

 	if (IS_ERR(hw))
@@ -291,6 +291,10 @@ static int clk_fracn_gppll_prepare(struct clk_hw *hw)
 	if (val & POWERUP_MASK)
 		return 0;

+	if (pll->flags & CLK_FRACN_GPPLL_FRACN)
+		writel_relaxed(readl_relaxed(pll->base + PLL_NUMERATOR),
+			       pll->base + PLL_NUMERATOR);
+
 	val |= CLKMUX_BYPASS;
 	writel_relaxed(val, pll->base + PLL_CTRL);

@@ -542,8 +542,8 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)

 	clk_set_parent(hws[IMX6UL_CLK_ENFC_SEL]->clk, hws[IMX6UL_CLK_PLL2_PFD2]->clk);

-	clk_set_parent(hws[IMX6UL_CLK_ENET1_REF_SEL]->clk, hws[IMX6UL_CLK_ENET_REF]->clk);
-	clk_set_parent(hws[IMX6UL_CLK_ENET2_REF_SEL]->clk, hws[IMX6UL_CLK_ENET2_REF]->clk);
+	clk_set_parent(hws[IMX6UL_CLK_ENET1_REF_SEL]->clk, hws[IMX6UL_CLK_ENET1_REF_125M]->clk);
+	clk_set_parent(hws[IMX6UL_CLK_ENET2_REF_SEL]->clk, hws[IMX6UL_CLK_ENET2_REF_125M]->clk);

 	imx_register_uart_clocks();
 }

@@ -146,6 +146,15 @@ static const struct clk_parent_data clk_imx8mp_audiomix_pll_bypass_sels[] = {
 		PDM_SEL, 2, 0 \
 	}

+#define CLK_GATE_PARENT(gname, cname, pname)					\
+	{									\
+		gname"_cg",							\
+		IMX8MP_CLK_AUDIOMIX_##cname,					\
+		{ .fw_name = pname, .name = pname }, NULL, 1,			\
+		CLKEN0 + 4 * !!(IMX8MP_CLK_AUDIOMIX_##cname / 32),		\
+		1, IMX8MP_CLK_AUDIOMIX_##cname % 32				\
+	}
+
 struct clk_imx8mp_audiomix_sel {
 	const char *name;
 	int clkid;

@@ -163,14 +172,14 @@ static struct clk_imx8mp_audiomix_sel sels[] = {
 	CLK_GATE("earc", EARC_IPG),
 	CLK_GATE("ocrama", OCRAMA_IPG),
 	CLK_GATE("aud2htx", AUD2HTX_IPG),
-	CLK_GATE("earc_phy", EARC_PHY),
+	CLK_GATE_PARENT("earc_phy", EARC_PHY, "sai_pll_out_div2"),
 	CLK_GATE("sdma2", SDMA2_ROOT),
 	CLK_GATE("sdma3", SDMA3_ROOT),
 	CLK_GATE("spba2", SPBA2_ROOT),
 	CLK_GATE("dsp", DSP_ROOT),
 	CLK_GATE("dspdbg", DSPDBG_ROOT),
 	CLK_GATE("edma", EDMA_ROOT),
-	CLK_GATE("audpll", AUDPLL_ROOT),
+	CLK_GATE_PARENT("audpll", AUDPLL_ROOT, "osc_24m"),
 	CLK_GATE("mu2", MU2_ROOT),
 	CLK_GATE("mu3", MU3_ROOT),
 	CLK_PDM,

@@ -551,8 +551,8 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)

 	hws[IMX8MP_CLK_IPG_ROOT] = imx_clk_hw_divider2("ipg_root", "ahb_root", ccm_base + 0x9080, 0, 1);

-	hws[IMX8MP_CLK_DRAM_ALT] = imx8m_clk_hw_composite("dram_alt", imx8mp_dram_alt_sels, ccm_base + 0xa000);
-	hws[IMX8MP_CLK_DRAM_APB] = imx8m_clk_hw_composite_critical("dram_apb", imx8mp_dram_apb_sels, ccm_base + 0xa080);
+	hws[IMX8MP_CLK_DRAM_ALT] = imx8m_clk_hw_fw_managed_composite("dram_alt", imx8mp_dram_alt_sels, ccm_base + 0xa000);
+	hws[IMX8MP_CLK_DRAM_APB] = imx8m_clk_hw_fw_managed_composite_critical("dram_apb", imx8mp_dram_apb_sels, ccm_base + 0xa080);
 	hws[IMX8MP_CLK_VPU_G1] = imx8m_clk_hw_composite("vpu_g1", imx8mp_vpu_g1_sels, ccm_base + 0xa100);
 	hws[IMX8MP_CLK_VPU_G2] = imx8m_clk_hw_composite("vpu_g2", imx8mp_vpu_g2_sels, ccm_base + 0xa180);
 	hws[IMX8MP_CLK_CAN1] = imx8m_clk_hw_composite("can1", imx8mp_can1_sels, ccm_base + 0xa200);
@@ -165,8 +165,8 @@ static int imx8qxp_clk_probe(struct platform_device *pdev)
 	imx_clk_scu("pwm_clk", IMX_SC_R_LCD_0_PWM_0, IMX_SC_PM_CLK_PER);
 	imx_clk_scu("elcdif_pll", IMX_SC_R_ELCDIF_PLL, IMX_SC_PM_CLK_PLL);
 	imx_clk_scu2("lcd_clk", lcd_sels, ARRAY_SIZE(lcd_sels), IMX_SC_R_LCD_0, IMX_SC_PM_CLK_PER);
-	imx_clk_scu2("lcd_pxl_clk", lcd_pxl_sels, ARRAY_SIZE(lcd_pxl_sels), IMX_SC_R_LCD_0, IMX_SC_PM_CLK_MISC0);
 	imx_clk_scu("lcd_pxl_bypass_div_clk", IMX_SC_R_LCD_0, IMX_SC_PM_CLK_BYPASS);
+	imx_clk_scu2("lcd_pxl_clk", lcd_pxl_sels, ARRAY_SIZE(lcd_pxl_sels), IMX_SC_R_LCD_0, IMX_SC_PM_CLK_MISC0);

 	/* Audio SS */
 	imx_clk_scu("audio_pll0_clk", IMX_SC_R_AUDIO_PLL_0, IMX_SC_PM_CLK_PLL);

@@ -199,18 +199,18 @@ static int imx8qxp_clk_probe(struct platform_device *pdev)
 	imx_clk_scu("usb3_lpm_div", IMX_SC_R_USB_2, IMX_SC_PM_CLK_MISC);

 	/* Display controller SS */
-	imx_clk_scu2("dc0_disp0_clk", dc0_sels, ARRAY_SIZE(dc0_sels), IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC0);
-	imx_clk_scu2("dc0_disp1_clk", dc0_sels, ARRAY_SIZE(dc0_sels), IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC1);
 	imx_clk_scu("dc0_pll0_clk", IMX_SC_R_DC_0_PLL_0, IMX_SC_PM_CLK_PLL);
 	imx_clk_scu("dc0_pll1_clk", IMX_SC_R_DC_0_PLL_1, IMX_SC_PM_CLK_PLL);
 	imx_clk_scu("dc0_bypass0_clk", IMX_SC_R_DC_0_VIDEO0, IMX_SC_PM_CLK_BYPASS);
+	imx_clk_scu2("dc0_disp0_clk", dc0_sels, ARRAY_SIZE(dc0_sels), IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC0);
+	imx_clk_scu2("dc0_disp1_clk", dc0_sels, ARRAY_SIZE(dc0_sels), IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC1);
 	imx_clk_scu("dc0_bypass1_clk", IMX_SC_R_DC_0_VIDEO1, IMX_SC_PM_CLK_BYPASS);

-	imx_clk_scu2("dc1_disp0_clk", dc1_sels, ARRAY_SIZE(dc1_sels), IMX_SC_R_DC_1, IMX_SC_PM_CLK_MISC0);
-	imx_clk_scu2("dc1_disp1_clk", dc1_sels, ARRAY_SIZE(dc1_sels), IMX_SC_R_DC_1, IMX_SC_PM_CLK_MISC1);
 	imx_clk_scu("dc1_pll0_clk", IMX_SC_R_DC_1_PLL_0, IMX_SC_PM_CLK_PLL);
 	imx_clk_scu("dc1_pll1_clk", IMX_SC_R_DC_1_PLL_1, IMX_SC_PM_CLK_PLL);
 	imx_clk_scu("dc1_bypass0_clk", IMX_SC_R_DC_1_VIDEO0, IMX_SC_PM_CLK_BYPASS);
+	imx_clk_scu2("dc1_disp0_clk", dc1_sels, ARRAY_SIZE(dc1_sels), IMX_SC_R_DC_1, IMX_SC_PM_CLK_MISC0);
+	imx_clk_scu2("dc1_disp1_clk", dc1_sels, ARRAY_SIZE(dc1_sels), IMX_SC_R_DC_1, IMX_SC_PM_CLK_MISC1);
 	imx_clk_scu("dc1_bypass1_clk", IMX_SC_R_DC_1_VIDEO1, IMX_SC_PM_CLK_BYPASS);

 	/* MIPI-LVDS SS */
@@ -1757,6 +1757,58 @@ const struct clk_ops clk_alpha_pll_agera_ops = {
 };
 EXPORT_SYMBOL_GPL(clk_alpha_pll_agera_ops);

+/**
+ * clk_lucid_5lpe_pll_configure - configure the lucid 5lpe pll
+ *
+ * @pll: clk alpha pll
+ * @regmap: register map
+ * @config: configuration to apply for pll
+ */
+void clk_lucid_5lpe_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+				  const struct alpha_pll_config *config)
+{
+	/*
+	 * If the bootloader left the PLL enabled it's likely that there are
+	 * RCGs that will lock up if we disable the PLL below.
+	 */
+	if (trion_pll_is_enabled(pll, regmap)) {
+		pr_debug("Lucid 5LPE PLL is already enabled, skipping configuration\n");
+		return;
+	}
+
+	clk_alpha_pll_write_config(regmap, PLL_L_VAL(pll), config->l);
+	regmap_write(regmap, PLL_CAL_L_VAL(pll), TRION_PLL_CAL_VAL);
+	clk_alpha_pll_write_config(regmap, PLL_ALPHA_VAL(pll), config->alpha);
+	clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL(pll),
+				   config->config_ctl_val);
+	clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U(pll),
+				   config->config_ctl_hi_val);
+	clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U1(pll),
+				   config->config_ctl_hi1_val);
+	clk_alpha_pll_write_config(regmap, PLL_USER_CTL(pll),
+				   config->user_ctl_val);
+	clk_alpha_pll_write_config(regmap, PLL_USER_CTL_U(pll),
+				   config->user_ctl_hi_val);
+	clk_alpha_pll_write_config(regmap, PLL_USER_CTL_U1(pll),
+				   config->user_ctl_hi1_val);
+	clk_alpha_pll_write_config(regmap, PLL_TEST_CTL(pll),
+				   config->test_ctl_val);
+	clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U(pll),
+				   config->test_ctl_hi_val);
+	clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U1(pll),
+				   config->test_ctl_hi1_val);
+
+	/* Disable PLL output */
+	regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, 0);
+
+	/* Set operation mode to OFF */
+	regmap_write(regmap, PLL_OPMODE(pll), PLL_STANDBY);
+
+	/* Place the PLL in STANDBY mode */
+	regmap_update_bits(regmap, PLL_MODE(pll), PLL_RESET_N, PLL_RESET_N);
+}
+EXPORT_SYMBOL_GPL(clk_lucid_5lpe_pll_configure);
+
 static int alpha_pll_lucid_5lpe_enable(struct clk_hw *hw)
 {
 	struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);

@@ -198,6 +198,8 @@ void clk_agera_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,

 void clk_zonda_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
 			     const struct alpha_pll_config *config);
+void clk_lucid_5lpe_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+				  const struct alpha_pll_config *config);
 void clk_lucid_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
 				 const struct alpha_pll_config *config);
 void clk_rivian_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
 				  const struct alpha_pll_config *config);