Merge remote-tracking branch 'torvalds/master' into perf/core
Get fixes sent via perf/urgent, etc.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
commit 467cd948f8

.mailmap | 1
@@ -45,6 +45,7 @@ Andrey Konovalov <andreyknvl@gmail.com> <andreyknvl@google.com>
 Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
 Andrey Ryabinin <ryabinin.a.a@gmail.com> <aryabinin@virtuozzo.com>
 Andrzej Hajda <andrzej.hajda@intel.com> <a.hajda@samsung.com>
+André Almeida <andrealmeid@igalia.com> <andrealmeid@collabora.com>
 Andy Adamson <andros@citi.umich.edu>
 Antoine Tenart <atenart@kernel.org> <antoine.tenart@bootlin.com>
 Antoine Tenart <atenart@kernel.org> <antoine.tenart@free-electrons.com>
@@ -55,8 +55,6 @@ allOf:
 then:
 properties:
 clocks:
-minItems: 7
-maxItems: 7
 items:
 - description: 32k osc
 - description: 25m osc

@@ -66,8 +64,6 @@ allOf:
 - description: ext3 clock input
 - description: ext4 clock input
 clock-names:
-minItems: 7
-maxItems: 7
 items:
 - const: ckil
 - const: osc_25m
@@ -95,7 +95,6 @@ then:
 properties:
 clocks:
 minItems: 1
-maxItems: 4
 items:
 - description: Functional clock
 - description: EXTAL input clock

@@ -104,7 +103,6 @@ then:

 clock-names:
 minItems: 1
-maxItems: 4
 items:
 - const: fck
 # The LVDS encoder can use the EXTAL or DU_DOTCLKINx clocks.

@@ -128,12 +126,10 @@ then:
 else:
 properties:
 clocks:
-maxItems: 1
 items:
 - description: Functional clock

 clock-names:
-maxItems: 1
 items:
 - const: fck
@@ -109,7 +109,6 @@ allOf:
 properties:
 clocks:
 minItems: 1
-maxItems: 3
 items:
 - description: Functional clock
 - description: DU_DOTCLKIN0 input clock

@@ -117,7 +116,6 @@ allOf:

 clock-names:
 minItems: 1
-maxItems: 3
 items:
 - const: du.0
 - pattern: '^dclkin\.[01]$'

@@ -159,7 +157,6 @@ allOf:
 properties:
 clocks:
 minItems: 2
-maxItems: 4
 items:
 - description: Functional clock for DU0
 - description: Functional clock for DU1

@@ -168,7 +165,6 @@ allOf:

 clock-names:
 minItems: 2
-maxItems: 4
 items:
 - const: du.0
 - const: du.1

@@ -216,7 +212,6 @@ allOf:
 properties:
 clocks:
 minItems: 2
-maxItems: 4
 items:
 - description: Functional clock for DU0
 - description: Functional clock for DU1

@@ -225,7 +220,6 @@ allOf:

 clock-names:
 minItems: 2
-maxItems: 4
 items:
 - const: du.0
 - const: du.1
@@ -271,7 +265,6 @@ allOf:
 properties:
 clocks:
 minItems: 2
-maxItems: 4
 items:
 - description: Functional clock for DU0
 - description: Functional clock for DU1

@@ -280,7 +273,6 @@ allOf:

 clock-names:
 minItems: 2
-maxItems: 4
 items:
 - const: du.0
 - const: du.1

@@ -327,7 +319,6 @@ allOf:
 properties:
 clocks:
 minItems: 2
-maxItems: 4
 items:
 - description: Functional clock for DU0
 - description: Functional clock for DU1

@@ -336,7 +327,6 @@ allOf:

 clock-names:
 minItems: 2
-maxItems: 4
 items:
 - const: du.0
 - const: du.1
@@ -386,7 +376,6 @@ allOf:
 properties:
 clocks:
 minItems: 3
-maxItems: 6
 items:
 - description: Functional clock for DU0
 - description: Functional clock for DU1

@@ -397,7 +386,6 @@ allOf:

 clock-names:
 minItems: 3
-maxItems: 6
 items:
 - const: du.0
 - const: du.1

@@ -448,7 +436,6 @@ allOf:
 properties:
 clocks:
 minItems: 4
-maxItems: 8
 items:
 - description: Functional clock for DU0
 - description: Functional clock for DU1

@@ -461,7 +448,6 @@ allOf:

 clock-names:
 minItems: 4
-maxItems: 8
 items:
 - const: du.0
 - const: du.1
@@ -525,7 +511,6 @@ allOf:
 properties:
 clocks:
 minItems: 3
-maxItems: 6
 items:
 - description: Functional clock for DU0
 - description: Functional clock for DU1

@@ -536,7 +521,6 @@ allOf:

 clock-names:
 minItems: 3
-maxItems: 6
 items:
 - const: du.0
 - const: du.1

@@ -596,7 +580,6 @@ allOf:
 properties:
 clocks:
 minItems: 3
-maxItems: 6
 items:
 - description: Functional clock for DU0
 - description: Functional clock for DU1

@@ -607,7 +590,6 @@ allOf:

 clock-names:
 minItems: 3
-maxItems: 6
 items:
 - const: du.0
 - const: du.1
@@ -666,14 +648,12 @@ allOf:
 properties:
 clocks:
 minItems: 1
-maxItems: 2
 items:
 - description: Functional clock for DU0
 - description: DU_DOTCLKIN0 input clock

 clock-names:
 minItems: 1
-maxItems: 2
 items:
 - const: du.0
 - const: dclkin.0

@@ -723,7 +703,6 @@ allOf:
 properties:
 clocks:
 minItems: 2
-maxItems: 4
 items:
 - description: Functional clock for DU0
 - description: Functional clock for DU1

@@ -732,7 +711,6 @@ allOf:

 clock-names:
 minItems: 2
-maxItems: 4
 items:
 - const: du.0
 - const: du.1

@@ -791,7 +769,6 @@ allOf:
 - description: Functional clock

 clock-names:
-maxItems: 1
 items:
 - const: du.0
@@ -58,10 +58,9 @@ patternProperties:
 description: |
 The value (two's complement) to be programmed in the channel specific N correction register.
 For remote channels only.
-$ref: /schemas/types.yaml#/definitions/uint32
-items:
-minimum: 0
-maximum: 255
+$ref: /schemas/types.yaml#/definitions/int32
+minimum: -128
+maximum: 127

 required:
 - reg
@@ -138,7 +138,6 @@ allOf:
 - const: bus
 - const: adc
 minItems: 1
-maxItems: 2

 interrupts:
 items:

@@ -170,7 +169,6 @@ allOf:
 - const: bus
 - const: adc
 minItems: 1
-maxItems: 2

 interrupts:
 items:
@@ -43,8 +43,6 @@ patternProperties:
 - 4 # LED output FLASH1
 - 5 # LED output FLASH2
-
-unevaluatedProperties: false

 required:
 - compatible
 - "#address-cells"
@@ -202,22 +202,17 @@ allOf:
 clocks:
 items:
 - description: module clock
-minItems: 1
-maxItems: 1
 else:
 properties:
 clocks:
 items:
 - description: module clock
 - description: timeout clock
-minItems: 2
-maxItems: 2

 clock-names:
 items:
 - const: sdhci
 - const: tmclk
-minItems: 2
-maxItems: 2
 required:
 - clock-names
@@ -147,8 +147,6 @@ allOf:
 - description: SoC gpmi io clock
 - description: SoC gpmi bch apb clock
 clock-names:
-minItems: 2
-maxItems: 2
 items:
 - const: gpmi_io
 - const: gpmi_bch_apb
@@ -80,8 +80,6 @@ if:
 then:
 properties:
 interrupts:
-minItems: 4
-maxItems: 4
 items:
 - description: Error and status IRQ
 - description: Message object IRQ

@@ -91,7 +89,6 @@ then:
 else:
 properties:
 interrupts:
-maxItems: 1
 items:
 - description: Error and status IRQ
@@ -142,7 +142,6 @@ examples:
 device_type = "pci";
 reg = <0x0 0x0 0x0 0x0 0x0>;
 reset-gpios = <&pinctrl_ap 152 0>;
-max-link-speed = <2>;

 #address-cells = <3>;
 #size-cells = <2>;

@@ -153,7 +152,6 @@ examples:
 device_type = "pci";
 reg = <0x800 0x0 0x0 0x0 0x0>;
 reset-gpios = <&pinctrl_ap 153 0>;
-max-link-speed = <2>;

 #address-cells = <3>;
 #size-cells = <2>;

@@ -164,7 +162,6 @@ examples:
 device_type = "pci";
 reg = <0x1000 0x0 0x0 0x0 0x0>;
 reset-gpios = <&pinctrl_ap 33 0>;
-max-link-speed = <1>;

 #address-cells = <3>;
 #size-cells = <2>;
@@ -102,19 +102,17 @@ if:
 then:
 properties:
 reg:
 maxItems: 2
 minItems: 2

 reg-names:
 items:
 - const: "phy"
 - const: "phy-ctrl"
 minItems: 2
 else:
 properties:
 reg:
 maxItems: 1

 reg-names:
 maxItems: 1
 items:
 - const: "phy"

 required:
 - compatible
@@ -52,11 +52,19 @@ properties:
 hardware supporting it the pull strength in Ohm.

 drive-push-pull:
-type: boolean
+oneOf:
+- type: boolean
+- $ref: /schemas/types.yaml#/definitions/uint32
+enum: [ 0, 1 ]
+deprecated: true
 description: drive actively high and low

 drive-open-drain:
-type: boolean
+oneOf:
+- type: boolean
+- $ref: /schemas/types.yaml#/definitions/uint32
+const: 1 # No known cases of 0
+deprecated: true
 description: drive with open drain

 drive-open-source:
@@ -71,7 +71,6 @@ allOf:
 then:
 properties:
 clock-output-names:
-minItems: 1
 maxItems: 1

 - if:

@@ -102,7 +101,6 @@ allOf:
 properties:
 clock-output-names:
-minItems: 3
 maxItems: 3

 - if:
 properties:

@@ -113,16 +111,12 @@ allOf:
 then:
 properties:
 clocks:
-minItems: 3
-maxItems: 3
 items:
 - description: Bus clock for register access
 - description: 24 MHz oscillator
 - description: 32 kHz clock from the CCU

 clock-names:
-minItems: 3
-maxItems: 3
 items:
 - const: bus
 - const: hosc
@@ -142,7 +136,6 @@ allOf:
 properties:
 clocks:
 minItems: 3
-maxItems: 4
 items:
 - description: Bus clock for register access
 - description: 24 MHz oscillator

@@ -151,7 +144,6 @@ allOf:

 clock-names:
 minItems: 3
-maxItems: 4
 items:
 - const: bus
 - const: hosc

@@ -174,14 +166,12 @@ allOf:
 then:
 properties:
 interrupts:
-minItems: 1
 maxItems: 1

 else:
 properties:
 interrupts:
-minItems: 2
 maxItems: 2

 required:
 - "#clock-cells"
@@ -100,7 +100,6 @@ allOf:
 maxItems: 3
 clock-names:
 minItems: 2
-maxItems: 3
 items:
 - const: uart
 - pattern: '^clk_uart_baud[0-1]$'

@@ -118,11 +117,8 @@ allOf:
 then:
 properties:
 clocks:
-minItems: 2
 maxItems: 2
 clock-names:
-minItems: 2
-maxItems: 2
 items:
 - const: uart
 - const: clk_uart_baud0
@@ -89,7 +89,6 @@ allOf:
 properties:
 dmas:
 minItems: 1
-maxItems: 2
 items:
 - description: RX DMA Channel
 - description: TX DMA Channel
@@ -80,7 +80,6 @@ allOf:
 then:
 properties:
 clocks:
-minItems: 6
 items:
 - description: AUXCLK clock for McASP used by CPB audio
 - description: Parent for CPB_McASP auxclk (for 48KHz)

@@ -107,7 +106,6 @@ allOf:
 then:
 properties:
 clocks:
-maxItems: 4
 items:
 - description: AUXCLK clock for McASP used by CPB audio
 - description: Parent for CPB_McASP auxclk (for 48KHz)
@@ -67,7 +67,6 @@ then:
 properties:
 reg:
 minItems: 2
-maxItems: 3
 items:
 - description: TSC1 registers
 - description: TSC2 registers
@@ -43,6 +43,9 @@ properties:
 - const: phy_clk
 - const: ref_clk
+
+power-domains:
+maxItems: 1

 reg:
 maxItems: 1

@@ -62,6 +62,7 @@ required:
 - interrupts
 - phys
 - phy-names
 - reg

 allOf:
 - if:
@@ -5986,16 +5986,16 @@ should put the acknowledged interrupt vector into the 'epr' field.
 #define KVM_SYSTEM_EVENT_RESET 2
 #define KVM_SYSTEM_EVENT_CRASH 3
 __u32 type;
-__u64 flags;
+__u32 ndata;
+__u64 data[16];
 } system_event;

 If exit_reason is KVM_EXIT_SYSTEM_EVENT then the vcpu has triggered
 a system-level event using some architecture specific mechanism (hypercall
 or some special instruction). In case of ARM64, this is triggered using
-HVC instruction based PSCI call from the vcpu. The 'type' field describes
-the system-level event type. The 'flags' field describes architecture
-specific flags for the system-level event.
+HVC instruction based PSCI call from the vcpu.
+
+The 'type' field describes the system-level event type.
 Valid values for 'type' are:

 - KVM_SYSTEM_EVENT_SHUTDOWN -- the guest has requested a shutdown of the

@@ -6010,10 +6010,20 @@ Valid values for 'type' are:
 to ignore the request, or to gather VM memory core dump and/or
 reset/shutdown of the VM.

-Valid flags are:
+If KVM_CAP_SYSTEM_EVENT_DATA is present, the 'data' field can contain
+architecture specific information for the system-level event. Only
+the first `ndata` items (possibly zero) of the data array are valid.

-- KVM_SYSTEM_EVENT_RESET_FLAG_PSCI_RESET2 (arm64 only) -- the guest issued
-a SYSTEM_RESET2 call according to v1.1 of the PSCI specification.
+- for arm64, data[0] is set to KVM_SYSTEM_EVENT_RESET_FLAG_PSCI_RESET2 if
+the guest issued a SYSTEM_RESET2 call according to v1.1 of the PSCI
+specification.
+
+- for RISC-V, data[0] is set to the value of the second argument of the
+``sbi_system_reset`` call.
+
+Previous versions of Linux defined a `flags` member in this struct. The
+field is now aliased to `data[0]`. Userspace can assume that it is only
+written if ndata is greater than 0.

 ::
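As an aside, a minimal sketch of how userspace might consume the ndata/data layout documented above — the KVM_* constants and kvm_run fields are the real UAPI (the RESET_FLAG constant comes from the arm64 uapi headers), while the handler itself and its printf reporting are illustrative assumptions:

/* Hypothetical VMM-side handler for KVM_EXIT_SYSTEM_EVENT; assumes a
 * kernel with KVM_CAP_SYSTEM_EVENT_DATA. */
#include <linux/kvm.h>
#include <stdio.h>

static void handle_system_event(struct kvm_run *run)
{
	switch (run->system_event.type) {
	case KVM_SYSTEM_EVENT_SHUTDOWN:
		printf("guest requested shutdown\n");
		break;
	case KVM_SYSTEM_EVENT_RESET:
		/* 'flags' is aliased to data[0]; check ndata first. */
		if (run->system_event.ndata > 0 &&
		    (run->system_event.data[0] &
		     KVM_SYSTEM_EVENT_RESET_FLAG_PSCI_RESET2))
			printf("guest issued PSCI SYSTEM_RESET2\n");
		else
			printf("guest requested reset\n");
		break;
	case KVM_SYSTEM_EVENT_CRASH:
		printf("guest crashed\n");
		break;
	}
}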
MAINTAINERS | 29
@@ -5917,7 +5917,7 @@ R: Benjamin Gaignard <benjamin.gaignard@collabora.com>
 R: Liam Mark <lmark@codeaurora.org>
 R: Laura Abbott <labbott@redhat.com>
 R: Brian Starkey <Brian.Starkey@arm.com>
-R: John Stultz <john.stultz@linaro.org>
+R: John Stultz <jstultz@google.com>
 L: linux-media@vger.kernel.org
 L: dri-devel@lists.freedesktop.org
 L: linaro-mm-sig@lists.linaro.org (moderated for non-subscribers)

@@ -6587,7 +6587,7 @@ F: drivers/gpu/drm/gma500/
 DRM DRIVERS FOR HISILICON
 M: Xinliang Liu <xinliang.liu@linaro.org>
 M: Tian Tao <tiantao6@hisilicon.com>
-R: John Stultz <john.stultz@linaro.org>
+R: John Stultz <jstultz@google.com>
 R: Xinwei Kong <kong.kongxinwei@hisilicon.com>
 R: Chen Feng <puck.chen@hisilicon.com>
 L: dri-devel@lists.freedesktop.org

@@ -7499,7 +7499,7 @@ F: Documentation/hwmon/f71805f.rst
 F: drivers/hwmon/f71805f.c

 FADDR2LINE
-M: Josh Poimboeuf <jpoimboe@redhat.com>
+M: Josh Poimboeuf <jpoimboe@kernel.org>
 S: Maintained
 F: scripts/faddr2line

@@ -8112,7 +8112,7 @@ M: Ingo Molnar <mingo@redhat.com>
 R: Peter Zijlstra <peterz@infradead.org>
 R: Darren Hart <dvhart@infradead.org>
 R: Davidlohr Bueso <dave@stgolabs.net>
-R: André Almeida <andrealmeid@collabora.com>
+R: André Almeida <andrealmeid@igalia.com>
 L: linux-kernel@vger.kernel.org
 S: Maintained
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking/core

@@ -8385,7 +8385,7 @@ M: Linus Walleij <linus.walleij@linaro.org>
 M: Bartosz Golaszewski <brgl@bgdev.pl>
 L: linux-gpio@vger.kernel.org
 S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/brgl/linux.git
 F: Documentation/ABI/obsolete/sysfs-gpio
 F: Documentation/ABI/testing/gpio-cdev
 F: Documentation/admin-guide/gpio/

@@ -8848,7 +8848,7 @@ F: Documentation/devicetree/bindings/net/hisilicon*.txt
 F: drivers/net/ethernet/hisilicon/

 HIKEY960 ONBOARD USB GPIO HUB DRIVER
-M: John Stultz <john.stultz@linaro.org>
+M: John Stultz <jstultz@google.com>
 L: linux-kernel@vger.kernel.org
 S: Maintained
 F: drivers/misc/hisi_hikey_usb.c
@@ -11348,7 +11348,7 @@ F: drivers/mmc/host/litex_mmc.c
 N: litex

 LIVE PATCHING
-M: Josh Poimboeuf <jpoimboe@redhat.com>
+M: Josh Poimboeuf <jpoimboe@kernel.org>
 M: Jiri Kosina <jikos@kernel.org>
 M: Miroslav Benes <mbenes@suse.cz>
 M: Petr Mladek <pmladek@suse.com>

@@ -14224,7 +14224,7 @@ F: lib/objagg.c
 F: lib/test_objagg.c

 OBJTOOL
-M: Josh Poimboeuf <jpoimboe@redhat.com>
+M: Josh Poimboeuf <jpoimboe@kernel.org>
 M: Peter Zijlstra <peterz@infradead.org>
 S: Supported
 F: tools/objtool/

@@ -18792,7 +18792,7 @@ F: include/dt-bindings/reset/starfive-jh7100.h

 STATIC BRANCH/CALL
 M: Peter Zijlstra <peterz@infradead.org>
-M: Josh Poimboeuf <jpoimboe@redhat.com>
+M: Josh Poimboeuf <jpoimboe@kernel.org>
 M: Jason Baron <jbaron@akamai.com>
 R: Steven Rostedt <rostedt@goodmis.org>
 R: Ard Biesheuvel <ardb@kernel.org>

@@ -19793,7 +19793,7 @@ F: drivers/net/wireless/ti/
 F: include/linux/wl12xx.h

 TIMEKEEPING, CLOCKSOURCE CORE, NTP, ALARMTIMER
-M: John Stultz <john.stultz@linaro.org>
+M: John Stultz <jstultz@google.com>
 M: Thomas Gleixner <tglx@linutronix.de>
 R: Stephen Boyd <sboyd@kernel.org>
 L: linux-kernel@vger.kernel.org

@@ -21443,6 +21443,15 @@ F: arch/x86/include/asm/uv/
 F: arch/x86/kernel/apic/x2apic_uv_x.c
 F: arch/x86/platform/uv/

+X86 STACK UNWINDING
+M: Josh Poimboeuf <jpoimboe@kernel.org>
+M: Peter Zijlstra <peterz@infradead.org>
+S: Supported
+F: arch/x86/include/asm/unwind*.h
+F: arch/x86/kernel/dumpstack.c
+F: arch/x86/kernel/stacktrace.c
+F: arch/x86/kernel/unwind_*.c
+
 X86 VDSO
 M: Andy Lutomirski <luto@kernel.org>
 L: linux-kernel@vger.kernel.org
Makefile | 2
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 18
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
 NAME = Superb Owl

 # *DOCUMENTATION*
@@ -40,6 +40,7 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu);
 void kvm_inject_vabt(struct kvm_vcpu *vcpu);
 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
+void kvm_inject_size_fault(struct kvm_vcpu *vcpu);

 void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);
@@ -198,15 +198,15 @@ SYM_CODE_START(__kvm_hyp_host_vector)
 invalid_host_el2_vect // FIQ EL2h
 invalid_host_el2_vect // Error EL2h

-host_el1_sync_vect // Synchronous 64-bit EL1
-invalid_host_el1_vect // IRQ 64-bit EL1
-invalid_host_el1_vect // FIQ 64-bit EL1
-invalid_host_el1_vect // Error 64-bit EL1
+host_el1_sync_vect // Synchronous 64-bit EL1/EL0
+invalid_host_el1_vect // IRQ 64-bit EL1/EL0
+invalid_host_el1_vect // FIQ 64-bit EL1/EL0
+invalid_host_el1_vect // Error 64-bit EL1/EL0

-invalid_host_el1_vect // Synchronous 32-bit EL1
-invalid_host_el1_vect // IRQ 32-bit EL1
-invalid_host_el1_vect // FIQ 32-bit EL1
-invalid_host_el1_vect // Error 32-bit EL1
+host_el1_sync_vect // Synchronous 32-bit EL1/EL0
+invalid_host_el1_vect // IRQ 32-bit EL1/EL0
+invalid_host_el1_vect // FIQ 32-bit EL1/EL0
+invalid_host_el1_vect // Error 32-bit EL1/EL0
 SYM_CODE_END(__kvm_hyp_host_vector)

 /*
@@ -145,6 +145,34 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
 inject_abt64(vcpu, true, addr);
 }

+void kvm_inject_size_fault(struct kvm_vcpu *vcpu)
+{
+unsigned long addr, esr;
+
+addr = kvm_vcpu_get_fault_ipa(vcpu);
+addr |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);
+
+if (kvm_vcpu_trap_is_iabt(vcpu))
+kvm_inject_pabt(vcpu, addr);
+else
+kvm_inject_dabt(vcpu, addr);
+
+/*
+ * If AArch64 or LPAE, set FSC to 0 to indicate an Address
+ * Size Fault at level 0, as if exceeding PARange.
+ *
+ * Non-LPAE guests will only get the external abort, as there
+ * is no way to to describe the ASF.
+ */
+if (vcpu_el1_is_32bit(vcpu) &&
+!(vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE))
+return;
+
+esr = vcpu_read_sys_reg(vcpu, ESR_EL1);
+esr &= ~GENMASK_ULL(5, 0);
+vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
+}
+
 /**
 * kvm_inject_undefined - inject an undefined instruction into the guest
 * @vcpu: The vCPU in which to inject the exception
@@ -1337,6 +1337,25 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
 fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
 is_iabt = kvm_vcpu_trap_is_iabt(vcpu);

+if (fault_status == FSC_FAULT) {
+/* Beyond sanitised PARange (which is the IPA limit) */
+if (fault_ipa >= BIT_ULL(get_kvm_ipa_limit())) {
+kvm_inject_size_fault(vcpu);
+return 1;
+}
+
+/* Falls between the IPA range and the PARange? */
+if (fault_ipa >= BIT_ULL(vcpu->arch.hw_mmu->pgt->ia_bits)) {
+fault_ipa |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);
+
+if (is_iabt)
+kvm_inject_pabt(vcpu, fault_ipa);
+else
+kvm_inject_dabt(vcpu, fault_ipa);
+return 1;
+}
+}
+
 /* Synchronous External Abort? */
 if (kvm_vcpu_abt_issea(vcpu)) {
 /*
@@ -177,6 +177,9 @@ u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
 struct kvm_pmu *pmu = &vcpu->arch.pmu;
 struct kvm_pmc *pmc = &pmu->pmc[select_idx];

+if (!kvm_vcpu_has_pmu(vcpu))
+return 0;
+
 counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);

 if (kvm_pmu_pmc_is_chained(pmc) &&

@@ -198,6 +201,9 @@ void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
 {
 u64 reg;

+if (!kvm_vcpu_has_pmu(vcpu))
+return;
+
 reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
 ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
 __vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);

@@ -322,6 +328,9 @@ void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
 struct kvm_pmu *pmu = &vcpu->arch.pmu;
 struct kvm_pmc *pmc;

+if (!kvm_vcpu_has_pmu(vcpu))
+return;
+
 if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
 return;

@@ -357,7 +366,7 @@ void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
 struct kvm_pmu *pmu = &vcpu->arch.pmu;
 struct kvm_pmc *pmc;

-if (!val)
+if (!kvm_vcpu_has_pmu(vcpu) || !val)
 return;

 for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {

@@ -527,6 +536,9 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
 struct kvm_pmu *pmu = &vcpu->arch.pmu;
 int i;

+if (!kvm_vcpu_has_pmu(vcpu))
+return;
+
 if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
 return;

@@ -576,6 +588,9 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
 {
 int i;

+if (!kvm_vcpu_has_pmu(vcpu))
+return;
+
 if (val & ARMV8_PMU_PMCR_E) {
 kvm_pmu_enable_counter_mask(vcpu,
 __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));

@@ -739,6 +754,9 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
 {
 u64 reg, mask;

+if (!kvm_vcpu_has_pmu(vcpu))
+return;
+
 mask = ARMV8_PMU_EVTYPE_MASK;
 mask &= ~ARMV8_PMU_EVTYPE_EVENT;
 mask |= kvm_pmu_event_mask(vcpu->kvm);

@@ -827,6 +845,9 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
 u64 val, mask = 0;
 int base, i, nr_events;

+if (!kvm_vcpu_has_pmu(vcpu))
+return 0;
+
 if (!pmceid1) {
 val = read_sysreg(pmceid0_el0);
 base = 0;
@@ -181,7 +181,8 @@ static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type, u64 flags)

 memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
 vcpu->run->system_event.type = type;
-vcpu->run->system_event.flags = flags;
+vcpu->run->system_event.ndata = 1;
+vcpu->run->system_event.data[0] = flags;
 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
 }
@@ -40,9 +40,9 @@
 typedef unsigned int cycles_t;

 /*
- * On R4000/R4400 before version 5.0 an erratum exists such that if the
- * cycle counter is read in the exact moment that it is matching the
- * compare register, no interrupt will be generated.
+ * On R4000/R4400 an erratum exists such that if the cycle counter is
+ * read in the exact moment that it is matching the compare register,
+ * no interrupt will be generated.
 *
 * There is a suggested workaround and also the erratum can't strike if
 * the compare interrupt isn't being used as the clock source device.

@@ -63,7 +63,7 @@ static inline int can_use_mips_counter(unsigned int prid)
 if (!__builtin_constant_p(cpu_has_counter))
 asm volatile("" : "=m" (cpu_data[0].options));
 if (likely(cpu_has_counter &&
-prid >= (PRID_IMP_R4000 | PRID_REV_ENCODE_44(5, 0))))
+prid > (PRID_IMP_R4000 | PRID_REV_ENCODE_44(15, 15))))
 return 1;
 else
 return 0;
@@ -141,15 +141,10 @@ static __init int cpu_has_mfc0_count_bug(void)
 case CPU_R4400MC:
 /*
 * The published errata for the R4400 up to 3.0 say the CPU
-* has the mfc0 from count bug.
+* has the mfc0 from count bug. This seems the last version
+* produced.
 */
-if ((current_cpu_data.processor_id & 0xff) <= 0x30)
-return 1;
-
-/*
-* we assume newer revisions are ok
-*/
-return 0;
+return 1;
 }

 return 0;
@@ -38,6 +38,7 @@ config PARISC
 select ARCH_HAVE_NMI_SAFE_CMPXCHG
 select GENERIC_SMP_IDLE_THREAD
+select GENERIC_ARCH_TOPOLOGY if SMP
 select GENERIC_CPU_DEVICES if !SMP
 select GENERIC_LIB_DEVMEM_IS_ALLOWED
 select SYSCTL_ARCH_UNALIGN_ALLOW
 select SYSCTL_EXCEPTION_TRACE
@@ -6,6 +6,9 @@ CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=16
+CONFIG_CGROUPS=y
+CONFIG_NAMESPACES=y
+CONFIG_USER_NS=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
 CONFIG_PERF_EVENTS=y

@@ -47,7 +50,6 @@ CONFIG_PARPORT=y
 CONFIG_PARPORT_PC=m
 CONFIG_PARPORT_1284=y
 CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_CRYPTOLOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=6144
 CONFIG_BLK_DEV_SD=y
@@ -16,6 +16,7 @@ CONFIG_CGROUPS=y
 CONFIG_MEMCG=y
 CONFIG_CGROUP_PIDS=y
 CONFIG_CPUSETS=y
+CONFIG_USER_NS=y
 CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y

@@ -267,9 +268,9 @@ CONFIG_CRYPTO_DEFLATE=m
 CONFIG_CRC_CCITT=m
 CONFIG_LIBCRC32C=y
 CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_KERNEL=y
 CONFIG_STRIP_ASM_SYMS=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_STACKOVERFLOW=y
 # CONFIG_SCHED_DEBUG is not set
@@ -160,7 +160,7 @@ extern void __update_cache(pte_t pte);
 #define SPACEID_SHIFT (MAX_ADDRBITS - 32)
 #else
 #define MAX_ADDRBITS (BITS_PER_LONG)
-#define MAX_ADDRESS (1UL << MAX_ADDRBITS)
+#define MAX_ADDRESS (1ULL << MAX_ADDRBITS)
 #define SPACEID_SHIFT 0
 #endif
@@ -403,7 +403,7 @@ void __init parisc_setup_cache_timing(void)
 {
 unsigned long rangetime, alltime;
 unsigned long size;
-unsigned long threshold, threshold2;
+unsigned long threshold;

 alltime = mfctl(16);
 flush_data_cache();

@@ -418,20 +418,8 @@ void __init parisc_setup_cache_timing(void)
 alltime, size, rangetime);

 threshold = L1_CACHE_ALIGN(size * alltime / rangetime);
-
-/*
- * The threshold computed above isn't very reliable since the
- * flush times depend greatly on the percentage of dirty lines
- * in the flush range. Further, the whole cache time doesn't
- * include the time to refill lines that aren't in the mm/vma
- * being flushed. By timing glibc build and checks on mako cpus,
- * the following formula seems to work reasonably well. The
- * value from the timing calculation is too small, and increases
- * build and check times by almost a factor two.
- */
-threshold2 = cache_info.dc_size * num_online_cpus();
-if (threshold2 > threshold)
-threshold = threshold2;
+if (threshold > cache_info.dc_size)
+threshold = cache_info.dc_size;
 if (threshold)
 parisc_cache_flush_threshold = threshold;
 printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
@@ -152,7 +152,7 @@ int __kprobes parisc_kprobe_ss_handler(struct pt_regs *regs)
 /* for absolute branch instructions we can copy iaoq_b. for relative
 * branch instructions we need to calculate the new address based on the
 * difference between iaoq_f and iaoq_b. We cannot use iaoq_b without
-* modificationt because it's based on our ainsn.insn address.
+* modifications because it's based on our ainsn.insn address.
 */

 if (p->post_handler)
@@ -40,7 +40,10 @@ static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags,

 *need_unmap = 1;
 set_fixmap(fixmap, page_to_phys(page));
-raw_spin_lock_irqsave(&patch_lock, *flags);
+if (flags)
+raw_spin_lock_irqsave(&patch_lock, *flags);
+else
+__acquire(&patch_lock);

 return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK));
 }

@@ -49,7 +52,10 @@ static void __kprobes patch_unmap(int fixmap, unsigned long *flags)
 {
 clear_fixmap(fixmap);

-raw_spin_unlock_irqrestore(&patch_lock, *flags);
+if (flags)
+raw_spin_unlock_irqrestore(&patch_lock, *flags);
+else
+__release(&patch_lock);
 }

 void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len)

@@ -61,9 +67,8 @@ void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len)
 int mapped;

 /* Make sure we don't have any aliases in cache */
-flush_kernel_dcache_range_asm(start, end);
-flush_kernel_icache_range_asm(start, end);
-flush_tlb_kernel_range(start, end);
+flush_kernel_vmap_range(addr, len);
+flush_icache_range(start, end);

 p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags, &mapped);

@@ -76,10 +81,8 @@ void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len)
 * We're crossing a page boundary, so
 * need to remap
 */
-flush_kernel_dcache_range_asm((unsigned long)fixmap,
-(unsigned long)p);
-flush_tlb_kernel_range((unsigned long)fixmap,
-(unsigned long)p);
+flush_kernel_vmap_range((void *)fixmap,
+(p-fixmap) * sizeof(*p));
 if (mapped)
 patch_unmap(FIX_TEXT_POKE0, &flags);
 p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags,

@@ -87,10 +90,10 @@ void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len)
 }
 }

-flush_kernel_dcache_range_asm((unsigned long)fixmap, (unsigned long)p);
-flush_tlb_kernel_range((unsigned long)fixmap, (unsigned long)p);
+flush_kernel_vmap_range((void *)fixmap, (p-fixmap) * sizeof(*p));
 if (mapped)
 patch_unmap(FIX_TEXT_POKE0, &flags);
+flush_icache_range(start, end);
 }

 void __kprobes __patch_text(void *addr, u32 insn)
@@ -171,6 +171,7 @@ static int __init processor_probe(struct parisc_device *dev)
 p->cpu_num = cpu_info.cpu_num;
 p->cpu_loc = cpu_info.cpu_loc;

+set_cpu_possible(cpuid, true);
 store_cpu_topology(cpuid);

 #ifdef CONFIG_SMP

@@ -419,8 +420,7 @@ show_cpuinfo (struct seq_file *m, void *v)
 }
 seq_printf(m, " (0x%02lx)\n", boot_cpu_data.pdc.capabilities);

-seq_printf(m, "model\t\t: %s\n"
-"model name\t: %s\n",
+seq_printf(m, "model\t\t: %s - %s\n",
 boot_cpu_data.pdc.sys_model_name,
 cpuinfo->dev ?
 cpuinfo->dev->name : "Unknown");

@@ -461,6 +461,13 @@ static struct parisc_driver cpu_driver __refdata = {
 */
 void __init processor_init(void)
 {
+unsigned int cpu;
+
+reset_cpu_topology();
+
+/* reset possible mask. We will mark those which are possible. */
+for_each_possible_cpu(cpu)
+set_cpu_possible(cpu, false);
+
 register_parisc_driver(&cpu_driver);
 }
@@ -161,6 +161,8 @@ void __init setup_arch(char **cmdline_p)
 #ifdef CONFIG_PA11
 dma_ops_init();
 #endif
+
+clear_sched_clock_stable();
 }

 /*
@@ -251,13 +251,9 @@ void __init time_init(void)
 static int __init init_cr16_clocksource(void)
 {
 /*
- * The cr16 interval timers are not syncronized across CPUs, even if
- * they share the same socket.
+ * The cr16 interval timers are not synchronized across CPUs.
 */
 if (num_online_cpus() > 1 && !running_on_qemu) {
-/* mark sched_clock unstable */
-clear_sched_clock_stable();
-
 clocksource_cr16.name = "cr16_unstable";
 clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
 clocksource_cr16.rating = 0;
@@ -469,7 +469,7 @@ void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long o
 * panic notifiers, and we should call panic
 * directly from the location that we wish.
 * e.g. We should not call panic from
-* parisc_terminate, but rather the oter way around.
+* parisc_terminate, but rather the other way around.
 * This hack works, prints the panic message twice,
 * and it enables reboot timers!
 */
@@ -253,7 +253,7 @@ dbl_fadd(
 return(NOEXCEPTION);
 }
 right_exponent = 1; /* Set exponent to reflect different bias
-* with denomalized numbers. */
+* with denormalized numbers. */
 }
 else
 {

@@ -256,7 +256,7 @@ dbl_fsub(
 return(NOEXCEPTION);
 }
 right_exponent = 1; /* Set exponent to reflect different bias
-* with denomalized numbers. */
+* with denormalized numbers. */
 }
 else
 {

@@ -249,7 +249,7 @@ sgl_fadd(
 return(NOEXCEPTION);
 }
 right_exponent = 1; /* Set exponent to reflect different bias
-* with denomalized numbers. */
+* with denormalized numbers. */
 }
 else
 {

@@ -252,7 +252,7 @@ sgl_fsub(
 return(NOEXCEPTION);
 }
 right_exponent = 1; /* Set exponent to reflect different bias
-* with denomalized numbers. */
+* with denormalized numbers. */
 }
 else
 {
@@ -22,12 +22,15 @@
 .macro cvdso_call funct call_time=0
 .cfi_startproc
-PPC_STLU r1, -PPC_MIN_STKFRM(r1)
-.cfi_adjust_cfa_offset PPC_MIN_STKFRM
 mflr r0
 .cfi_register lr, r0
+PPC_STLU r1, -PPC_MIN_STKFRM(r1)
+.cfi_adjust_cfa_offset PPC_MIN_STKFRM
+PPC_STL r0, PPC_MIN_STKFRM + PPC_LR_STKOFF(r1)
+.cfi_rel_offset lr, PPC_MIN_STKFRM + PPC_LR_STKOFF
 #ifdef __powerpc64__
 PPC_STL r2, PPC_MIN_STKFRM + STK_GOT(r1)
 .cfi_rel_offset r2, PPC_MIN_STKFRM + STK_GOT
 #endif
 get_datapage r5
 .ifeq \call_time

@@ -39,13 +42,15 @@
 PPC_LL r0, PPC_MIN_STKFRM + PPC_LR_STKOFF(r1)
 #ifdef __powerpc64__
 PPC_LL r2, PPC_MIN_STKFRM + STK_GOT(r1)
+.cfi_restore r2
 #endif
 .ifeq \call_time
 cmpwi r3, 0
 .endif
 mtlr r0
-.cfi_restore lr
 addi r1, r1, 2 * PPC_MIN_STKFRM
+.cfi_restore lr
+.cfi_def_cfa_offset 0
 crclr so
 .ifeq \call_time
 beqlr+
@@ -462,7 +462,6 @@ static int papr_scm_pmu_check_events(struct papr_scm_priv *p, struct nvdimm_pmu
 {
 struct papr_scm_perf_stat *stat;
 struct papr_scm_perf_stats *stats;
-char *statid;
 int index, rc, count;
 u32 available_events;

@@ -493,14 +492,12 @@ static int papr_scm_pmu_check_events(struct papr_scm_priv *p, struct nvdimm_pmu

 for (index = 0, stat = stats->scm_statistic, count = 0;
 index < available_events; index++, ++stat) {
-statid = kzalloc(strlen(stat->stat_id) + 1, GFP_KERNEL);
-if (!statid) {
+p->nvdimm_events_map[count] = kmemdup_nul(stat->stat_id, 8, GFP_KERNEL);
+if (!p->nvdimm_events_map[count]) {
 rc = -ENOMEM;
 goto out_nvdimm_events_map;
 }

-strcpy(statid, stat->stat_id);
-p->nvdimm_events_map[count] = statid;
 count++;
 }
 p->nvdimm_events_map[count] = NULL;
@@ -27,22 +27,31 @@ struct vas_caps_entry {

 /*
 * This function is used to get the notification from the drmgr when
-* QoS credits are changed. Though receiving the target total QoS
-* credits here, get the official QoS capabilities from the hypervisor.
+* QoS credits are changed.
 */
-static ssize_t update_total_credits_trigger(struct vas_cop_feat_caps *caps,
+static ssize_t update_total_credits_store(struct vas_cop_feat_caps *caps,
 const char *buf, size_t count)
 {
 int err;
 u16 creds;

 err = kstrtou16(buf, 0, &creds);
+/*
+ * The user space interface from the management console
+ * notifies OS with the new QoS credits and then the
+ * hypervisor. So OS has to use this new credits value
+ * and reconfigure VAS windows (close or reopen depends
+ * on the credits available) instead of depending on VAS
+ * QoS capabilities from the hypervisor.
+ */
 if (!err)
-err = vas_reconfig_capabilties(caps->win_type);
+err = vas_reconfig_capabilties(caps->win_type, creds);

 if (err)
 return -EINVAL;

+pr_info("Set QoS total credits %u\n", creds);
+
 return count;
 }

@@ -92,7 +101,7 @@ VAS_ATTR_RO(nr_total_credits);
 VAS_ATTR_RO(nr_used_credits);

 static struct vas_sysfs_entry update_total_credits_attribute =
-__ATTR(update_total_credits, 0200, NULL, update_total_credits_trigger);
+__ATTR(update_total_credits, 0200, NULL, update_total_credits_store);

 static struct attribute *vas_def_capab_attrs[] = {
 &nr_total_credits_attribute.attr,
@@ -779,10 +779,10 @@ static int reconfig_close_windows(struct vas_caps *vcap, int excess_creds,
 * changes. Reconfig window configurations based on the credits
 * availability from this new capabilities.
 */
-int vas_reconfig_capabilties(u8 type)
+int vas_reconfig_capabilties(u8 type, int new_nr_creds)
 {
 struct vas_cop_feat_caps *caps;
-int old_nr_creds, new_nr_creds;
+int old_nr_creds;
 struct vas_caps *vcaps;
 int rc = 0, nr_active_wins;

@@ -795,12 +795,6 @@ int vas_reconfig_capabilties(u8 type)
 caps = &vcaps->caps;

 mutex_lock(&vas_pseries_mutex);
-rc = h_query_vas_capabilities(H_QUERY_VAS_CAPABILITIES, vcaps->feat,
-(u64)virt_to_phys(&hv_cop_caps));
-if (rc)
-goto out;
-
-new_nr_creds = be16_to_cpu(hv_cop_caps.target_lpar_creds);

 old_nr_creds = atomic_read(&caps->nr_total_credits);

@@ -832,7 +826,6 @@ int vas_reconfig_capabilties(u8 type)
 false);
 }

-out:
 mutex_unlock(&vas_pseries_mutex);
 return rc;
 }

@@ -850,7 +843,7 @@ static int pseries_vas_notifier(struct notifier_block *nb,
 struct of_reconfig_data *rd = data;
 struct device_node *dn = rd->dn;
 const __be32 *intserv = NULL;
-int len, rc = 0;
+int new_nr_creds, len, rc = 0;

 if ((action == OF_RECONFIG_ATTACH_NODE) ||
 (action == OF_RECONFIG_DETACH_NODE))

@@ -862,7 +855,15 @@ static int pseries_vas_notifier(struct notifier_block *nb,
 if (!intserv)
 return NOTIFY_OK;

-rc = vas_reconfig_capabilties(VAS_GZIP_DEF_FEAT_TYPE);
+rc = h_query_vas_capabilities(H_QUERY_VAS_CAPABILITIES,
+vascaps[VAS_GZIP_DEF_FEAT_TYPE].feat,
+(u64)virt_to_phys(&hv_cop_caps));
+if (!rc) {
+new_nr_creds = be16_to_cpu(hv_cop_caps.target_lpar_creds);
+rc = vas_reconfig_capabilties(VAS_GZIP_DEF_FEAT_TYPE,
+new_nr_creds);
+}

 if (rc)
 pr_err("Failed reconfig VAS capabilities with DLPAR\n");
@@ -135,7 +135,7 @@ struct pseries_vas_window {
 };

 int sysfs_add_vas_caps(struct vas_cop_feat_caps *caps);
-int vas_reconfig_capabilties(u8 type);
+int vas_reconfig_capabilties(u8 type, int new_nr_creds);
 int __init sysfs_pseries_vas_init(struct vas_all_caps *vas_caps);

 #ifdef CONFIG_PPC_VAS
@@ -83,7 +83,7 @@ void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run)

 void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
 struct kvm_run *run,
-u32 type, u64 flags)
+u32 type, u64 reason)
 {
 unsigned long i;
 struct kvm_vcpu *tmp;

@@ -94,7 +94,8 @@ void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,

 memset(&run->system_event, 0, sizeof(run->system_event));
 run->system_event.type = type;
-run->system_event.flags = flags;
+run->system_event.ndata = 1;
+run->system_event.data[0] = reason;
 run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
 }
@@ -208,8 +208,25 @@ static void __init setup_bootmem(void)
 * early_init_fdt_reserve_self() since __pa() does
 * not work for DTB pointers that are fixmap addresses
 */
-if (!IS_ENABLED(CONFIG_BUILTIN_DTB))
-memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));
+if (!IS_ENABLED(CONFIG_BUILTIN_DTB)) {
+/*
+ * In case the DTB is not located in a memory region we won't
+ * be able to locate it later on via the linear mapping and
+ * get a segfault when accessing it via __va(dtb_early_pa).
+ * To avoid this situation copy DTB to a memory region.
+ * Note that memblock_phys_alloc will also reserve DTB region.
+ */
+if (!memblock_is_memory(dtb_early_pa)) {
+size_t fdt_size = fdt_totalsize(dtb_early_va);
+phys_addr_t new_dtb_early_pa = memblock_phys_alloc(fdt_size, PAGE_SIZE);
+void *new_dtb_early_va = early_memremap(new_dtb_early_pa, fdt_size);
+
+memcpy(new_dtb_early_va, dtb_early_va, fdt_size);
+early_memunmap(new_dtb_early_va, fdt_size);
+_dtb_early_pa = new_dtb_early_pa;
+} else
+memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));
+}

 early_init_fdt_scan_reserved_mem();
 dma_contiguous_reserve(dma32_phys_limit);
@@ -30,6 +30,16 @@ KBUILD_CFLAGS_DECOMPRESSOR += -fno-stack-protector
 KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-disable-warning, address-of-packed-member)
 KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),-g)
 KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO_DWARF4), $(call cc-option, -gdwarf-4,))
+
+ifdef CONFIG_CC_IS_GCC
+ifeq ($(call cc-ifversion, -ge, 1200, y), y)
+ifeq ($(call cc-ifversion, -lt, 1300, y), y)
+KBUILD_CFLAGS += $(call cc-disable-warning, array-bounds)
+KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-disable-warning, array-bounds)
+endif
+endif
+endif
+
 UTS_MACHINE := s390x
 STACK_SIZE := $(if $(CONFIG_KASAN),65536,16384)
 CHECKFLAGS += -D__s390__ -D__s390x__
@@ -2384,7 +2384,16 @@ static int kvm_s390_vm_mem_op(struct kvm *kvm, struct kvm_s390_mem_op *mop)
 return -EINVAL;
 if (mop->size > MEM_OP_MAX_SIZE)
 return -E2BIG;
-if (kvm_s390_pv_is_protected(kvm))
+/*
+ * This is technically a heuristic only, if the kvm->lock is not
+ * taken, it is not guaranteed that the vm is/remains non-protected.
+ * This is ok from a kernel perspective, wrongdoing is detected
+ * on the access, -EFAULT is returned and the vm may crash the
+ * next time it accesses the memory in question.
+ * There is no sane usecase to do switching and a memop on two
+ * different CPUs at the same time.
+ */
+if (kvm_s390_pv_get_handle(kvm))
 return -EINVAL;
 if (mop->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION) {
 if (access_key_invalid(mop->key))
@@ -1183,6 +1183,7 @@ EXPORT_SYMBOL_GPL(gmap_read_table);
 static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
 struct gmap_rmap *rmap)
 {
+struct gmap_rmap *temp;
 void __rcu **slot;

 BUG_ON(!gmap_is_shadow(sg));

@@ -1190,6 +1191,12 @@ static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
 if (slot) {
 rmap->next = radix_tree_deref_slot_protected(slot,
 &sg->guest_table_lock);
+for (temp = rmap->next; temp; temp = temp->next) {
+if (temp->raddr == rmap->raddr) {
+kfree(rmap);
+return;
+}
+}
 radix_tree_replace_slot(&sg->host_to_rmap, slot, rmap);
 } else {
 rmap->next = NULL;
@@ -1866,7 +1866,7 @@ config X86_KERNEL_IBT
 code with them to make this happen.

 In addition to building the kernel with IBT, seal all functions that
-are not indirect call targets, avoiding them ever becomming one.
+are not indirect call targets, avoiding them ever becoming one.

 This requires LTO like objtool runs and will slow down the build. It
 does significantly reduce the number of ENDBR instructions in the
@@ -337,6 +337,9 @@ SYM_CODE_END(ret_from_fork)

 call \cfunc

+/* For some configurations \cfunc ends up being a noreturn. */
+REACHABLE
+
 jmp error_return
 .endm
@@ -26,6 +26,7 @@
 * _G - parts with extra graphics on
 * _X - regular server parts
 * _D - micro server parts
+* _N,_P - other mobile parts
 *
 * Historical OPTDIFFs:
 *

@@ -107,8 +108,10 @@

 #define INTEL_FAM6_ALDERLAKE 0x97 /* Golden Cove / Gracemont */
 #define INTEL_FAM6_ALDERLAKE_L 0x9A /* Golden Cove / Gracemont */
+#define INTEL_FAM6_ALDERLAKE_N 0xBE

 #define INTEL_FAM6_RAPTORLAKE 0xB7
+#define INTEL_FAM6_RAPTORLAKE_P 0xBA

 /* "Small Core" Processors (Atom) */
@@ -131,10 +131,12 @@ extern void __init load_ucode_bsp(void);
 extern void load_ucode_ap(void);
 void reload_early_microcode(void);
 extern bool initrd_gone;
+void microcode_bsp_resume(void);
 #else
 static inline void __init load_ucode_bsp(void) { }
 static inline void load_ucode_ap(void) { }
 static inline void reload_early_microcode(void) { }
+static inline void microcode_bsp_resume(void) { }
 #endif

 #endif /* _ASM_X86_MICROCODE_H */
@@ -559,10 +559,6 @@ static inline void update_page_count(int level, unsigned long pages) { }
 extern pte_t *lookup_address(unsigned long address, unsigned int *level);
 extern pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
 unsigned int *level);
-
-struct mm_struct;
-extern pte_t *lookup_address_in_mm(struct mm_struct *mm, unsigned long address,
-unsigned int *level);
 extern pmd_t *lookup_pmd_address(unsigned long address);
 extern phys_addr_t slow_virt_to_phys(void *__address);
 extern int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn,
@@ -26,6 +26,7 @@
 ".align 4 \n" \
 ".globl " STATIC_CALL_TRAMP_STR(name) " \n" \
 STATIC_CALL_TRAMP_STR(name) ": \n" \
+ANNOTATE_NOENDBR \
 insns " \n" \
 ".byte 0x53, 0x43, 0x54 \n" \
 ".type " STATIC_CALL_TRAMP_STR(name) ", @function \n" \
@@ -758,9 +758,9 @@ static struct subsys_interface mc_cpu_interface = {
 };

 /**
- * mc_bp_resume - Update boot CPU microcode during resume.
+ * microcode_bsp_resume - Update boot CPU microcode during resume.
 */
-static void mc_bp_resume(void)
+void microcode_bsp_resume(void)
 {
 int cpu = smp_processor_id();
 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

@@ -772,7 +772,7 @@ static void mc_bp_resume(void)
 }

 static struct syscore_ops mc_syscore_ops = {
-.resume = mc_bp_resume,
+.resume = microcode_bsp_resume,
 };

 static int mc_cpu_starting(unsigned int cpu)
@@ -41,17 +41,7 @@ struct fpu_state_config fpu_user_cfg __ro_after_init;
 */
 struct fpstate init_fpstate __ro_after_init;

-/*
- * Track whether the kernel is using the FPU state
- * currently.
- *
- * This flag is used:
- *
- * - by IRQ context code to potentially use the FPU
- * if it's unused.
- *
- * - to debug kernel_fpu_begin()/end() correctness
- */
+/* Track in-kernel FPU usage */
 static DEFINE_PER_CPU(bool, in_kernel_fpu);

 /*

@@ -59,42 +49,37 @@ static DEFINE_PER_CPU(bool, in_kernel_fpu);
 */
 DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

-static bool kernel_fpu_disabled(void)
-{
-return this_cpu_read(in_kernel_fpu);
-}
-
-static bool interrupted_kernel_fpu_idle(void)
-{
-return !kernel_fpu_disabled();
-}
-
-/*
- * Were we in user mode (or vm86 mode) when we were
- * interrupted?
- *
- * Doing kernel_fpu_begin/end() is ok if we are running
- * in an interrupt context from user mode - we'll just
- * save the FPU state as required.
- */
-static bool interrupted_user_mode(void)
-{
-struct pt_regs *regs = get_irq_regs();
-return regs && user_mode(regs);
-}
-
-/*
- * Can we use the FPU in kernel mode with the
- * whole "kernel_fpu_begin/end()" sequence?
- *
- * It's always ok in process context (ie "not interrupt")
- * but it is sometimes ok even from an irq.
- */
 bool irq_fpu_usable(void)
 {
-return !in_interrupt() ||
-interrupted_user_mode() ||
-interrupted_kernel_fpu_idle();
+if (WARN_ON_ONCE(in_nmi()))
+return false;
+
+/* In kernel FPU usage already active? */
+if (this_cpu_read(in_kernel_fpu))
+return false;
+
+/*
+ * When not in NMI or hard interrupt context, FPU can be used in:
+ *
+ * - Task context except from within fpregs_lock()'ed critical
+ * regions.
+ *
+ * - Soft interrupt processing context which cannot happen
+ * while in a fpregs_lock()'ed critical region.
+ */
+if (!in_hardirq())
+return true;
+
+/*
+ * In hard interrupt context it's safe when soft interrupts
+ * are enabled, which means the interrupt did not hit in
+ * a fpregs_lock()'ed critical region.
+ */
+return !softirq_count();
 }
 EXPORT_SYMBOL(irq_fpu_usable);
@@ -339,11 +339,11 @@ static bool stack_access_ok(struct unwind_state *state, unsigned long _addr,
 struct stack_info *info = &state->stack_info;
 void *addr = (void *)_addr;

-if (!on_stack(info, addr, len) &&
-(get_stack_info(addr, state->task, info, &state->stack_mask)))
-return false;
+if (on_stack(info, addr, len))
+return true;

-return true;
+return !get_stack_info(addr, state->task, info, &state->stack_mask) &&
+on_stack(info, addr, len);
 }

 static bool deref_stack_reg(struct unwind_state *state, unsigned long addr,
@@ -887,6 +887,11 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
 union cpuid10_eax eax;
 union cpuid10_edx edx;

+if (!static_cpu_has(X86_FEATURE_ARCH_PERFMON)) {
+entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
+break;
+}
+
 perf_get_x86_pmu_capability(&cap);

 /*

@@ -1085,12 +1090,21 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
 case 0x80000000:
 entry->eax = min(entry->eax, 0x80000021);
 /*
- * Serializing LFENCE is reported in a multitude of ways,
- * and NullSegClearsBase is not reported in CPUID on Zen2;
- * help userspace by providing the CPUID leaf ourselves.
+ * Serializing LFENCE is reported in a multitude of ways, and
+ * NullSegClearsBase is not reported in CPUID on Zen2; help
+ * userspace by providing the CPUID leaf ourselves.
+ *
+ * However, only do it if the host has CPUID leaf 0x8000001d.
+ * QEMU thinks that it can query the host blindly for that
+ * CPUID leaf if KVM reports that it supports 0x8000001d or
+ * above. The processor merrily returns values from the
+ * highest Intel leaf which QEMU tries to use as the guest's
+ * 0x8000001d. Even worse, this can result in an infinite
+ * loop if said highest leaf has no subleaves indexed by ECX.
 */
-if (static_cpu_has(X86_FEATURE_LFENCE_RDTSC)
-|| !static_cpu_has_bug(X86_BUG_NULL_SEG))
+if (entry->eax >= 0x8000001d &&
+(static_cpu_has(X86_FEATURE_LFENCE_RDTSC)
+|| !static_cpu_has_bug(X86_BUG_NULL_SEG)))
 entry->eax = max(entry->eax, 0x80000021);
 break;
 case 0x80000001:
@@ -65,6 +65,30 @@ static __always_inline u64 rsvd_bits(int s, int e)
 	return ((2ULL << (e - s)) - 1) << s;
 }
 
+/*
+ * The number of non-reserved physical address bits irrespective of features
+ * that repurpose legal bits, e.g. MKTME.
+ */
+extern u8 __read_mostly shadow_phys_bits;
+
+static inline gfn_t kvm_mmu_max_gfn(void)
+{
+	/*
+	 * Note that this uses the host MAXPHYADDR, not the guest's.
+	 * EPT/NPT cannot support GPAs that would exceed host.MAXPHYADDR;
+	 * assuming KVM is running on bare metal, guest accesses beyond
+	 * host.MAXPHYADDR will hit a #PF(RSVD) and never cause a vmexit
+	 * (either EPT Violation/Misconfig or #NPF), and so KVM will never
+	 * install a SPTE for such addresses.  If KVM is running as a VM
+	 * itself, on the other hand, it might see a MAXPHYADDR that is less
+	 * than hardware's real MAXPHYADDR.  Using the host MAXPHYADDR
+	 * disallows such SPTEs entirely and simplifies the TDP MMU.
+	 */
+	int max_gpa_bits = likely(tdp_enabled) ? shadow_phys_bits : 52;
+
+	return (1ULL << (max_gpa_bits - PAGE_SHIFT)) - 1;
+}
+
 void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask);
 void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only);
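As a worked example of kvm_mmu_max_gfn() (assumed numbers, not taken from this commit): on a bare-metal host with a 46-bit MAXPHYADDR and 4KiB pages,

	/* tdp_enabled, shadow_phys_bits == 46, PAGE_SHIFT == 12 (assumed) */
	int max_gpa_bits = 46;
	gfn_t max_gfn = (1ULL << (46 - 12)) - 1;	/* 0x3ffffffff */

i.e. the last mappable gfn sits just below 64TiB of guest-physical address space, while with !tdp_enabled the cap relaxes to the architectural 52 bits.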
@@ -473,30 +473,6 @@ retry:
 }
 #endif
 
-static bool spte_has_volatile_bits(u64 spte)
-{
-	if (!is_shadow_present_pte(spte))
-		return false;
-
-	/*
-	 * Always atomically update spte if it can be updated
-	 * out of mmu-lock, it can ensure dirty bit is not lost,
-	 * also, it can help us to get a stable is_writable_pte()
-	 * to ensure tlb flush is not missed.
-	 */
-	if (spte_can_locklessly_be_made_writable(spte) ||
-	    is_access_track_spte(spte))
-		return true;
-
-	if (spte_ad_enabled(spte)) {
-		if ((spte & shadow_accessed_mask) == 0 ||
-		    (is_writable_pte(spte) && (spte & shadow_dirty_mask) == 0))
-			return true;
-	}
-
-	return false;
-}
-
 /* Rules for using mmu_spte_set:
  * Set the sptep from nonpresent to present.
  * Note: the sptep being assigned *must* be either not present
 
@@ -557,7 +533,7 @@ static bool mmu_spte_update(u64 *sptep, u64 new_spte)
 	 * we always atomically update it, see the comments in
 	 * spte_has_volatile_bits().
 	 */
-	if (spte_can_locklessly_be_made_writable(old_spte) &&
+	if (is_mmu_writable_spte(old_spte) &&
 	    !is_writable_pte(new_spte))
 		flush = true;
 
@@ -591,7 +567,8 @@ static int mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep)
 	u64 old_spte = *sptep;
 	int level = sptep_to_sp(sptep)->role.level;
 
-	if (!spte_has_volatile_bits(old_spte))
+	if (!is_shadow_present_pte(old_spte) ||
+	    !spte_has_volatile_bits(old_spte))
 		__update_clear_spte_fast(sptep, 0ull);
 	else
 		old_spte = __update_clear_spte_slow(sptep, 0ull);
 
@@ -1187,7 +1164,7 @@ static bool spte_write_protect(u64 *sptep, bool pt_protect)
 	u64 spte = *sptep;
 
 	if (!is_writable_pte(spte) &&
-	    !(pt_protect && spte_can_locklessly_be_made_writable(spte)))
+	    !(pt_protect && is_mmu_writable_spte(spte)))
 		return false;
 
 	rmap_printk("spte %p %llx\n", sptep, *sptep);
 
@@ -2804,8 +2781,12 @@ static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
 				  const struct kvm_memory_slot *slot)
 {
 	unsigned long hva;
-	pte_t *pte;
-	int level;
+	unsigned long flags;
+	int level = PG_LEVEL_4K;
+	pgd_t pgd;
+	p4d_t p4d;
+	pud_t pud;
+	pmd_t pmd;
 
 	if (!PageCompound(pfn_to_page(pfn)) && !kvm_is_zone_device_pfn(pfn))
 		return PG_LEVEL_4K;
 
@@ -2820,10 +2801,43 @@ static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
 	 */
 	hva = __gfn_to_hva_memslot(slot, gfn);
 
-	pte = lookup_address_in_mm(kvm->mm, hva, &level);
-	if (unlikely(!pte))
-		return PG_LEVEL_4K;
+	/*
+	 * Lookup the mapping level in the current mm.  The information
+	 * may become stale soon, but it is safe to use as long as
+	 * 1) mmu_notifier_retry was checked after taking mmu_lock, and
+	 * 2) mmu_lock is taken now.
+	 *
+	 * We still need to disable IRQs to prevent concurrent tear down
+	 * of page tables.
+	 */
+	local_irq_save(flags);
+
+	pgd = READ_ONCE(*pgd_offset(kvm->mm, hva));
+	if (pgd_none(pgd))
+		goto out;
+
+	p4d = READ_ONCE(*p4d_offset(&pgd, hva));
+	if (p4d_none(p4d) || !p4d_present(p4d))
+		goto out;
+
+	pud = READ_ONCE(*pud_offset(&p4d, hva));
+	if (pud_none(pud) || !pud_present(pud))
+		goto out;
+
+	if (pud_large(pud)) {
+		level = PG_LEVEL_1G;
+		goto out;
+	}
+
+	pmd = READ_ONCE(*pmd_offset(&pud, hva));
+	if (pmd_none(pmd) || !pmd_present(pmd))
+		goto out;
+
+	if (pmd_large(pmd))
+		level = PG_LEVEL_2M;
+
+out:
+	local_irq_restore(flags);
 	return level;
 }
 
@@ -2992,9 +3006,15 @@ static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
 		/*
 		 * If MMIO caching is disabled, emulate immediately without
 		 * touching the shadow page tables as attempting to install an
-		 * MMIO SPTE will just be an expensive nop.
+		 * MMIO SPTE will just be an expensive nop.  Do not cache MMIO
+		 * whose gfn is greater than host.MAXPHYADDR, any guest that
+		 * generates such gfns is running nested and is being tricked
+		 * by L0 userspace (you can observe gfn > L1.MAXPHYADDR if
+		 * and only if L1's MAXPHYADDR is inaccurate with respect to
+		 * the hardware's).
 		 */
-		if (unlikely(!shadow_mmio_value)) {
+		if (unlikely(!shadow_mmio_value) ||
+		    unlikely(fault->gfn > kvm_mmu_max_gfn())) {
 			*ret_val = RET_PF_EMULATE;
 			return true;
 		}
 
@@ -3153,8 +3173,7 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 		 * be removed in the fast path only if the SPTE was
 		 * write-protected for dirty-logging or access tracking.
 		 */
-		if (fault->write &&
-		    spte_can_locklessly_be_made_writable(spte)) {
+		if (fault->write && is_mmu_writable_spte(spte)) {
 			new_spte |= PT_WRITABLE_MASK;
 
 			/*
@@ -90,6 +90,34 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
 			     E820_TYPE_RAM);
 }
 
+/*
+ * Returns true if the SPTE has bits that may be set without holding mmu_lock.
+ * The caller is responsible for checking if the SPTE is shadow-present, and
+ * for determining whether or not the caller cares about non-leaf SPTEs.
+ */
+bool spte_has_volatile_bits(u64 spte)
+{
+	/*
+	 * Always atomically update spte if it can be updated
+	 * out of mmu-lock, it can ensure dirty bit is not lost,
+	 * also, it can help us to get a stable is_writable_pte()
+	 * to ensure tlb flush is not missed.
+	 */
+	if (!is_writable_pte(spte) && is_mmu_writable_spte(spte))
+		return true;
+
+	if (is_access_track_spte(spte))
+		return true;
+
+	if (spte_ad_enabled(spte)) {
+		if (!(spte & shadow_accessed_mask) ||
+		    (is_writable_pte(spte) && !(spte & shadow_dirty_mask)))
+			return true;
+	}
+
+	return false;
+}
+
 bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	       const struct kvm_memory_slot *slot,
 	       unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
@@ -201,12 +201,6 @@ static inline bool is_removed_spte(u64 spte)
  */
 extern u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;
 
-/*
- * The number of non-reserved physical address bits irrespective of features
- * that repurpose legal bits, e.g. MKTME.
- */
-extern u8 __read_mostly shadow_phys_bits;
-
 static inline bool is_mmio_spte(u64 spte)
 {
 	return (spte & shadow_mmio_mask) == shadow_mmio_value &&
 
@@ -396,7 +390,7 @@ static inline void check_spte_writable_invariants(u64 spte)
 		  "kvm: Writable SPTE is not MMU-writable: %llx", spte);
 }
 
-static inline bool spte_can_locklessly_be_made_writable(u64 spte)
+static inline bool is_mmu_writable_spte(u64 spte)
 {
 	return spte & shadow_mmu_writable_mask;
 }
 
@@ -410,6 +404,8 @@ static inline u64 get_mmio_spte_generation(u64 spte)
 	return gen;
 }
 
+bool spte_has_volatile_bits(u64 spte);
+
 bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	       const struct kvm_memory_slot *slot,
 	       unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
@@ -6,6 +6,7 @@
 #include <linux/kvm_host.h>
 
+#include "mmu.h"
 #include "spte.h"
 
 /*
  * TDP MMU SPTEs are RCU protected to allow paging structures (non-leaf SPTEs)
 
@@ -17,9 +18,38 @@ static inline u64 kvm_tdp_mmu_read_spte(tdp_ptep_t sptep)
 {
 	return READ_ONCE(*rcu_dereference(sptep));
 }
-static inline void kvm_tdp_mmu_write_spte(tdp_ptep_t sptep, u64 val)
+
+static inline u64 kvm_tdp_mmu_write_spte_atomic(tdp_ptep_t sptep, u64 new_spte)
 {
-	WRITE_ONCE(*rcu_dereference(sptep), val);
+	return xchg(rcu_dereference(sptep), new_spte);
 }
 
+static inline void __kvm_tdp_mmu_write_spte(tdp_ptep_t sptep, u64 new_spte)
+{
+	WRITE_ONCE(*rcu_dereference(sptep), new_spte);
+}
+
+static inline u64 kvm_tdp_mmu_write_spte(tdp_ptep_t sptep, u64 old_spte,
+					 u64 new_spte, int level)
+{
+	/*
+	 * Atomically write the SPTE if it is a shadow-present, leaf SPTE with
+	 * volatile bits, i.e. has bits that can be set outside of mmu_lock.
+	 * The Writable bit can be set by KVM's fast page fault handler, and
+	 * Accessed and Dirty bits can be set by the CPU.
+	 *
+	 * Note, non-leaf SPTEs do have Accessed bits and those bits are
+	 * technically volatile, but KVM doesn't consume the Accessed bit of
+	 * non-leaf SPTEs, i.e. KVM doesn't care if it clobbers the bit.  This
+	 * logic needs to be reassessed if KVM were to use non-leaf Accessed
+	 * bits, e.g. to skip stepping down into child SPTEs when aging SPTEs.
+	 */
+	if (is_shadow_present_pte(old_spte) && is_last_spte(old_spte, level) &&
+	    spte_has_volatile_bits(old_spte))
+		return kvm_tdp_mmu_write_spte_atomic(sptep, new_spte);
+
+	__kvm_tdp_mmu_write_spte(sptep, new_spte);
+	return old_spte;
+}
 
 /*
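A caller-side sketch of the new helper's contract (illustrative only, not a real call site from this commit): when zapping a leaf SPTE while mmu_lock is held for write, the helper decides between xchg() and a plain WRITE_ONCE() and hands back the value that was actually resident, which may differ from the stale snapshot if the CPU set Accessed/Dirty bits in between:

	/* old_spte is a possibly-stale snapshot read earlier. */
	old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte, 0, level);

	/* Bookkeeping must consume the returned value, not the snapshot. */
	handle_changed_spte(kvm, as_id, gfn, old_spte, 0, level, false);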
@@ -426,9 +426,9 @@ static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
 	tdp_mmu_unlink_sp(kvm, sp, shared);
 
 	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
-		u64 *sptep = rcu_dereference(pt) + i;
+		tdp_ptep_t sptep = pt + i;
 		gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
-		u64 old_child_spte;
+		u64 old_spte;
 
 		if (shared) {
 			/*
 
@@ -440,8 +440,8 @@ static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
 			 * value to the removed SPTE value.
 			 */
 			for (;;) {
-				old_child_spte = xchg(sptep, REMOVED_SPTE);
-				if (!is_removed_spte(old_child_spte))
+				old_spte = kvm_tdp_mmu_write_spte_atomic(sptep, REMOVED_SPTE);
+				if (!is_removed_spte(old_spte))
 					break;
 				cpu_relax();
 			}
 
@@ -455,23 +455,43 @@ static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
 			 * are guarded by the memslots generation, not by being
 			 * unreachable.
 			 */
-			old_child_spte = READ_ONCE(*sptep);
-			if (!is_shadow_present_pte(old_child_spte))
+			old_spte = kvm_tdp_mmu_read_spte(sptep);
+			if (!is_shadow_present_pte(old_spte))
 				continue;
 
 			/*
-			 * Marking the SPTE as a removed SPTE is not
-			 * strictly necessary here as the MMU lock will
-			 * stop other threads from concurrently modifying
-			 * this SPTE. Using the removed SPTE value keeps
-			 * the two branches consistent and simplifies
-			 * the function.
+			 * Use the common helper instead of a raw WRITE_ONCE as
+			 * the SPTE needs to be updated atomically if it can be
+			 * modified by a different vCPU outside of mmu_lock.
+			 * Even though the parent SPTE is !PRESENT, the TLB
+			 * hasn't yet been flushed, and both Intel and AMD
+			 * document that A/D assists can use upper-level PxE
+			 * entries that are cached in the TLB, i.e. the CPU can
+			 * still access the page and mark it dirty.
+			 *
+			 * No retry is needed in the atomic update path as the
+			 * sole concern is dropping a Dirty bit, i.e. no other
+			 * task can zap/remove the SPTE as mmu_lock is held for
+			 * write.  Marking the SPTE as a removed SPTE is not
+			 * strictly necessary for the same reason, but using
+			 * the remove SPTE value keeps the shared/exclusive
+			 * paths consistent and allows the handle_changed_spte()
+			 * call below to hardcode the new value to REMOVED_SPTE.
+			 *
+			 * Note, even though dropping a Dirty bit is the only
+			 * scenario where a non-atomic update could result in a
+			 * functional bug, simply checking the Dirty bit isn't
+			 * sufficient as a fast page fault could read the upper
+			 * level SPTE before it is zapped, and then make this
+			 * target SPTE writable, resume the guest, and set the
+			 * Dirty bit between reading the SPTE above and writing
+			 * it here.
 			 */
-			WRITE_ONCE(*sptep, REMOVED_SPTE);
+			old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte,
+							  REMOVED_SPTE, level);
 		}
 		handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
-				    old_child_spte, REMOVED_SPTE, level,
-				    shared);
+				    old_spte, REMOVED_SPTE, level, shared);
 	}
 
 	call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
 
@@ -667,14 +687,13 @@ static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm,
 					   KVM_PAGES_PER_HPAGE(iter->level));
 
 	/*
-	 * No other thread can overwrite the removed SPTE as they
-	 * must either wait on the MMU lock or use
-	 * tdp_mmu_set_spte_atomic which will not overwrite the
-	 * special removed SPTE value. No bookkeeping is needed
-	 * here since the SPTE is going from non-present
-	 * to non-present.
+	 * No other thread can overwrite the removed SPTE as they must either
+	 * wait on the MMU lock or use tdp_mmu_set_spte_atomic() which will not
+	 * overwrite the special removed SPTE value.  No bookkeeping is needed
+	 * here since the SPTE is going from non-present to non-present.  Use
+	 * the raw write helper to avoid an unnecessary check on volatile bits.
 	 */
-	kvm_tdp_mmu_write_spte(iter->sptep, 0);
+	__kvm_tdp_mmu_write_spte(iter->sptep, 0);
 
 	return 0;
 }
 
@@ -699,10 +718,13 @@ static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm,
  *		      unless performing certain dirty logging operations.
  *		      Leaving record_dirty_log unset in that case prevents page
  *		      writes from being double counted.
+ *
+ * Returns the old SPTE value, which _may_ be different than @old_spte if the
+ * SPTE had volatile bits.
  */
-static void __tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
-			       u64 old_spte, u64 new_spte, gfn_t gfn, int level,
-			       bool record_acc_track, bool record_dirty_log)
+static u64 __tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
+			      u64 old_spte, u64 new_spte, gfn_t gfn, int level,
+			      bool record_acc_track, bool record_dirty_log)
 {
 	lockdep_assert_held_write(&kvm->mmu_lock);
 
@@ -715,7 +737,7 @@ static void __tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
 	 */
 	WARN_ON(is_removed_spte(old_spte) || is_removed_spte(new_spte));
 
-	kvm_tdp_mmu_write_spte(sptep, new_spte);
+	old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte, new_spte, level);
 
 	__handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, false);
 
@@ -724,6 +746,7 @@ static void __tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
 	if (record_dirty_log)
 		handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
 					      new_spte, level);
+	return old_spte;
 }
 
 static inline void _tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
 
@@ -732,9 +755,10 @@ static inline void _tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
 {
 	WARN_ON_ONCE(iter->yielded);
 
-	__tdp_mmu_set_spte(kvm, iter->as_id, iter->sptep, iter->old_spte,
-			   new_spte, iter->gfn, iter->level,
-			   record_acc_track, record_dirty_log);
+	iter->old_spte = __tdp_mmu_set_spte(kvm, iter->as_id, iter->sptep,
+					    iter->old_spte, new_spte,
+					    iter->gfn, iter->level,
+					    record_acc_track, record_dirty_log);
 }
 
 static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
 
@@ -815,14 +839,15 @@ static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm,
 	return iter->yielded;
 }
 
-static inline gfn_t tdp_mmu_max_gfn_host(void)
+static inline gfn_t tdp_mmu_max_gfn_exclusive(void)
 {
 	/*
-	 * Bound TDP MMU walks at host.MAXPHYADDR, guest accesses beyond that
-	 * will hit a #PF(RSVD) and never hit an EPT Violation/Misconfig / #NPF,
-	 * and so KVM will never install a SPTE for such addresses.
+	 * Bound TDP MMU walks at host.MAXPHYADDR.  KVM disallows memslots with
+	 * a gpa range that would exceed the max gfn, and KVM does not create
+	 * MMIO SPTEs for "impossible" gfns, instead sending such accesses down
+	 * the slow emulation path every time.
 	 */
-	return 1ULL << (shadow_phys_bits - PAGE_SHIFT);
+	return kvm_mmu_max_gfn() + 1;
 }
 
 static void __tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
 
@@ -830,7 +855,7 @@ static void __tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
 {
 	struct tdp_iter iter;
 
-	gfn_t end = tdp_mmu_max_gfn_host();
+	gfn_t end = tdp_mmu_max_gfn_exclusive();
 	gfn_t start = 0;
 
 	for_each_tdp_pte_min_level(iter, root, zap_level, start, end) {
 
@@ -923,7 +948,7 @@ static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
 {
 	struct tdp_iter iter;
 
-	end = min(end, tdp_mmu_max_gfn_host());
+	end = min(end, tdp_mmu_max_gfn_exclusive());
 
 	lockdep_assert_held_write(&kvm->mmu_lock);
@@ -45,6 +45,22 @@ static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
 	[7] = { 0xd1, 0x00, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
 };
 
+/* duplicated from amd_f17h_perfmon_event_map. */
+static struct kvm_event_hw_type_mapping amd_f17h_event_mapping[] = {
+	[0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
+	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
+	[2] = { 0x60, 0xff, PERF_COUNT_HW_CACHE_REFERENCES },
+	[3] = { 0x64, 0x09, PERF_COUNT_HW_CACHE_MISSES },
+	[4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
+	[5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
+	[6] = { 0x87, 0x02, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
+	[7] = { 0x87, 0x01, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
+};
+
+/* amd_pmc_perf_hw_id depends on these being the same size */
+static_assert(ARRAY_SIZE(amd_event_mapping) ==
+	     ARRAY_SIZE(amd_f17h_event_mapping));
+
 static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
 {
 	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
 
@@ -140,6 +156,7 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
 
 static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc)
 {
+	struct kvm_event_hw_type_mapping *event_mapping;
 	u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
 	u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
 	int i;
 
@@ -148,15 +165,20 @@ static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc)
 	if (WARN_ON(pmc_is_fixed(pmc)))
 		return PERF_COUNT_HW_MAX;
 
+	if (guest_cpuid_family(pmc->vcpu) >= 0x17)
+		event_mapping = amd_f17h_event_mapping;
+	else
+		event_mapping = amd_event_mapping;
+
 	for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
-		if (amd_event_mapping[i].eventsel == event_select
-		    && amd_event_mapping[i].unit_mask == unit_mask)
+		if (event_mapping[i].eventsel == event_select
+		    && event_mapping[i].unit_mask == unit_mask)
 			break;
 
 	if (i == ARRAY_SIZE(amd_event_mapping))
 		return PERF_COUNT_HW_MAX;
 
-	return amd_event_mapping[i].event_type;
+	return event_mapping[i].event_type;
 }
 
 /* check if a PMC is enabled by comparing it against global_ctrl bits. Because
@@ -1594,24 +1594,51 @@ static void sev_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
 	atomic_set_release(&src_sev->migration_in_progress, 0);
}
 
-static int sev_lock_vcpus_for_migration(struct kvm *kvm)
+/* vCPU mutex subclasses. */
+enum sev_migration_role {
+	SEV_MIGRATION_SOURCE = 0,
+	SEV_MIGRATION_TARGET,
+	SEV_NR_MIGRATION_ROLES,
+};
+
+static int sev_lock_vcpus_for_migration(struct kvm *kvm,
+					enum sev_migration_role role)
 {
 	struct kvm_vcpu *vcpu;
 	unsigned long i, j;
+	bool first = true;
 
 	kvm_for_each_vcpu(i, vcpu, kvm) {
-		if (mutex_lock_killable(&vcpu->mutex))
+		if (mutex_lock_killable_nested(&vcpu->mutex, role))
 			goto out_unlock;
+
+		if (first) {
+			/*
+			 * Reset the role to one that avoids colliding with
+			 * the role used for the first vcpu mutex.
+			 */
+			role = SEV_NR_MIGRATION_ROLES;
+			first = false;
+		} else {
+			mutex_release(&vcpu->mutex.dep_map, _THIS_IP_);
+		}
 	}
 
 	return 0;
 
 out_unlock:
+
+	first = true;
 	kvm_for_each_vcpu(j, vcpu, kvm) {
 		if (i == j)
 			break;
 
+		if (first)
+			first = false;
+		else
+			mutex_acquire(&vcpu->mutex.dep_map, role, 0, _THIS_IP_);
+
 		mutex_unlock(&vcpu->mutex);
 	}
 	return -EINTR;
 
@@ -1621,8 +1648,15 @@ static void sev_unlock_vcpus_for_migration(struct kvm *kvm)
 {
 	struct kvm_vcpu *vcpu;
 	unsigned long i;
+	bool first = true;
 
 	kvm_for_each_vcpu(i, vcpu, kvm) {
+		if (first)
+			first = false;
+		else
+			mutex_acquire(&vcpu->mutex.dep_map,
+				      SEV_NR_MIGRATION_ROLES, 0, _THIS_IP_);
+
 		mutex_unlock(&vcpu->mutex);
 	}
 }
 
@@ -1748,10 +1782,10 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
 		charged = true;
 	}
 
-	ret = sev_lock_vcpus_for_migration(kvm);
+	ret = sev_lock_vcpus_for_migration(kvm, SEV_MIGRATION_SOURCE);
 	if (ret)
 		goto out_dst_cgroup;
-	ret = sev_lock_vcpus_for_migration(source_kvm);
+	ret = sev_lock_vcpus_for_migration(source_kvm, SEV_MIGRATION_TARGET);
 	if (ret)
 		goto out_dst_vcpu;
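The locking scheme above generalizes to any code that must take many mutexes of the same lock class: give the first acquisition its own lockdep subclass, then tell lockdep to forget each subsequent one so the held-lock chain stays at two entries instead of overflowing lockdep's depth limit. A condensed model (struct demo, DEMO_NR_ROLES and the field names are hypothetical; mutex_lock_killable_nested(), mutex_release() and mutex_acquire() are the real lockdep hooks):

	static int demo_lock_all(struct demo *d, int n, int role)
	{
		int i;

		for (i = 0; i < n; i++) {
			if (mutex_lock_killable_nested(&d[i].lock, role))
				goto unwind;
			if (i == 0)
				role = DEMO_NR_ROLES;	/* avoid colliding with lock 0 */
			else
				mutex_release(&d[i].lock.dep_map, _THIS_IP_);
		}
		return 0;

	unwind:
		while (i--) {
			/* Re-acquire the dep_map dropped above before unlocking. */
			if (i)
				mutex_acquire(&d[i].lock.dep_map, role, 0, _THIS_IP_);
			mutex_unlock(&d[i].lock);
		}
		return -EINTR;
	}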
@@ -5472,7 +5472,7 @@ static bool vmx_emulation_required_with_pending_exception(struct kvm_vcpu *vcpu)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
 	return vmx->emulation_required && !vmx->rmode.vm86_active &&
-	       vcpu->arch.exception.pending;
+	       (vcpu->arch.exception.pending || vcpu->arch.exception.injected);
 }
 
 static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
@@ -10020,12 +10020,14 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		if (kvm_check_request(KVM_REQ_HV_CRASH, vcpu)) {
 			vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
 			vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH;
+			vcpu->run->system_event.ndata = 0;
 			r = 0;
 			goto out;
 		}
 		if (kvm_check_request(KVM_REQ_HV_RESET, vcpu)) {
 			vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
 			vcpu->run->system_event.type = KVM_SYSTEM_EVENT_RESET;
+			vcpu->run->system_event.ndata = 0;
 			r = 0;
 			goto out;
 		}
 
@@ -12009,8 +12011,12 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				   struct kvm_memory_slot *new,
 				   enum kvm_mr_change change)
 {
-	if (change == KVM_MR_CREATE || change == KVM_MR_MOVE)
+	if (change == KVM_MR_CREATE || change == KVM_MR_MOVE) {
+		if ((new->base_gfn + new->npages - 1) > kvm_mmu_max_gfn())
+			return -EINVAL;
+
 		return kvm_alloc_memslot_metadata(kvm, new);
+	}
 
 	if (change == KVM_MR_FLAGS_ONLY)
 		memcpy(&new->arch, &old->arch, sizeof(old->arch));
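From userspace, the new memslot check is visible as an -EINVAL when a slot would reach past the host's usable guest-physical range. An illustrative fragment (vm_fd and buf are assumed to be set up; the 1ULL << 52 address simply stands in for "beyond kvm_mmu_max_gfn()"):

	struct kvm_userspace_memory_region region = {
		.slot            = 0,
		.guest_phys_addr = 1ULL << 52,	/* past the max gfn (assumed) */
		.memory_size     = 0x1000,
		.userspace_addr  = (__u64)buf,
	};

	/* Previously accepted at creation time; now fails with errno == EINVAL. */
	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);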
@@ -53,12 +53,12 @@
 SYM_FUNC_START(copy_user_generic_unrolled)
 	ASM_STAC
 	cmpl $8,%edx
-	jb 20f		/* less then 8 bytes, go to byte copy loop */
+	jb .Lcopy_user_short_string_bytes
 	ALIGN_DESTINATION
 	movl %edx,%ecx
 	andl $63,%edx
 	shrl $6,%ecx
-	jz .L_copy_short_string
+	jz copy_user_short_string
 1:	movq (%rsi),%r8
 2:	movq 1*8(%rsi),%r9
 3:	movq 2*8(%rsi),%r10
 
@@ -79,37 +79,11 @@ SYM_FUNC_START(copy_user_generic_unrolled)
 	leaq 64(%rdi),%rdi
 	decl %ecx
 	jnz 1b
-.L_copy_short_string:
-	movl %edx,%ecx
-	andl $7,%edx
-	shrl $3,%ecx
-	jz 20f
-18:	movq (%rsi),%r8
-19:	movq %r8,(%rdi)
-	leaq 8(%rsi),%rsi
-	leaq 8(%rdi),%rdi
-	decl %ecx
-	jnz 18b
-20:	andl %edx,%edx
-	jz 23f
-	movl %edx,%ecx
-21:	movb (%rsi),%al
-22:	movb %al,(%rdi)
-	incq %rsi
-	incq %rdi
-	decl %ecx
-	jnz 21b
-23:	xor %eax,%eax
-	ASM_CLAC
-	RET
+	jmp copy_user_short_string
 
 30:	shll $6,%ecx
 	addl %ecx,%edx
 	jmp 60f
 40:	leal (%rdx,%rcx,8),%edx
 	jmp 60f
 50:	movl %ecx,%edx
-60:	jmp .Lcopy_user_handle_tail /* ecx is zerorest also */
+60:	jmp .Lcopy_user_handle_tail
 
 	_ASM_EXTABLE_CPY(1b, 30b)
 	_ASM_EXTABLE_CPY(2b, 30b)
 
@@ -127,10 +101,6 @@ SYM_FUNC_START(copy_user_generic_unrolled)
 	_ASM_EXTABLE_CPY(14b, 30b)
 	_ASM_EXTABLE_CPY(15b, 30b)
 	_ASM_EXTABLE_CPY(16b, 30b)
-	_ASM_EXTABLE_CPY(18b, 40b)
-	_ASM_EXTABLE_CPY(19b, 40b)
-	_ASM_EXTABLE_CPY(21b, 50b)
-	_ASM_EXTABLE_CPY(22b, 50b)
 SYM_FUNC_END(copy_user_generic_unrolled)
 EXPORT_SYMBOL(copy_user_generic_unrolled)
 
@@ -191,7 +161,7 @@ EXPORT_SYMBOL(copy_user_generic_string)
 SYM_FUNC_START(copy_user_enhanced_fast_string)
 	ASM_STAC
 	/* CPUs without FSRM should avoid rep movsb for short copies */
-	ALTERNATIVE "cmpl $64, %edx; jb .L_copy_short_string", "", X86_FEATURE_FSRM
+	ALTERNATIVE "cmpl $64, %edx; jb copy_user_short_string", "", X86_FEATURE_FSRM
 	movl %edx,%ecx
 1:	rep movsb
 	xorl %eax,%eax
 
@@ -243,6 +213,53 @@ SYM_CODE_START_LOCAL(.Lcopy_user_handle_tail)
 
 SYM_CODE_END(.Lcopy_user_handle_tail)
 
+/*
+ * Finish memcpy of less than 64 bytes. #AC should already be set.
+ *
+ * Input:
+ * rdi destination
+ * rsi source
+ * rdx count (< 64)
+ *
+ * Output:
+ * eax uncopied bytes or 0 if successful.
+ */
+SYM_CODE_START_LOCAL(copy_user_short_string)
+	movl %edx,%ecx
+	andl $7,%edx
+	shrl $3,%ecx
+	jz .Lcopy_user_short_string_bytes
+18:	movq (%rsi),%r8
+19:	movq %r8,(%rdi)
+	leaq 8(%rsi),%rsi
+	leaq 8(%rdi),%rdi
+	decl %ecx
+	jnz 18b
+.Lcopy_user_short_string_bytes:
+	andl %edx,%edx
+	jz 23f
+	movl %edx,%ecx
+21:	movb (%rsi),%al
+22:	movb %al,(%rdi)
+	incq %rsi
+	incq %rdi
+	decl %ecx
+	jnz 21b
+23:	xor %eax,%eax
+	ASM_CLAC
+	RET
+
+40:	leal (%rdx,%rcx,8),%edx
+	jmp 60f
+50:	movl %ecx,%edx		/* ecx is zerorest also */
+60:	jmp .Lcopy_user_handle_tail
+
+	_ASM_EXTABLE_CPY(18b, 40b)
+	_ASM_EXTABLE_CPY(19b, 40b)
+	_ASM_EXTABLE_CPY(21b, 50b)
+	_ASM_EXTABLE_CPY(22b, 50b)
+SYM_CODE_END(copy_user_short_string)
+
 /*
  * copy_user_nocache - Uncached memory copy with exception handling
  * This will force destination out of cache for more performance.
@@ -48,6 +48,7 @@ SYM_FUNC_START(__put_user_1)
 	cmp %_ASM_BX,%_ASM_CX
 	jae .Lbad_put_user
 SYM_INNER_LABEL(__put_user_nocheck_1, SYM_L_GLOBAL)
+	ENDBR
 	ASM_STAC
 1:	movb %al,(%_ASM_CX)
 	xor %ecx,%ecx
 
@@ -62,6 +63,7 @@ SYM_FUNC_START(__put_user_2)
 	cmp %_ASM_BX,%_ASM_CX
 	jae .Lbad_put_user
 SYM_INNER_LABEL(__put_user_nocheck_2, SYM_L_GLOBAL)
+	ENDBR
 	ASM_STAC
 2:	movw %ax,(%_ASM_CX)
 	xor %ecx,%ecx
 
@@ -76,6 +78,7 @@ SYM_FUNC_START(__put_user_4)
 	cmp %_ASM_BX,%_ASM_CX
 	jae .Lbad_put_user
 SYM_INNER_LABEL(__put_user_nocheck_4, SYM_L_GLOBAL)
+	ENDBR
 	ASM_STAC
 3:	movl %eax,(%_ASM_CX)
 	xor %ecx,%ecx
 
@@ -90,6 +93,7 @@ SYM_FUNC_START(__put_user_8)
 	cmp %_ASM_BX,%_ASM_CX
 	jae .Lbad_put_user
 SYM_INNER_LABEL(__put_user_nocheck_8, SYM_L_GLOBAL)
+	ENDBR
 	ASM_STAC
 4:	mov %_ASM_AX,(%_ASM_CX)
 #ifdef CONFIG_X86_32
@@ -31,6 +31,7 @@
 	.align RETPOLINE_THUNK_SIZE
 SYM_INNER_LABEL(__x86_indirect_thunk_\reg, SYM_L_GLOBAL)
 	UNWIND_HINT_EMPTY
+	ANNOTATE_NOENDBR
 
 	ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), \
 		      __stringify(RETPOLINE \reg), X86_FEATURE_RETPOLINE, \
 
@@ -55,7 +56,6 @@ SYM_INNER_LABEL(__x86_indirect_thunk_\reg, SYM_L_GLOBAL)
 
 	.align RETPOLINE_THUNK_SIZE
 SYM_CODE_START(__x86_indirect_thunk_array)
-	ANNOTATE_NOENDBR // apply_retpolines
 
 #define GEN(reg) THUNK reg
 #include <asm/GEN-for-each-reg.h>
@@ -638,17 +638,6 @@ pte_t *lookup_address(unsigned long address, unsigned int *level)
 }
 EXPORT_SYMBOL_GPL(lookup_address);
 
-/*
- * Lookup the page table entry for a virtual address in a given mm. Return a
- * pointer to the entry and the level of the mapping.
- */
-pte_t *lookup_address_in_mm(struct mm_struct *mm, unsigned long address,
-			    unsigned int *level)
-{
-	return lookup_address_in_pgd(pgd_offset(mm, address), address, level);
-}
-EXPORT_SYMBOL_GPL(lookup_address_in_mm);
-
 static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address,
 				  unsigned int *level)
 {
@@ -467,7 +467,6 @@ static __init void xen_setup_pci_msi(void)
 		else
 			xen_msi_ops.setup_msi_irqs = xen_setup_msi_irqs;
 		xen_msi_ops.teardown_msi_irqs = xen_pv_teardown_msi_irqs;
-		pci_msi_ignore_mask = 1;
 	} else if (xen_hvm_domain()) {
 		xen_msi_ops.setup_msi_irqs = xen_hvm_setup_msi_irqs;
 		xen_msi_ops.teardown_msi_irqs = xen_teardown_msi_irqs;
 
@@ -481,6 +480,11 @@ static __init void xen_setup_pci_msi(void)
 	 * in allocating the native domain and never use it.
 	 */
 	x86_init.irqs.create_pci_msi_domain = xen_create_pci_msi_domain;
+	/*
+	 * With XEN PIRQ/Eventchannels in use PCI/MSI[-X] masking is solely
+	 * controlled by the hypervisor.
+	 */
+	pci_msi_ignore_mask = 1;
 }
 
 #else /* CONFIG_PCI_MSI */
|
|||
#define PVH_DS_SEL (PVH_GDT_ENTRY_DS * 8)
|
||||
|
||||
SYM_CODE_START_LOCAL(pvh_start_xen)
|
||||
UNWIND_HINT_EMPTY
|
||||
cld
|
||||
|
||||
lgdt (_pa(gdt))
|
||||
|
|
|
@ -25,6 +25,7 @@
|
|||
#include <asm/cpu.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/cpu_device_id.h>
|
||||
#include <asm/microcode.h>
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
__visible unsigned long saved_context_ebx;
|
||||
|
@ -262,11 +263,18 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
|
|||
x86_platform.restore_sched_clock_state();
|
||||
mtrr_bp_restore();
|
||||
perf_restore_debug_store();
|
||||
msr_restore_context(ctxt);
|
||||
|
||||
c = &cpu_data(smp_processor_id());
|
||||
if (cpu_has(c, X86_FEATURE_MSR_IA32_FEAT_CTL))
|
||||
init_ia32_feat_ctl(c);
|
||||
|
||||
microcode_bsp_resume();
|
||||
|
||||
/*
|
||||
* This needs to happen after the microcode has been updated upon resume
|
||||
* because some of the MSRs are "emulated" in microcode.
|
||||
*/
|
||||
msr_restore_context(ctxt);
|
||||
}
|
||||
|
||||
/* Needed by apm.c */
|
||||
|
|
|
@@ -45,6 +45,7 @@ SYM_CODE_END(hypercall_page)
 __INIT
 SYM_CODE_START(startup_xen)
 	UNWIND_HINT_EMPTY
+	ANNOTATE_NOENDBR
 	cld
 
 	/* Clear .bss */
@@ -50,7 +50,6 @@
 #include "blk-pm.h"
 #include "blk-cgroup.h"
 #include "blk-throttle.h"
-#include "blk-rq-qos.h"
 
 struct dentry *blk_debugfs_root;
 
@@ -315,9 +314,6 @@ void blk_cleanup_queue(struct request_queue *q)
 	 */
 	blk_freeze_queue(q);
 
-	/* cleanup rq qos structures for queue without disk */
-	rq_qos_exit(q);
-
 	blk_queue_flag_set(QUEUE_FLAG_DEAD, q);
 
 	blk_sync_queue(q);
@@ -2295,6 +2295,7 @@ static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
 {
 	int ret = 0;
 	struct binder_sg_copy *sgc, *tmpsgc;
+	struct binder_ptr_fixup *tmppf;
 	struct binder_ptr_fixup *pf =
 		list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
 					 node);
 
@@ -2349,7 +2350,11 @@ static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
 		list_del(&sgc->node);
 		kfree(sgc);
 	}
-	BUG_ON(!list_empty(pf_head));
+	list_for_each_entry_safe(pf, tmppf, pf_head, node) {
+		BUG_ON(pf->skip_size == 0);
+		list_del(&pf->node);
+		kfree(pf);
+	}
 	BUG_ON(!list_empty(sgc_head));
 
 	return ret > 0 ? -EINVAL : ret;
 
@@ -2486,6 +2491,9 @@ static int binder_translate_fd_array(struct list_head *pf_head,
 	struct binder_proc *proc = thread->proc;
 	int ret;
 
+	if (fda->num_fds == 0)
+		return 0;
+
 	fd_buf_size = sizeof(u32) * fda->num_fds;
 	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
 		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
@@ -667,6 +667,15 @@ const struct cpumask *cpu_coregroup_mask(int cpu)
 		core_mask = &cpu_topology[cpu].llc_sibling;
 	}
 
+	/*
+	 * For systems with no shared cpu-side LLC but with clusters defined,
+	 * extend core_mask to cluster_siblings. The sched domain builder will
+	 * then remove MC as redundant with CLS if SCHED_CLUSTER is enabled.
+	 */
+	if (IS_ENABLED(CONFIG_SCHED_CLUSTER) &&
+	    cpumask_subset(core_mask, &cpu_topology[cpu].cluster_sibling))
+		core_mask = &cpu_topology[cpu].cluster_sibling;
+
 	return core_mask;
 }
 
@@ -684,7 +693,7 @@ void update_siblings_masks(unsigned int cpuid)
 	for_each_online_cpu(cpu) {
 		cpu_topo = &cpu_topology[cpu];
 
-		if (cpuid_topo->llc_id == cpu_topo->llc_id) {
+		if (cpu_topo->llc_id != -1 && cpuid_topo->llc_id == cpu_topo->llc_id) {
 			cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
 			cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
 		}
@@ -152,9 +152,19 @@ static struct attribute *default_attrs[] = {
 	NULL
 };
 
+static umode_t topology_is_visible(struct kobject *kobj,
+				   struct attribute *attr, int unused)
+{
+	if (attr == &dev_attr_ppin.attr && !topology_ppin(kobj_to_dev(kobj)->id))
+		return 0;
+
+	return attr->mode;
+}
+
 static const struct attribute_group topology_attr_group = {
 	.attrs = default_attrs,
 	.bin_attrs = bin_attrs,
+	.is_visible = topology_is_visible,
 	.name = "topology"
 };
@@ -303,6 +303,7 @@ static struct atari_floppy_struct {
 	int ref;
 	int type;
 	struct blk_mq_tag_set tag_set;
+	int error_count;
 } unit[FD_MAX_UNITS];
 
 #define UD unit[drive]
 
@@ -705,14 +706,14 @@ static void fd_error( void )
 	if (!fd_request)
 		return;
 
-	fd_request->error_count++;
-	if (fd_request->error_count >= MAX_ERRORS) {
+	unit[SelectedDrive].error_count++;
+	if (unit[SelectedDrive].error_count >= MAX_ERRORS) {
 		printk(KERN_ERR "fd%d: too many errors.\n", SelectedDrive );
 		fd_end_request_cur(BLK_STS_IOERR);
 		finish_fdc();
 		return;
 	}
-	else if (fd_request->error_count == RECALIBRATE_ERRORS) {
+	else if (unit[SelectedDrive].error_count == RECALIBRATE_ERRORS) {
 		printk(KERN_WARNING "fd%d: recalibrating\n", SelectedDrive );
 		if (SelectedDrive != -1)
 			SUD.track = -1;
 
@@ -1491,7 +1492,7 @@ static void setup_req_params( int drive )
 	ReqData = ReqBuffer + 512 * ReqCnt;
 
 	if (UseTrackbuffer)
-		read_track = (ReqCmd == READ && fd_request->error_count == 0);
+		read_track = (ReqCmd == READ && unit[drive].error_count == 0);
 	else
 		read_track = 0;
 
@@ -1520,6 +1521,7 @@ static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx,
 		return BLK_STS_RESOURCE;
 	}
 	fd_request = bd->rq;
+	unit[drive].error_count = 0;
 	blk_mq_start_request(fd_request);
 
 	atari_disable_irq( IRQ_MFP_FDC );
@@ -509,8 +509,8 @@ static unsigned long fdc_busy;
 static DECLARE_WAIT_QUEUE_HEAD(fdc_wait);
 static DECLARE_WAIT_QUEUE_HEAD(command_done);
 
-/* Errors during formatting are counted here. */
-static int format_errors;
+/* errors encountered on the current (or last) request */
+static int floppy_errors;
 
 /* Format request descriptor. */
 static struct format_descr format_req;
 
@@ -530,7 +530,6 @@ static struct format_descr format_req;
 static char *floppy_track_buffer;
 static int max_buffer_sectors;
 
-static int *errors;
 typedef void (*done_f)(int);
 static const struct cont_t {
 	void (*interrupt)(void);
 
@@ -1455,7 +1454,7 @@ static int interpret_errors(void)
 		if (drive_params[current_drive].flags & FTD_MSG)
 			DPRINT("Over/Underrun - retrying\n");
 		bad = 0;
-	} else if (*errors >= drive_params[current_drive].max_errors.reporting) {
+	} else if (floppy_errors >= drive_params[current_drive].max_errors.reporting) {
 		print_errors();
 	}
 	if (reply_buffer[ST2] & ST2_WC || reply_buffer[ST2] & ST2_BC)
 
@@ -2095,7 +2094,7 @@ static void bad_flp_intr(void)
 		if (!next_valid_format(current_drive))
 			return;
 	}
-	err_count = ++(*errors);
+	err_count = ++floppy_errors;
 	INFBOUND(write_errors[current_drive].badness, err_count);
 	if (err_count > drive_params[current_drive].max_errors.abort)
 		cont->done(0);
 
@@ -2241,9 +2240,8 @@ static int do_format(int drive, struct format_descr *tmp_format_req)
 		return -EINVAL;
 	}
 	format_req = *tmp_format_req;
-	format_errors = 0;
 	cont = &format_cont;
-	errors = &format_errors;
+	floppy_errors = 0;
 	ret = wait_til_done(redo_format, true);
 	if (ret == -EINTR)
 		return -EINTR;
 
@@ -2759,10 +2757,11 @@ static int set_next_request(void)
 	current_req = list_first_entry_or_null(&floppy_reqs, struct request,
 					       queuelist);
 	if (current_req) {
-		current_req->error_count = 0;
+		floppy_errors = 0;
 		list_del_init(&current_req->queuelist);
+		return 1;
 	}
-	return current_req != NULL;
+	return 0;
 }
 
@@ -2821,7 +2820,6 @@ do_request:
 		_floppy = floppy_type + drive_params[current_drive].autodetect[drive_state[current_drive].probed_format];
 	} else
 		probing = 0;
-	errors = &(current_req->error_count);
 	tmp = make_raw_rw_request();
 	if (tmp < 2) {
 		request_done(tmp);
@@ -224,8 +224,12 @@ int fsl_mc_msi_domain_alloc_irqs(struct device *dev, unsigned int irq_count)
 	if (error)
 		return error;
 
+	msi_lock_descs(dev);
 	if (msi_first_desc(dev, MSI_DESC_ALL))
-		return -EINVAL;
+		error = -EINVAL;
+	msi_unlock_descs(dev);
+	if (error)
+		return error;
 
 	/*
	 * NOTE: Calling this function will trigger the invocation of the
@@ -1060,6 +1060,7 @@ static int __maybe_unused mhi_pci_freeze(struct device *dev)
 	 * the intermediate restore kernel reinitializes MHI device with new
 	 * context.
 	 */
+	flush_work(&mhi_pdev->recovery_work);
 	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
 		mhi_power_down(mhi_cntrl, true);
 		mhi_unprepare_after_power_down(mhi_cntrl);
 
@@ -1085,6 +1086,7 @@ static const struct dev_pm_ops mhi_pci_pm_ops = {
 	.resume = mhi_pci_resume,
 	.freeze = mhi_pci_freeze,
 	.thaw = mhi_pci_restore,
+	.poweroff = mhi_pci_freeze,
 	.restore = mhi_pci_restore,
 #endif
 };
@@ -3677,8 +3677,11 @@ static void cleanup_smi_msgs(struct ipmi_smi *intf)
 void ipmi_unregister_smi(struct ipmi_smi *intf)
 {
 	struct ipmi_smi_watcher *w;
-	int intf_num = intf->intf_num, index;
+	int intf_num, index;
 
+	if (!intf)
+		return;
+	intf_num = intf->intf_num;
 	mutex_lock(&ipmi_interfaces_mutex);
 	intf->intf_num = -1;
 	intf->in_shutdown = true;
 
@@ -4518,6 +4521,8 @@ return_unspecified:
 		} else
 			/* The message was sent, start the timer. */
 			intf_start_seq_timer(intf, msg->msgid);
+		requeue = 0;
+		goto out;
 	} else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
 		   || (msg->rsp[1] != msg->data[1])) {
 		/*
@@ -2220,10 +2220,7 @@ static void cleanup_one_si(struct smi_info *smi_info)
 		return;
 
 	list_del(&smi_info->link);
 
-	if (smi_info->intf)
-		ipmi_unregister_smi(smi_info->intf);
-
+	ipmi_unregister_smi(smi_info->intf);
 	kfree(smi_info);
 }
@@ -668,6 +668,7 @@
 void fw_core_remove_card(struct fw_card *card)
 {
 	struct fw_card_driver dummy_driver = dummy_driver_template;
+	unsigned long flags;
 
 	card->driver->update_phy_reg(card, 4,
 				     PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
 
@@ -682,7 +683,9 @@ void fw_core_remove_card(struct fw_card *card)
 	dummy_driver.stop_iso = card->driver->stop_iso;
 	card->driver = &dummy_driver;
 
+	spin_lock_irqsave(&card->lock, flags);
 	fw_destroy_nodes(card);
+	spin_unlock_irqrestore(&card->lock, flags);
 
 	/* Wait for all users, especially device workqueue jobs, to finish. */
 	fw_card_put(card);
@@ -1500,6 +1500,7 @@ static void outbound_phy_packet_callback(struct fw_packet *packet,
 {
 	struct outbound_phy_packet_event *e =
 		container_of(packet, struct outbound_phy_packet_event, p);
+	struct client *e_client;
 
 	switch (status) {
 	/* expected: */
 
@@ -1516,9 +1517,10 @@ static void outbound_phy_packet_callback(struct fw_packet *packet,
 	}
 	e->phy_packet.data[0] = packet->timestamp;
 
+	e_client = e->client;
 	queue_event(e->client, &e->event, &e->phy_packet,
 		    sizeof(e->phy_packet) + e->phy_packet.length, NULL, 0);
-	client_put(e->client);
+	client_put(e_client);
 }
 
 static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
@@ -375,16 +375,13 @@ static void report_found_node(struct fw_card *card,
 		card->bm_retries = 0;
 }
 
+/* Must be called with card->lock held */
 void fw_destroy_nodes(struct fw_card *card)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&card->lock, flags);
 	card->color++;
 	if (card->local_node != NULL)
 		for_each_fw_node(card, card->local_node, report_lost_node);
 	card->local_node = NULL;
-	spin_unlock_irqrestore(&card->lock, flags);
 }
 
 static void move_tree(struct fw_node *node0, struct fw_node *node1, int port)
 
@@ -510,6 +507,8 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
 	struct fw_node *local_node;
 	unsigned long flags;
 
+	spin_lock_irqsave(&card->lock, flags);
+
 	/*
 	 * If the selfID buffer is not the immediate successor of the
 	 * previously processed one, we cannot reliably compare the
 
@@ -521,8 +520,6 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
 		card->bm_retries = 0;
 	}
 
-	spin_lock_irqsave(&card->lock, flags);
-
 	card->broadcast_channel_allocated = card->broadcast_channel_auto_allocated;
 	card->node_id = node_id;
 	/*
@@ -73,24 +73,25 @@ static int try_cancel_split_timeout(struct fw_transaction *t)
 static int close_transaction(struct fw_transaction *transaction,
 			     struct fw_card *card, int rcode)
 {
-	struct fw_transaction *t;
+	struct fw_transaction *t = NULL, *iter;
 	unsigned long flags;
 
 	spin_lock_irqsave(&card->lock, flags);
-	list_for_each_entry(t, &card->transaction_list, link) {
-		if (t == transaction) {
-			if (!try_cancel_split_timeout(t)) {
+	list_for_each_entry(iter, &card->transaction_list, link) {
+		if (iter == transaction) {
+			if (!try_cancel_split_timeout(iter)) {
 				spin_unlock_irqrestore(&card->lock, flags);
 				goto timed_out;
 			}
-			list_del_init(&t->link);
-			card->tlabel_mask &= ~(1ULL << t->tlabel);
+			list_del_init(&iter->link);
+			card->tlabel_mask &= ~(1ULL << iter->tlabel);
+			t = iter;
 			break;
 		}
 	}
 	spin_unlock_irqrestore(&card->lock, flags);
 
-	if (&t->link != &card->transaction_list) {
+	if (t) {
 		t->callback(card, rcode, NULL, 0, t->callback_data);
 		return 0;
 	}
 
@@ -935,7 +936,7 @@ EXPORT_SYMBOL(fw_core_handle_request);
 
 void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
 {
-	struct fw_transaction *t;
+	struct fw_transaction *t = NULL, *iter;
 	unsigned long flags;
 	u32 *data;
 	size_t data_length;
 
@@ -947,20 +948,21 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
 	rcode = HEADER_GET_RCODE(p->header[1]);
 
 	spin_lock_irqsave(&card->lock, flags);
-	list_for_each_entry(t, &card->transaction_list, link) {
-		if (t->node_id == source && t->tlabel == tlabel) {
-			if (!try_cancel_split_timeout(t)) {
+	list_for_each_entry(iter, &card->transaction_list, link) {
+		if (iter->node_id == source && iter->tlabel == tlabel) {
+			if (!try_cancel_split_timeout(iter)) {
 				spin_unlock_irqrestore(&card->lock, flags);
 				goto timed_out;
 			}
-			list_del_init(&t->link);
-			card->tlabel_mask &= ~(1ULL << t->tlabel);
+			list_del_init(&iter->link);
+			card->tlabel_mask &= ~(1ULL << iter->tlabel);
+			t = iter;
 			break;
 		}
 	}
 	spin_unlock_irqrestore(&card->lock, flags);
 
-	if (&t->link == &card->transaction_list) {
+	if (!t) {
 timed_out:
 		fw_notice(card, "unsolicited response (source %x, tlabel %x)\n",
 			  source, tlabel);
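These rewrites, and the sbp2 one below, all fix the same anti-pattern: after a full list_for_each_entry() walk that never hit break, the cursor no longer points at a real element (it is a type-confused pointer computed from the list head), so tests of the form `&t->link != &card->transaction_list` are fragile. The safe idiom walks with a dedicated iterator and publishes the match through a separate, NULL-initialized pointer. Generic shape (struct item, head, and consume() are hypothetical):

	struct item *found = NULL, *iter;

	list_for_each_entry(iter, &head, link) {
		if (iter->id == id) {
			found = iter;
			break;
		}
	}

	if (found)
		consume(found);	/* never touch 'iter' after the loop */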
@@ -408,7 +408,7 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
 			      void *payload, size_t length, void *callback_data)
 {
 	struct sbp2_logical_unit *lu = callback_data;
-	struct sbp2_orb *orb;
+	struct sbp2_orb *orb = NULL, *iter;
 	struct sbp2_status status;
 	unsigned long flags;
 
@@ -433,17 +433,18 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
 
 	/* Lookup the orb corresponding to this status write. */
 	spin_lock_irqsave(&lu->tgt->lock, flags);
-	list_for_each_entry(orb, &lu->orb_list, link) {
+	list_for_each_entry(iter, &lu->orb_list, link) {
 		if (STATUS_GET_ORB_HIGH(status) == 0 &&
-		    STATUS_GET_ORB_LOW(status) == orb->request_bus) {
-			orb->rcode = RCODE_COMPLETE;
-			list_del(&orb->link);
+		    STATUS_GET_ORB_LOW(status) == iter->request_bus) {
+			iter->rcode = RCODE_COMPLETE;
+			list_del(&iter->link);
+			orb = iter;
 			break;
 		}
 	}
 	spin_unlock_irqrestore(&lu->tgt->lock, flags);
 
-	if (&orb->link != &lu->orb_list) {
+	if (orb) {
 		orb->callback(orb, &status);
 		kref_put(&orb->kref, free_orb); /* orb callback reference */
 	} else {
@@ -871,13 +871,6 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
 	mvpwm->chip.dev = dev;
 	mvpwm->chip.ops = &mvebu_pwm_ops;
 	mvpwm->chip.npwm = mvchip->chip.ngpio;
-	/*
-	 * There may already be some PWM allocated, so we can't force
-	 * mvpwm->chip.base to a fixed point like mvchip->chip.base.
-	 * So, we let pwmchip_add() do the numbering and take the next free
-	 * region.
-	 */
-	mvpwm->chip.base = -1;
 
 	spin_lock_init(&mvpwm->lock);