Merge branch 'sched/urgent' into sched/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 07f9f22087
@@ -7,6 +7,7 @@ Required properties:
- "ti,ina220" for ina220
- "ti,ina226" for ina226
- "ti,ina230" for ina230
- "ti,ina231" for ina231
- reg: I2C address

Optional properties:

@@ -44,8 +44,8 @@ Required properties:
- our-claim-gpio: The GPIO that we use to claim the bus.
- their-claim-gpios: The GPIOs that the other sides use to claim the bus.
Note that some implementations may only support a single other master.
- Standard I2C mux properties. See mux.txt in this directory.
- Single I2C child bus node at reg 0. See mux.txt in this directory.
- Standard I2C mux properties. See i2c-mux.txt in this directory.
- Single I2C child bus node at reg 0. See i2c-mux.txt in this directory.

Optional properties:
- slew-delay-us: microseconds to wait for a GPIO to go high. Default is 10 us.

@@ -27,7 +27,8 @@ Required properties:
- i2c-bus-name: The name of this bus. Also needed as pinctrl-name for the I2C
parents.

Furthermore, I2C mux properties and child nodes. See mux.txt in this directory.
Furthermore, I2C mux properties and child nodes. See i2c-mux.txt in this
directory.

Example:

@@ -22,8 +22,8 @@ Required properties:
- i2c-parent: The phandle of the I2C bus that this multiplexer's master-side
port is connected to.
- mux-gpios: list of gpios used to control the muxer
* Standard I2C mux properties. See mux.txt in this directory.
* I2C child bus nodes. See mux.txt in this directory.
* Standard I2C mux properties. See i2c-mux.txt in this directory.
* I2C child bus nodes. See i2c-mux.txt in this directory.

Optional properties:
- idle-state: value to set the muxer to when idle. When no value is

@@ -33,7 +33,7 @@ For each i2c child node, an I2C child bus will be created. They will
be numbered based on their order in the device tree.

Whenever an access is made to a device on a child bus, the value set
in the revelant node's reg property will be output using the list of
in the relevant node's reg property will be output using the list of
GPIOs, the first in the list holding the least-significant value.

If an idle state is defined, using the idle-state (optional) property,

@@ -28,9 +28,9 @@ Also required are:
* Standard pinctrl properties that specify the pin mux state for each child
bus. See ../pinctrl/pinctrl-bindings.txt.

* Standard I2C mux properties. See mux.txt in this directory.
* Standard I2C mux properties. See i2c-mux.txt in this directory.

* I2C child bus nodes. See mux.txt in this directory.
* I2C child bus nodes. See i2c-mux.txt in this directory.

For each named state defined in the pinctrl-names property, an I2C child bus
will be created. I2C child bus numbers are assigned based on the index into

@@ -7,8 +7,8 @@ Required properties:
- compatible: i2c-mux-reg
- i2c-parent: The phandle of the I2C bus that this multiplexer's master-side
port is connected to.
* Standard I2C mux properties. See mux.txt in this directory.
* I2C child bus nodes. See mux.txt in this directory.
* Standard I2C mux properties. See i2c-mux.txt in this directory.
* I2C child bus nodes. See i2c-mux.txt in this directory.

Optional properties:
- reg: this pair of <offset size> specifies the register to control the mux.

@@ -24,7 +24,7 @@ Optional properties:
given, it defaults to the last value used.

Whenever an access is made to a device on a child bus, the value set
in the revelant node's reg property will be output to the register.
in the relevant node's reg property will be output to the register.

If an idle state is defined, using the idle-state (optional) property,
whenever an access is not being made to a device on a child bus, the

@@ -13,10 +13,10 @@ Optional properties:
initialization. This is an array of 28 values(u8).

- marvell,wakeup-pin: It represents wakeup pin number of the bluetooth chip.
firmware will use the pin to wakeup host system.
firmware will use the pin to wakeup host system (u16).
- marvell,wakeup-gap-ms: wakeup gap represents wakeup latency of the host
platform. The value will be configured to firmware. This
is needed to work chip's sleep feature as expected.
is needed to work chip's sleep feature as expected (u16).
- interrupt-parent: phandle of the parent interrupt controller
- interrupts : interrupt pin number to the cpu. Driver will request an irq based
on this interrupt number. During system suspend, the irq will be

@@ -50,7 +50,7 @@ calibration data is also available in below example.
0x37 0x01 0x1c 0x00 0xff 0xff 0xff 0xff 0x01 0x7f 0x04 0x02
0x00 0x00 0xba 0xce 0xc0 0xc6 0x2d 0x00 0x00 0x00 0x00 0x00
0x00 0x00 0xf0 0x00>;
marvell,wakeup-pin = <0x0d>;
marvell,wakeup-gap-ms = <0x64>;
marvell,wakeup-pin = /bits/ 16 <0x0d>;
marvell,wakeup-gap-ms = /bits/ 16 <0x64>;
};
};

@@ -255,6 +255,7 @@ synology Synology, Inc.
SUNW Sun Microsystems, Inc
tbs TBS Technologies
tcl Toby Churchill Ltd.
technexion TechNexion
technologic Technologic Systems
thine THine Electronics, Inc.
ti Texas Instruments

@@ -269,6 +270,7 @@ tronsmart Tronsmart
truly Truly Semiconductors Limited
tyan Tyan Computer Corporation
upisemi uPI Semiconductor Corp.
uniwest United Western Technologies Corp (UniWest)
urt United Radiant Technology Corporation
usi Universal Scientific Industrial Co., Ltd.
v3 V3 Semiconductor

@@ -8009,6 +8009,7 @@ Q: http://patchwork.kernel.org/project/linux-wireless/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next.git
S: Maintained
F: Documentation/devicetree/bindings/net/wireless/
F: drivers/net/wireless/

NETXEN (1/10) GbE SUPPORT

@@ -8406,10 +8407,9 @@ F: drivers/i2c/busses/i2c-ocores.c
OPEN FIRMWARE AND FLATTENED DEVICE TREE
M: Rob Herring <robh+dt@kernel.org>
M: Frank Rowand <frowand.list@gmail.com>
M: Grant Likely <grant.likely@linaro.org>
L: devicetree@vger.kernel.org
W: http://www.devicetree.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/glikely/linux.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git
S: Maintained
F: drivers/of/
F: include/linux/of*.h

@@ -8417,12 +8417,10 @@ F: scripts/dtc/

OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS
M: Rob Herring <robh+dt@kernel.org>
M: Pawel Moll <pawel.moll@arm.com>
M: Mark Rutland <mark.rutland@arm.com>
M: Ian Campbell <ijc+devicetree@hellion.org.uk>
M: Kumar Gala <galak@codeaurora.org>
L: devicetree@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git
Q: http://patchwork.ozlabs.org/project/devicetree-bindings/list/
S: Maintained
F: Documentation/devicetree/
F: arch/*/boot/dts/

Makefile
@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 7
SUBLEVEL = 0
EXTRAVERSION = -rc2
EXTRAVERSION = -rc3
NAME = Psychotic Stoned Sheep

# *DOCUMENTATION*

@@ -61,7 +61,7 @@ config RWSEM_GENERIC_SPINLOCK
def_bool y

config ARCH_DISCONTIGMEM_ENABLE
def_bool y
def_bool n

config ARCH_FLATMEM_ENABLE
def_bool y

@@ -186,9 +186,6 @@ if SMP
config ARC_HAS_COH_CACHES
def_bool n

config ARC_HAS_REENTRANT_IRQ_LV2
def_bool n

config ARC_MCIP
bool "ARConnect Multicore IP (MCIP) Support "
depends on ISA_ARCV2

@@ -366,25 +363,10 @@ config NODES_SHIFT
if ISA_ARCOMPACT

config ARC_COMPACT_IRQ_LEVELS
bool "ARCompact IRQ Priorities: High(2)/Low(1)"
bool "Setup Timer IRQ as high Priority"
default n
# Timer HAS to be high priority, for any other high priority config
select ARC_IRQ3_LV2
# if SMP, LV2 enabled ONLY if ARC implementation has LV2 re-entrancy
depends on !SMP || ARC_HAS_REENTRANT_IRQ_LV2

if ARC_COMPACT_IRQ_LEVELS

config ARC_IRQ3_LV2
bool

config ARC_IRQ5_LV2
bool

config ARC_IRQ6_LV2
bool

endif #ARC_COMPACT_IRQ_LEVELS
depends on !SMP

config ARC_FPU_SAVE_RESTORE
bool "Enable FPU state persistence across context switch"

@@ -407,11 +389,6 @@ config ARC_HAS_LLSC
default y
depends on !ARC_CANT_LLSC

config ARC_STAR_9000923308
bool "Workaround for llock/scond livelock"
default n
depends on ISA_ARCV2 && SMP && ARC_HAS_LLSC

config ARC_HAS_SWAPE
bool "Insn: SWAPE (endian-swap)"
default y

@@ -471,7 +448,7 @@ config LINUX_LINK_BASE

config HIGHMEM
bool "High Memory Support"
select DISCONTIGMEM
select ARCH_DISCONTIGMEM_ENABLE
help
With ARC 2G:2G address split, only upper 2G is directly addressable by
kernel. Enable this to potentially allow access to rest of 2G and PAE

@@ -127,7 +127,7 @@ libs-y += arch/arc/lib/ $(LIBGCC)

boot := arch/arc/boot

#default target for make without any arguements.
#default target for make without any arguments.
KBUILD_IMAGE := bootpImage

all: $(KBUILD_IMAGE)

@@ -23,8 +23,6 @@


/ {
clock-frequency = <500000000>; /* 500 MHZ */

soc100 {
bus-frequency = <166666666>;

@@ -23,8 +23,6 @@


/ {
clock-frequency = <500000000>; /* 500 MHZ */

soc100 {
bus-frequency = <166666666>;

@@ -15,7 +15,6 @@

/ {
compatible = "snps,arc";
clock-frequency = <750000000>; /* 750 MHZ */
#address-cells = <1>;
#size-cells = <1>;

@@ -14,7 +14,6 @@

/ {
compatible = "snps,arc";
clock-frequency = <90000000>;
#address-cells = <1>;
#size-cells = <1>;

@@ -14,7 +14,6 @@

/ {
compatible = "snps,arc";
clock-frequency = <90000000>;
#address-cells = <1>;
#size-cells = <1>;

@@ -18,7 +18,6 @@

/ {
compatible = "ezchip,arc-nps";
clock-frequency = <83333333>; /* 83.333333 MHZ */
#address-cells = <1>;
#size-cells = <1>;
interrupt-parent = <&intc>;

@@ -11,7 +11,6 @@

/ {
compatible = "snps,nsim";
clock-frequency = <80000000>; /* 80 MHZ */
#address-cells = <1>;
#size-cells = <1>;
interrupt-parent = <&core_intc>;

@@ -11,7 +11,6 @@

/ {
compatible = "snps,nsimosci";
clock-frequency = <20000000>; /* 20 MHZ */
#address-cells = <1>;
#size-cells = <1>;
interrupt-parent = <&core_intc>;

@@ -11,7 +11,6 @@

/ {
compatible = "snps,nsimosci_hs";
clock-frequency = <20000000>; /* 20 MHZ */
#address-cells = <1>;
#size-cells = <1>;
interrupt-parent = <&core_intc>;

@@ -11,7 +11,6 @@

/ {
compatible = "snps,nsimosci_hs";
clock-frequency = <5000000>; /* 5 MHZ */
#address-cells = <1>;
#size-cells = <1>;
interrupt-parent = <&core_intc>;

@@ -13,7 +13,6 @@

/ {
compatible = "snps,arc";
clock-frequency = <80000000>; /* 80 MHZ */
#address-cells = <1>;
#size-cells = <1>;
chosen { };

@@ -8,7 +8,6 @@

/ {
compatible = "snps,arc";
clock-frequency = <80000000>; /* 80 MHZ */
#address-cells = <1>;
#size-cells = <1>;
chosen { };

@@ -8,7 +8,6 @@

/ {
compatible = "snps,arc";
clock-frequency = <80000000>; /* 80 MHZ */
#address-cells = <1>;
#size-cells = <1>;
chosen { };

@@ -14,7 +14,6 @@

/ {
compatible = "snps,arc";
clock-frequency = <50000000>;
#address-cells = <1>;
#size-cells = <1>;

@@ -15,7 +15,6 @@

/ {
compatible = "snps,arc";
clock-frequency = <50000000>;
#address-cells = <1>;
#size-cells = <1>;

@@ -25,50 +25,17 @@

#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#ifdef CONFIG_ARC_STAR_9000923308

#define SCOND_FAIL_RETRY_VAR_DEF \
unsigned int delay = 1, tmp; \

#define SCOND_FAIL_RETRY_ASM \
" bz 4f \n" \
" ; --- scond fail delay --- \n" \
" mov %[tmp], %[delay] \n" /* tmp = delay */ \
"2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \
" sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \
" rol %[delay], %[delay] \n" /* delay *= 2 */ \
" b 1b \n" /* start over */ \
"4: ; --- success --- \n" \

#define SCOND_FAIL_RETRY_VARS \
,[delay] "+&r" (delay),[tmp] "=&r" (tmp) \

#else /* !CONFIG_ARC_STAR_9000923308 */

#define SCOND_FAIL_RETRY_VAR_DEF

#define SCOND_FAIL_RETRY_ASM \
" bnz 1b \n" \

#define SCOND_FAIL_RETRY_VARS

#endif

#define ATOMIC_OP(op, c_op, asm_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
unsigned int val; \
SCOND_FAIL_RETRY_VAR_DEF \
unsigned int val; \
\
__asm__ __volatile__( \
"1: llock %[val], [%[ctr]] \n" \
" " #asm_op " %[val], %[val], %[i] \n" \
" scond %[val], [%[ctr]] \n" \
" \n" \
SCOND_FAIL_RETRY_ASM \
\
" bnz 1b \n" \
: [val] "=&r" (val) /* Early clobber to prevent reg reuse */ \
SCOND_FAIL_RETRY_VARS \
: [ctr] "r" (&v->counter), /* Not "m": llock only supports reg direct addr mode */ \
[i] "ir" (i) \
: "cc"); \

@@ -77,8 +44,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned int val; \
SCOND_FAIL_RETRY_VAR_DEF \
unsigned int val; \
\
/* \
* Explicit full memory barrier needed before/after as \

@@ -90,11 +56,8 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
"1: llock %[val], [%[ctr]] \n" \
" " #asm_op " %[val], %[val], %[i] \n" \
" scond %[val], [%[ctr]] \n" \
" \n" \
SCOND_FAIL_RETRY_ASM \
\
" bnz 1b \n" \
: [val] "=&r" (val) \
SCOND_FAIL_RETRY_VARS \
: [ctr] "r" (&v->counter), \
[i] "ir" (i) \
: "cc"); \

@@ -76,8 +76,8 @@
* We need to be a bit more cautious here. What if a kernel bug in
* L1 ISR, caused SP to go whaco (some small value which looks like
* USER stk) and then we take L2 ISR.
* Above brlo alone would treat it as a valid L1-L2 sceanrio
* instead of shouting alound
* Above brlo alone would treat it as a valid L1-L2 scenario
* instead of shouting around
* The only feasible way is to make sure this L2 happened in
* L1 prelogue ONLY i.e. ilink2 is less than a pre-set marker in
* L1 ISR before it switches stack

@@ -83,7 +83,7 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
local_flush_tlb_all();

/*
* Above checke for rollover of 8 bit ASID in 32 bit container.
* Above check for rollover of 8 bit ASID in 32 bit container.
* If the container itself wrapped around, set it to a non zero
* "generation" to distinguish from no context
*/

@@ -47,7 +47,7 @@
* Page Tables are purely for Linux VM's consumption and the bits below are
* suited to that (uniqueness). Hence some are not implemented in the TLB and
* some have different value in TLB.
* e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible becoz they live in
* e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live in
* seperate PD0 and PD1, which combined forms a translation entry)
* while for PTE perspective, they are 8 and 9 respectively
* with MMU v3: Most bits (except SHARED) represent the exact hardware pos

@@ -78,7 +78,7 @@ struct task_struct;
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp)

/*
* Where abouts of Task's sp, fp, blink when it was last seen in kernel mode.
* Where about of Task's sp, fp, blink when it was last seen in kernel mode.
* Look in process.c for details of kernel stack layout
*/
#define TSK_K_ESP(tsk) (tsk->thread.ksp)

@@ -86,7 +86,7 @@ static inline const char *arc_platform_smp_cpuinfo(void)
* (1) These insn were introduced only in 4.10 release. So for older released
* support needed.
*
* (2) In a SMP setup, the LLOCK/SCOND atomiticity across CPUs needs to be
* (2) In a SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be
* gaurantted by the platform (not something which core handles).
* Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ
* disabling for atomicity.

@@ -20,11 +20,6 @@

#ifdef CONFIG_ARC_HAS_LLSC

/*
* A normal LLOCK/SCOND based system, w/o need for livelock workaround
*/
#ifndef CONFIG_ARC_STAR_9000923308

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned int val;

@@ -238,293 +233,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
smp_mb();
}

#else /* CONFIG_ARC_STAR_9000923308 */

/*
* HS38x4 could get into a LLOCK/SCOND livelock in case of multiple overlapping
* coherency transactions in the SCU. The exclusive line state keeps rotating
* among contenting cores leading to a never ending cycle. So break the cycle
* by deferring the retry of failed exclusive access (SCOND). The actual delay
* needed is function of number of contending cores as well as the unrelated
* coherency traffic from other cores. To keep the code simple, start off with
* small delay of 1 which would suffice most cases and in case of contention
* double the delay. Eventually the delay is sufficient such that the coherency
* pipeline is drained, thus a subsequent exclusive access would succeed.
*/

#define SCOND_FAIL_RETRY_VAR_DEF \
unsigned int delay, tmp; \

#define SCOND_FAIL_RETRY_ASM \
" ; --- scond fail delay --- \n" \
" mov %[tmp], %[delay] \n" /* tmp = delay */ \
"2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \
" sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \
" rol %[delay], %[delay] \n" /* delay *= 2 */ \
" b 1b \n" /* start over */ \
" \n" \
"4: ; --- done --- \n" \

#define SCOND_FAIL_RETRY_VARS \
,[delay] "=&r" (delay), [tmp] "=&r" (tmp) \

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned int val;
SCOND_FAIL_RETRY_VAR_DEF;

smp_mb();

__asm__ __volatile__(
"0: mov %[delay], 1 \n"
"1: llock %[val], [%[slock]] \n"
" breq %[val], %[LOCKED], 0b \n" /* spin while LOCKED */
" scond %[LOCKED], [%[slock]] \n" /* acquire */
" bz 4f \n" /* done */
" \n"
SCOND_FAIL_RETRY_ASM

: [val] "=&r" (val)
SCOND_FAIL_RETRY_VARS
: [slock] "r" (&(lock->slock)),
[LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
: "memory", "cc");

smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
unsigned int val, got_it = 0;
SCOND_FAIL_RETRY_VAR_DEF;

smp_mb();

__asm__ __volatile__(
"0: mov %[delay], 1 \n"
"1: llock %[val], [%[slock]] \n"
" breq %[val], %[LOCKED], 4f \n" /* already LOCKED, just bail */
" scond %[LOCKED], [%[slock]] \n" /* acquire */
" bz.d 4f \n"
" mov.z %[got_it], 1 \n" /* got it */
" \n"
SCOND_FAIL_RETRY_ASM

: [val] "=&r" (val),
[got_it] "+&r" (got_it)
SCOND_FAIL_RETRY_VARS
: [slock] "r" (&(lock->slock)),
[LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
: "memory", "cc");

smp_mb();

return got_it;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
smp_mb();

lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;

smp_mb();
}

/*
* Read-write spinlocks, allowing multiple readers but only one writer.
* Unfair locking as Writers could be starved indefinitely by Reader(s)
*/

static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned int val;
SCOND_FAIL_RETRY_VAR_DEF;

smp_mb();

/*
* zero means writer holds the lock exclusively, deny Reader.
* Otherwise grant lock to first/subseq reader
*
* if (rw->counter > 0) {
* rw->counter--;
* ret = 1;
* }
*/

__asm__ __volatile__(
"0: mov %[delay], 1 \n"
"1: llock %[val], [%[rwlock]] \n"
" brls %[val], %[WR_LOCKED], 0b\n" /* <= 0: spin while write locked */
" sub %[val], %[val], 1 \n" /* reader lock */
" scond %[val], [%[rwlock]] \n"
" bz 4f \n" /* done */
" \n"
SCOND_FAIL_RETRY_ASM

: [val] "=&r" (val)
SCOND_FAIL_RETRY_VARS
: [rwlock] "r" (&(rw->counter)),
[WR_LOCKED] "ir" (0)
: "memory", "cc");

smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
unsigned int val, got_it = 0;
SCOND_FAIL_RETRY_VAR_DEF;

smp_mb();

__asm__ __volatile__(
"0: mov %[delay], 1 \n"
"1: llock %[val], [%[rwlock]] \n"
" brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */
" sub %[val], %[val], 1 \n" /* counter-- */
" scond %[val], [%[rwlock]] \n"
" bz.d 4f \n"
" mov.z %[got_it], 1 \n" /* got it */
" \n"
SCOND_FAIL_RETRY_ASM

: [val] "=&r" (val),
[got_it] "+&r" (got_it)
SCOND_FAIL_RETRY_VARS
: [rwlock] "r" (&(rw->counter)),
[WR_LOCKED] "ir" (0)
: "memory", "cc");

smp_mb();

return got_it;
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
unsigned int val;
SCOND_FAIL_RETRY_VAR_DEF;

smp_mb();

/*
* If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
* deny writer. Otherwise if unlocked grant to writer
* Hence the claim that Linux rwlocks are unfair to writers.
* (can be starved for an indefinite time by readers).
*
* if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
* rw->counter = 0;
* ret = 1;
* }
*/

__asm__ __volatile__(
"0: mov %[delay], 1 \n"
"1: llock %[val], [%[rwlock]] \n"
" brne %[val], %[UNLOCKED], 0b \n" /* while !UNLOCKED spin */
" mov %[val], %[WR_LOCKED] \n"
" scond %[val], [%[rwlock]] \n"
" bz 4f \n"
" \n"
SCOND_FAIL_RETRY_ASM

: [val] "=&r" (val)
SCOND_FAIL_RETRY_VARS
: [rwlock] "r" (&(rw->counter)),
[UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
[WR_LOCKED] "ir" (0)
: "memory", "cc");

smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
unsigned int val, got_it = 0;
SCOND_FAIL_RETRY_VAR_DEF;

smp_mb();

__asm__ __volatile__(
"0: mov %[delay], 1 \n"
"1: llock %[val], [%[rwlock]] \n"
" brne %[val], %[UNLOCKED], 4f \n" /* !UNLOCKED, bail */
" mov %[val], %[WR_LOCKED] \n"
" scond %[val], [%[rwlock]] \n"
" bz.d 4f \n"
" mov.z %[got_it], 1 \n" /* got it */
" \n"
SCOND_FAIL_RETRY_ASM

: [val] "=&r" (val),
[got_it] "+&r" (got_it)
SCOND_FAIL_RETRY_VARS
: [rwlock] "r" (&(rw->counter)),
[UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
[WR_LOCKED] "ir" (0)
: "memory", "cc");

smp_mb();

return got_it;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
unsigned int val;

smp_mb();

/*
* rw->counter++;
*/
__asm__ __volatile__(
"1: llock %[val], [%[rwlock]] \n"
" add %[val], %[val], 1 \n"
" scond %[val], [%[rwlock]] \n"
" bnz 1b \n"
" \n"
: [val] "=&r" (val)
: [rwlock] "r" (&(rw->counter))
: "memory", "cc");

smp_mb();
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
unsigned int val;

smp_mb();

/*
* rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
*/
__asm__ __volatile__(
"1: llock %[val], [%[rwlock]] \n"
" scond %[UNLOCKED], [%[rwlock]]\n"
" bnz 1b \n"
" \n"
: [val] "=&r" (val)
: [rwlock] "r" (&(rw->counter)),
[UNLOCKED] "r" (__ARCH_RW_LOCK_UNLOCKED__)
: "memory", "cc");

smp_mb();
}

#undef SCOND_FAIL_RETRY_VAR_DEF
#undef SCOND_FAIL_RETRY_ASM
#undef SCOND_FAIL_RETRY_VARS

#endif /* CONFIG_ARC_STAR_9000923308 */

#else /* !CONFIG_ARC_HAS_LLSC */

static inline void arch_spin_lock(arch_spinlock_t *lock)

@@ -103,7 +103,7 @@ static inline __attribute_const__ struct thread_info *current_thread_info(void)

/*
* _TIF_ALLWORK_MASK includes SYSCALL_TRACE, but we don't need it.
* SYSCALL_TRACE is anways seperately/unconditionally tested right after a
* SYSCALL_TRACE is anyway seperately/unconditionally tested right after a
* syscall, so all that reamins to be tested is _TIF_WORK_MASK
*/

@@ -32,7 +32,7 @@
#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))

/*
* Algorthmically, for __user_ok() we want do:
* Algorithmically, for __user_ok() we want do:
* (start < TASK_SIZE) && (start+len < TASK_SIZE)
* where TASK_SIZE could either be retrieved from thread_info->addr_limit or
* emitted directly in code.

@@ -74,7 +74,7 @@
__tmp ^ __in; \
})

#elif (ARC_BSWAP_TYPE == 2) /* Custom single cycle bwap instruction */
#elif (ARC_BSWAP_TYPE == 2) /* Custom single cycle bswap instruction */

#define __arch_swab32(x) \
({ \

@@ -91,27 +91,13 @@ VECTOR mem_service ; 0x8, Mem exception (0x1)
VECTOR instr_service ; 0x10, Instrn Error (0x2)

; ******************** Device ISRs **********************
#ifdef CONFIG_ARC_IRQ3_LV2
#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
VECTOR handle_interrupt_level2
#else
VECTOR handle_interrupt_level1
#endif

VECTOR handle_interrupt_level1

#ifdef CONFIG_ARC_IRQ5_LV2
VECTOR handle_interrupt_level2
#else
VECTOR handle_interrupt_level1
#endif

#ifdef CONFIG_ARC_IRQ6_LV2
VECTOR handle_interrupt_level2
#else
VECTOR handle_interrupt_level1
#endif

.rept 25
.rept 28
VECTOR handle_interrupt_level1 ; Other devices
.endr

@@ -28,10 +28,8 @@ void arc_init_IRQ(void)
{
int level_mask = 0;

/* setup any high priority Interrupts (Level2 in ARCompact jargon) */
level_mask |= IS_ENABLED(CONFIG_ARC_IRQ3_LV2) << 3;
level_mask |= IS_ENABLED(CONFIG_ARC_IRQ5_LV2) << 5;
level_mask |= IS_ENABLED(CONFIG_ARC_IRQ6_LV2) << 6;
/* Is timer high priority Interrupt (Level2 in ARCompact jargon) */
level_mask |= IS_ENABLED(CONFIG_ARC_COMPACT_IRQ_LEVELS) << TIMER0_IRQ;

/*
* Write to register, even if no LV2 IRQs configured to reset it

@@ -108,7 +108,7 @@ static void arc_perf_event_update(struct perf_event *event,
int64_t delta = new_raw_count - prev_raw_count;

/*
* We don't afaraid of hwc->prev_count changing beneath our feet
* We aren't afraid of hwc->prev_count changing beneath our feet
* because there's no way for us to re-enter this function anytime.
*/
local64_set(&hwc->prev_count, new_raw_count);

@@ -392,7 +392,7 @@ void __init setup_arch(char **cmdline_p)
/*
* If we are here, it is established that @uboot_arg didn't
* point to DT blob. Instead if u-boot says it is cmdline,
* Appent to embedded DT cmdline.
* append to embedded DT cmdline.
* setup_machine_fdt() would have populated @boot_command_line
*/
if (uboot_tag == 1) {

@@ -34,7 +34,7 @@
* -ViXS were still seeing crashes when using insmod to load drivers.
* It turned out that the code to change Execute permssions for TLB entries
* of user was not guarded for interrupts (mod_tlb_permission)
* This was cauing TLB entries to be overwritten on unrelated indexes
* This was causing TLB entries to be overwritten on unrelated indexes
*
* Vineetg: July 15th 2008: Bug #94183
* -Exception happens in Delay slot of a JMP, and before user space resumes,

@@ -276,7 +276,7 @@ static int tlb_stats_open(struct inode *inode, struct file *file)
return 0;
}

/* called on user read(): display the couters */
/* called on user read(): display the counters */
static ssize_t tlb_stats_output(struct file *file, /* file descriptor */
char __user *user_buf, /* user buffer */
size_t len, /* length of buffer */

@@ -215,7 +215,7 @@ slc_chk:
* ------------------
* This ver of MMU supports variable page sizes (1k-16k): although Linux will
* only support 8k (default), 16k and 4k.
* However from hardware perspective, smaller page sizes aggrevate aliasing
* However from hardware perspective, smaller page sizes aggravate aliasing
* meaning more vaddr bits needed to disambiguate the cache-line-op ;
* the existing scheme of piggybacking won't work for certain configurations.
* Two new registers IC_PTAG and DC_PTAG inttoduced.

@@ -302,7 +302,7 @@ void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,

/*
* This is technically for MMU v4, using the MMU v3 programming model
* Special work for HS38 aliasing I-cache configuratino with PAE40
* Special work for HS38 aliasing I-cache configuration with PAE40
* - upper 8 bits of paddr need to be written into PTAG_HI
* - (and needs to be written before the lower 32 bits)
* Note that PTAG_HI is hoisted outside the line loop

@@ -936,7 +936,7 @@ void arc_cache_init(void)
ic->ver, CONFIG_ARC_MMU_VER);

/*
* In MMU v4 (HS38x) the alising icache config uses IVIL/PTAG
* In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
* pair to provide vaddr/paddr respectively, just as in MMU v3
*/
if (is_isa_arcv2() && ic->alias)

@@ -10,7 +10,7 @@
* DMA Coherent API Notes
*
* I/O is inherently non-coherent on ARC. So a coherent DMA buffer is
* implemented by accessintg it using a kernel virtual address, with
* implemented by accessing it using a kernel virtual address, with
* Cache bit off in the TLB entry.
*
* The default DMA address == Phy address which is 0x8000_0000 based.

@@ -109,7 +109,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
* PTE_RDONLY is cleared by default in the asm below, so set it in
* back if necessary (read-only or clean PTE).
*/
if (!pte_write(entry) || !dirty)
if (!pte_write(entry) || !pte_sw_dirty(entry))
pte_val(entry) |= PTE_RDONLY;

/*

@@ -172,7 +172,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
pte_fragment_fre((unsigned long *)pte, 1);
pte_fragment_free((unsigned long *)pte, 1);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)

@@ -719,7 +719,7 @@ unsigned char ibm_architecture_vec[] = {
* must match by the macro below. Update the definition if
* the structure layout changes.
*/
#define IBM_ARCH_VEC_NRCORES_OFFSET 125
#define IBM_ARCH_VEC_NRCORES_OFFSET 133
W(NR_CPUS), /* number of cores supported */
0,
0,

@@ -377,7 +377,7 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset,

#else
BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
offsetof(struct thread_fp_state, fpr[32][0]));
offsetof(struct thread_fp_state, fpr[32]));

return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.fp_state, 0, -1);

@@ -405,7 +405,7 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
return 0;
#else
BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
offsetof(struct thread_fp_state, fpr[32][0]));
offsetof(struct thread_fp_state, fpr[32]));

return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.fp_state, 0, -1);

@@ -550,7 +550,11 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
}
}
/* This works for all page sizes, and for 256M and 1T segments */
*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
if (cpu_has_feature(CPU_FTR_ARCH_300))
*ssize = hpte_r >> HPTE_R_3_0_SSIZE_SHIFT;
else
*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;

shift = mmu_psize_defs[size].shift;

avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);

@@ -117,7 +117,7 @@ static inline void _tlbie_va(unsigned long va, unsigned long pid,
*/
void radix__local_flush_tlb_mm(struct mm_struct *mm)
{
unsigned int pid;
unsigned long pid;

preempt_disable();
pid = mm->context.id;

@@ -130,7 +130,7 @@ EXPORT_SYMBOL(radix__local_flush_tlb_mm);
void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
unsigned long ap, int nid)
{
unsigned int pid;
unsigned long pid;

preempt_disable();
pid = mm ? mm->context.id : 0;

@@ -160,7 +160,7 @@ static int mm_is_core_local(struct mm_struct *mm)

void radix__flush_tlb_mm(struct mm_struct *mm)
{
unsigned int pid;
unsigned long pid;

preempt_disable();
pid = mm->context.id;

@@ -185,7 +185,7 @@ EXPORT_SYMBOL(radix__flush_tlb_mm);
void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
unsigned long ap, int nid)
{
unsigned int pid;
unsigned long pid;

preempt_disable();
pid = mm ? mm->context.id : 0;

@@ -927,7 +927,7 @@ static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
dn = pci_device_to_OF_node(dev);
pdn = PCI_DN(dn);
buid = pdn->phb->buid;
cfg_addr = (pdn->busno << 8) | pdn->devfn;
cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));

ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query,
cfg_addr, BUID_HI(buid), BUID_LO(buid));

@@ -956,7 +956,7 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
dn = pci_device_to_OF_node(dev);
pdn = PCI_DN(dn);
buid = pdn->phb->buid;
cfg_addr = (pdn->busno << 8) | pdn->devfn;
cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));

do {
/* extra outputs are LIOBN and dma-addr (hi, lo) */

@@ -162,6 +162,9 @@ isoimage: $(obj)/bzImage
for i in lib lib64 share end ; do \
if [ -f /usr/$$i/syslinux/isolinux.bin ] ; then \
cp /usr/$$i/syslinux/isolinux.bin $(obj)/isoimage ; \
if [ -f /usr/$$i/syslinux/ldlinux.c32 ]; then \
cp /usr/$$i/syslinux/ldlinux.c32 $(obj)/isoimage ; \
fi ; \
break ; \
fi ; \
if [ $$i = end ] ; then exit 1 ; fi ; \

@@ -714,7 +714,7 @@ static void cleanup_rapl_pmus(void)
int i;

for (i = 0; i < rapl_pmus->maxpkg; i++)
kfree(rapl_pmus->pmus + i);
kfree(rapl_pmus->pmus[i]);
kfree(rapl_pmus);
}

@@ -2868,27 +2868,10 @@ static struct intel_uncore_type bdx_uncore_cbox = {
.format_group = &hswep_uncore_cbox_format_group,
};

static struct intel_uncore_type bdx_uncore_sbox = {
.name = "sbox",
.num_counters = 4,
.num_boxes = 4,
.perf_ctr_bits = 48,
.event_ctl = HSWEP_S0_MSR_PMON_CTL0,
.perf_ctr = HSWEP_S0_MSR_PMON_CTR0,
.event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
.box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL,
.msr_offset = HSWEP_SBOX_MSR_OFFSET,
.ops = &hswep_uncore_sbox_msr_ops,
.format_group = &hswep_uncore_sbox_format_group,
};

#define BDX_MSR_UNCORE_SBOX 3

static struct intel_uncore_type *bdx_msr_uncores[] = {
&bdx_uncore_ubox,
&bdx_uncore_cbox,
&hswep_uncore_pcu,
&bdx_uncore_sbox,
NULL,
};

@@ -2897,10 +2880,6 @@ void bdx_uncore_cpu_init(void)
if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
uncore_msr_uncores = bdx_msr_uncores;

/* BDX-DE doesn't have SBOX */
if (boot_cpu_data.x86_model == 86)
uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
}

static struct intel_uncore_type bdx_uncore_ha = {

@@ -0,0 +1,68 @@
#ifndef _ASM_X86_INTEL_FAMILY_H
#define _ASM_X86_INTEL_FAMILY_H

/*
* "Big Core" Processors (Branded as Core, Xeon, etc...)
*
* The "_X" parts are generally the EP and EX Xeons, or the
* "Extreme" ones, like Broadwell-E.
*
* Things ending in "2" are usually because we have no better
* name for them. There's no processor called "WESTMERE2".
*/

#define INTEL_FAM6_CORE_YONAH 0x0E
#define INTEL_FAM6_CORE2_MEROM 0x0F
#define INTEL_FAM6_CORE2_MEROM_L 0x16
#define INTEL_FAM6_CORE2_PENRYN 0x17
#define INTEL_FAM6_CORE2_DUNNINGTON 0x1D

#define INTEL_FAM6_NEHALEM 0x1E
#define INTEL_FAM6_NEHALEM_EP 0x1A
#define INTEL_FAM6_NEHALEM_EX 0x2E
#define INTEL_FAM6_WESTMERE 0x25
#define INTEL_FAM6_WESTMERE2 0x1F
#define INTEL_FAM6_WESTMERE_EP 0x2C
#define INTEL_FAM6_WESTMERE_EX 0x2F

#define INTEL_FAM6_SANDYBRIDGE 0x2A
#define INTEL_FAM6_SANDYBRIDGE_X 0x2D
#define INTEL_FAM6_IVYBRIDGE 0x3A
#define INTEL_FAM6_IVYBRIDGE_X 0x3E

#define INTEL_FAM6_HASWELL_CORE 0x3C
#define INTEL_FAM6_HASWELL_X 0x3F
#define INTEL_FAM6_HASWELL_ULT 0x45
#define INTEL_FAM6_HASWELL_GT3E 0x46

#define INTEL_FAM6_BROADWELL_CORE 0x3D
#define INTEL_FAM6_BROADWELL_XEON_D 0x56
#define INTEL_FAM6_BROADWELL_GT3E 0x47
#define INTEL_FAM6_BROADWELL_X 0x4F

#define INTEL_FAM6_SKYLAKE_MOBILE 0x4E
#define INTEL_FAM6_SKYLAKE_DESKTOP 0x5E
#define INTEL_FAM6_SKYLAKE_X 0x55
#define INTEL_FAM6_KABYLAKE_MOBILE 0x8E
#define INTEL_FAM6_KABYLAKE_DESKTOP 0x9E

/* "Small Core" Processors (Atom) */

#define INTEL_FAM6_ATOM_PINEVIEW 0x1C
#define INTEL_FAM6_ATOM_LINCROFT 0x26
#define INTEL_FAM6_ATOM_PENWELL 0x27
#define INTEL_FAM6_ATOM_CLOVERVIEW 0x35
#define INTEL_FAM6_ATOM_CEDARVIEW 0x36
#define INTEL_FAM6_ATOM_SILVERMONT1 0x37 /* BayTrail/BYT / Valleyview */
#define INTEL_FAM6_ATOM_SILVERMONT2 0x4D /* Avaton/Rangely */
#define INTEL_FAM6_ATOM_AIRMONT 0x4C /* CherryTrail / Braswell */
#define INTEL_FAM6_ATOM_MERRIFIELD1 0x4A /* Tangier */
#define INTEL_FAM6_ATOM_MERRIFIELD2 0x5A /* Annidale */
#define INTEL_FAM6_ATOM_GOLDMONT 0x5C
#define INTEL_FAM6_ATOM_DENVERTON 0x5F /* Goldmont Microserver */

/* Xeon Phi */

#define INTEL_FAM6_XEON_PHI_KNL 0x57 /* Knights Landing */

#endif /* _ASM_X86_INTEL_FAMILY_H */

@@ -122,7 +122,7 @@ notrace static inline void native_write_msr(unsigned int msr,
"2:\n"
_ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_unsafe)
: : "c" (msr), "a"(low), "d" (high) : "memory");
if (msr_tracepoint_active(__tracepoint_read_msr))
if (msr_tracepoint_active(__tracepoint_write_msr))
do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
}

@@ -141,7 +141,7 @@ notrace static inline int native_write_msr_safe(unsigned int msr,
: "c" (msr), "0" (low), "d" (high),
[fault] "i" (-EIO)
: "memory");
if (msr_tracepoint_active(__tracepoint_read_msr))
if (msr_tracepoint_active(__tracepoint_write_msr))
do_trace_write_msr(msr, ((u64)high << 32 | low), err);
return err;
}

@@ -2588,8 +2588,8 @@ static struct resource * __init ioapic_setup_resources(void)
res[num].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i);
mem += IOAPIC_RESOURCE_NAME_SIZE;
ioapics[i].iomem_res = &res[num];
num++;
ioapics[i].iomem_res = res;
}

ioapic_resources = res;

@@ -674,14 +674,14 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
u64 value;

/* re-enable TopologyExtensions if switched off by BIOS */
if ((c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
if ((c->x86_model >= 0x10) && (c->x86_model <= 0x6f) &&
!cpu_has(c, X86_FEATURE_TOPOEXT)) {

if (msr_set_bit(0xc0011005, 54) > 0) {
rdmsrl(0xc0011005, value);
if (value & BIT_64(54)) {
set_cpu_cap(c, X86_FEATURE_TOPOEXT);
pr_info(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
}
}
}

@@ -96,6 +96,12 @@ static inline void cond_local_irq_disable(struct pt_regs *regs)
local_irq_disable();
}

/*
* In IST context, we explicitly disable preemption. This serves two
* purposes: it makes it much less likely that we would accidentally
* schedule in IST context and it will force a warning if we somehow
* manage to schedule by accident.
*/
void ist_enter(struct pt_regs *regs)
{
if (user_mode(regs)) {

@@ -110,13 +116,7 @@ void ist_enter(struct pt_regs *regs)
rcu_nmi_enter();
}

/*
* We are atomic because we're on the IST stack; or we're on
* x86_32, in which case we still shouldn't schedule; or we're
* on x86_64 and entered from user mode, in which case we're
* still atomic unless ist_begin_non_atomic is called.
*/
preempt_count_add(HARDIRQ_OFFSET);
preempt_disable();

/* This code is a bit fragile. Test it. */
RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work");

@@ -124,7 +124,7 @@ void ist_enter(struct pt_regs *regs)

void ist_exit(struct pt_regs *regs)
{
preempt_count_sub(HARDIRQ_OFFSET);
preempt_enable_no_resched();

if (!user_mode(regs))
rcu_nmi_exit();

@@ -155,7 +155,7 @@ void ist_begin_non_atomic(struct pt_regs *regs)
BUG_ON((unsigned long)(current_top_of_stack() -
current_stack_pointer()) >= THREAD_SIZE);

preempt_count_sub(HARDIRQ_OFFSET);
preempt_enable_no_resched();
}

/**

@@ -165,7 +165,7 @@ void ist_begin_non_atomic(struct pt_regs *regs)
*/
void ist_end_non_atomic(void)
{
preempt_count_add(HARDIRQ_OFFSET);
preempt_disable();
}

static nokprobe_inline int

@@ -113,6 +113,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
ret = submit_bio_wait(type, bio);
if (ret == -EOPNOTSUPP)
ret = 0;
bio_put(bio);
}
blk_finish_plug(&plug);

@@ -165,8 +166,10 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
}
}

if (bio)
if (bio) {
ret = submit_bio_wait(REQ_WRITE | REQ_WRITE_SAME, bio);
bio_put(bio);
}
return ret != -EOPNOTSUPP ? ret : 0;
}
EXPORT_SYMBOL(blkdev_issue_write_same);

@@ -206,8 +209,11 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
}
}

if (bio)
return submit_bio_wait(WRITE, bio);
if (bio) {
ret = submit_bio_wait(WRITE, bio);
bio_put(bio);
return ret;
}
return 0;
}

@@ -1262,12 +1262,9 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)

blk_queue_split(q, &bio, q->bio_split);

if (!is_flush_fua && !blk_queue_nomerges(q)) {
if (blk_attempt_plug_merge(q, bio, &request_count,
&same_queue_rq))
return BLK_QC_T_NONE;
} else
request_count = blk_plug_queued_count(q);
if (!is_flush_fua && !blk_queue_nomerges(q) &&
blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
return BLK_QC_T_NONE;

rq = blk_mq_map_request(q, bio, &data);
if (unlikely(!rq))

@@ -1358,9 +1355,11 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)

blk_queue_split(q, &bio, q->bio_split);

if (!is_flush_fua && !blk_queue_nomerges(q) &&
blk_attempt_plug_merge(q, bio, &request_count, NULL))
return BLK_QC_T_NONE;
if (!is_flush_fua && !blk_queue_nomerges(q)) {
if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
return BLK_QC_T_NONE;
} else
request_count = blk_plug_queued_count(q);

rq = blk_mq_map_request(q, bio, &data);
if (unlikely(!rq))

@@ -1051,7 +1051,7 @@ static int __init acpi_bus_init(void)
* Maybe EC region is required at bus_scan/acpi_get_devices. So it
* is necessary to enable it as early as possible.
*/
acpi_boot_ec_enable();
acpi_ec_dsdt_probe();

printk(KERN_INFO PREFIX "Interpreter enabled\n");

@@ -1446,10 +1446,30 @@ ec_parse_io_ports(struct acpi_resource *resource, void *context)
return AE_OK;
}

int __init acpi_boot_ec_enable(void)
static const struct acpi_device_id ec_device_ids[] = {
{"PNP0C09", 0},
{"", 0},
};

int __init acpi_ec_dsdt_probe(void)
{
if (!boot_ec)
acpi_status status;

if (boot_ec)
return 0;

/*
* Finding EC from DSDT if there is no ECDT EC available. When this
* function is invoked, ACPI tables have been fully loaded, we can
* walk namespace now.
*/
boot_ec = make_acpi_ec();
if (!boot_ec)
return -ENOMEM;
status = acpi_get_devices(ec_device_ids[0].id,
ec_parse_device, boot_ec, NULL);
if (ACPI_FAILURE(status) || !boot_ec->handle)
return -ENODEV;
if (!ec_install_handlers(boot_ec)) {
first_ec = boot_ec;
return 0;

@@ -1457,11 +1477,6 @@ int __init acpi_boot_ec_enable(void)
return -EFAULT;
}

static const struct acpi_device_id ec_device_ids[] = {
{"PNP0C09", 0},
{"", 0},
};

#if 0
/*
* Some EC firmware variations refuses to respond QR_EC when SCI_EVT is not

@@ -181,7 +181,7 @@ typedef int (*acpi_ec_query_func) (void *data);

int acpi_ec_init(void);
int acpi_ec_ecdt_probe(void);
int acpi_boot_ec_enable(void);
int acpi_ec_dsdt_probe(void);
void acpi_ec_block_transactions(void);
void acpi_ec_unblock_transactions(void);
void acpi_ec_unblock_transactions_early(void);

@@ -941,7 +941,7 @@ static int nbd_dev_dbg_init(struct nbd_device *nbd)
debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
debugfs_create_u32("timeout", 0444, dir, &nbd->xmit_timeout);
debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize);
debugfs_create_file("flags", 0444, dir, &nbd, &nbd_dbg_flags_ops);
debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);

return 0;
}

@@ -874,8 +874,12 @@ static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *qd)
{
unsigned long flags;
struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)hctx->driver_data;
int qid = hctx->queue_num;
struct blkfront_info *info = hctx->queue->queuedata;
struct blkfront_ring_info *rinfo = NULL;

BUG_ON(info->nr_rings <= qid);
rinfo = &info->rinfo[qid];
blk_mq_start_request(qd->rq);
spin_lock_irqsave(&rinfo->ring_lock, flags);
if (RING_FULL(&rinfo->ring))

@@ -901,20 +905,9 @@ out_busy:
return BLK_MQ_RQ_QUEUE_BUSY;
}

static int blk_mq_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int index)
{
struct blkfront_info *info = (struct blkfront_info *)data;

BUG_ON(info->nr_rings <= index);
hctx->driver_data = &info->rinfo[index];
return 0;
}

static struct blk_mq_ops blkfront_mq_ops = {
.queue_rq = blkif_queue_rq,
.map_queue = blk_mq_map_queue,
.init_hctx = blk_mq_init_hctx,
};

static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,

@@ -950,6 +943,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
return PTR_ERR(rq);
}

rq->queuedata = info;
queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);

if (info->feature_discard) {

@@ -2149,6 +2143,8 @@ static int blkfront_resume(struct xenbus_device *dev)
return err;

err = talk_to_blkback(dev, info);
if (!err)
blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings);

/*
* We have to wait for the backend to switch to

@@ -2485,10 +2481,23 @@ static void blkback_changed(struct xenbus_device *dev,
break;

case XenbusStateConnected:
if (dev->state != XenbusStateInitialised) {
/*
* talk_to_blkback sets state to XenbusStateInitialised
* and blkfront_connect sets it to XenbusStateConnected
* (if connection went OK).
*
* If the backend (or toolstack) decides to poke at backend
* state (and re-trigger the watch by setting the state repeatedly
* to XenbusStateConnected (4)) we need to deal with this.
* This is allowed as this is used to communicate to the guest
* that the size of disk has changed!
*/
if ((dev->state != XenbusStateInitialised) &&
(dev->state != XenbusStateConnected)) {
if (talk_to_blkback(dev, info))
break;
}

blkfront_connect(info);
break;

@@ -1460,6 +1460,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)

intel_pstate_clear_update_util_hook(policy->cpu);

pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
policy->cpuinfo.max_freq, policy->max);

cpu = all_cpu_data[0];
if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
policy->max < policy->cpuinfo.max_freq &&

@@ -1495,13 +1498,13 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
limits->max_sysfs_pct);
limits->max_perf_pct = max(limits->min_policy_pct,
limits->max_perf_pct);
limits->max_perf = round_up(limits->max_perf, FRAC_BITS);

/* Make sure min_perf_pct <= max_perf_pct */
limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);

limits->min_perf = div_fp(limits->min_perf_pct, 100);
limits->max_perf = div_fp(limits->max_perf_pct, 100);
limits->max_perf = round_up(limits->max_perf, FRAC_BITS);

out:
intel_pstate_set_update_util_hook(policy->cpu);

@@ -1558,8 +1561,11 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)

/* cpuinfo and default policy values */
policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
policy->cpuinfo.max_freq =
cpu->pstate.turbo_pstate * cpu->pstate.scaling;
update_turbo_state();
policy->cpuinfo.max_freq = limits->turbo_disabled ?
cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
policy->cpuinfo.max_freq *= cpu->pstate.scaling;

intel_pstate_init_acpi_perf_limits(policy);
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
cpumask_set_cpu(policy->cpu, policy->cpus);

@@ -174,6 +174,7 @@ static __init void reserve_regions(void)
{
efi_memory_desc_t *md;
u64 paddr, npages, size;
int resv;

if (efi_enabled(EFI_DBG))
pr_info("Processing EFI memory map:\n");

@@ -190,12 +191,14 @@ static __init void reserve_regions(void)
paddr = md->phys_addr;
npages = md->num_pages;

resv = is_reserve_region(md);
if (efi_enabled(EFI_DBG)) {
char buf[64];

pr_info(" 0x%012llx-0x%012llx %s",
pr_info(" 0x%012llx-0x%012llx %s%s\n",
paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1,
efi_md_typeattr_format(buf, sizeof(buf), md));
efi_md_typeattr_format(buf, sizeof(buf), md),
resv ? "*" : "");
}

memrange_efi_to_native(&paddr, &npages);

@@ -204,14 +207,9 @@ static __init void reserve_regions(void)
if (is_normal_ram(md))
early_init_dt_add_memory_arch(paddr, size);

if (is_reserve_region(md)) {
if (resv)
memblock_mark_nomap(paddr, size);
if (efi_enabled(EFI_DBG))
pr_cont("*");
}

if (efi_enabled(EFI_DBG))
pr_cont("\n");
}

set_bit(EFI_MEMMAP, &efi.flags);

@@ -33,6 +33,7 @@ config ARCH_REQUIRE_GPIOLIB

menuconfig GPIOLIB
bool "GPIO Support"
select ANON_INODES
help
This enables GPIO support through the generic GPIO library.
You only need to enable this, if you also want to enable

@@ -75,7 +75,7 @@ static int dio48e_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
	struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
	const unsigned io_port = offset / 8;
	const unsigned control_port = io_port / 2;
	const unsigned int control_port = io_port / 3;
	const unsigned control_addr = dio48egpio->base + 3 + control_port*4;
	unsigned long flags;
	unsigned control;

@@ -115,7 +115,7 @@ static int dio48e_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
{
	struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
	const unsigned io_port = offset / 8;
	const unsigned control_port = io_port / 2;
	const unsigned int control_port = io_port / 3;
	const unsigned mask = BIT(offset % 8);
	const unsigned control_addr = dio48egpio->base + 3 + control_port*4;
	const unsigned out_port = (io_port > 2) ? io_port + 1 : io_port;

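The /2 to /3 change is the entire fix. Per the register layout implied by control_addr and out_port above, the device's 48 lines are six 8-bit ports in two groups of three, with one control register per group (offsets 3 and 7), so the control register index is io_port / 3. A quick standalone check of that arithmetic (illustrative only):

	#include <stdio.h>

	int main(void)
	{
		for (unsigned int offset = 0; offset < 48; offset += 8) {
			unsigned int io_port = offset / 8;
			unsigned int control_port = io_port / 3;
			unsigned int out_port = (io_port > 2) ? io_port + 1 : io_port;

			printf("offset %2u: io_port %u -> data offset %u, control offset %u\n",
			       offset, io_port, out_port, 3 + control_port * 4);
		}
		return 0;
	}

With the old io_port / 2, io_port 2 computed control offset 7 and so poked the second group's control register for a first-group port, and io_port 4 and 5 computed offset 11, past both groups entirely.
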
@@ -547,11 +547,11 @@ static void bcm_kona_gpio_reset(struct bcm_kona_gpio *kona_gpio)
	/* disable interrupts and clear status */
	for (i = 0; i < kona_gpio->num_bank; i++) {
		/* Unlock the entire bank first */
		bcm_kona_gpio_write_lock_regs(kona_gpio, i, UNLOCK_CODE);
		bcm_kona_gpio_write_lock_regs(reg_base, i, UNLOCK_CODE);
		writel(0xffffffff, reg_base + GPIO_INT_MASK(i));
		writel(0xffffffff, reg_base + GPIO_INT_STATUS(i));
		/* Now re-lock the bank */
		bcm_kona_gpio_write_lock_regs(kona_gpio, i, LOCK_CODE);
		bcm_kona_gpio_write_lock_regs(reg_base, i, LOCK_CODE);
	}
}

@@ -709,7 +709,13 @@ static int zynq_gpio_probe(struct platform_device *pdev)
		dev_err(&pdev->dev, "input clock not found.\n");
		return PTR_ERR(gpio->clk);
	}
	ret = clk_prepare_enable(gpio->clk);
	if (ret) {
		dev_err(&pdev->dev, "Unable to enable clock.\n");
		return ret;
	}

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0)

@@ -747,6 +753,7 @@ err_pm_put:
	pm_runtime_put(&pdev->dev);
err_pm_dis:
	pm_runtime_disable(&pdev->dev);
	clk_disable_unprepare(gpio->clk);

	return ret;
}

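The second hunk completes the unwind for the clock enabled in the first: anything turned on before a failure point must be turned off on every later error path, in reverse order. A miniature of that shape; all names here are illustrative stand-ins, not kernel API:

	#include <stdio.h>

	static int enable_clock(void) { puts("clk on"); return 0; }
	static void disable_clock(void) { puts("clk off"); }
	static int runtime_get(void) { return -1; /* simulate failure */ }
	static void runtime_disable(void) { puts("pm disabled"); }

	static int probe(void)
	{
		int ret = enable_clock();

		if (ret)
			return ret;	/* nothing to unwind yet */

		ret = runtime_get();
		if (ret < 0)
			goto err_pm_dis;

		return 0;

	err_pm_dis:
		runtime_disable();
		disable_clock();	/* the step the fix adds */
		return ret;
	}

	int main(void)
	{
		return probe() ? 1 : 0;
	}
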
@@ -16,6 +16,7 @@
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/io-mapping.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/of_address.h>

@@ -449,7 +449,6 @@ static void gpiodevice_release(struct device *dev)
{
	struct gpio_device *gdev = dev_get_drvdata(dev);

	cdev_del(&gdev->chrdev);
	list_del(&gdev->list);
	ida_simple_remove(&gpio_ida, gdev->id);
	kfree(gdev->label);

@@ -482,7 +481,6 @@ static int gpiochip_setup_dev(struct gpio_device *gdev)

	/* From this point, the .release() function cleans up gpio_device */
	gdev->dev.release = gpiodevice_release;
	get_device(&gdev->dev);
	pr_debug("%s: registered GPIOs %d to %d on device: %s (%s)\n",
		 __func__, gdev->base, gdev->base + gdev->ngpio - 1,
		 dev_name(&gdev->dev), gdev->chip->label ? : "generic");

@@ -770,6 +768,8 @@ void gpiochip_remove(struct gpio_chip *chip)
	 * be removed, else it will be dangling until the last user is
	 * gone.
	 */
	cdev_del(&gdev->chrdev);
	device_del(&gdev->dev);
	put_device(&gdev->dev);
}
EXPORT_SYMBOL_GPL(gpiochip_remove);

@@ -869,7 +869,7 @@ struct gpio_chip *gpiochip_find(void *data,

	spin_lock_irqsave(&gpio_lock, flags);
	list_for_each_entry(gdev, &gpio_devices, list)
		if (match(gdev->chip, data))
		if (gdev->chip && match(gdev->chip, data))
			break;

	/* No match? */

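The gpiochip_find() hunk guards the match callback because, with the chardev teardown moved into gpiochip_remove(), a gpio_device can now linger on the list with its chip pointer already gone until the last reference drops. In miniature, with invented types and data:

	#include <stdio.h>
	#include <stddef.h>
	#include <string.h>

	struct chip { const char *label; };
	struct gdev { struct chip *chip; };

	static int match(struct chip *chip, void *data)
	{
		return strcmp(chip->label, data) == 0;
	}

	int main(void)
	{
		struct chip c = { "gpio-bank0" };
		/* one device whose chip was already removed, one live chip */
		struct gdev devs[] = { { NULL }, { &c } };
		struct gdev *found = NULL;

		for (size_t i = 0; i < sizeof(devs) / sizeof(devs[0]); i++)
			if (devs[i].chip && match(devs[i].chip, "gpio-bank0")) {
				found = &devs[i];
				break;
			}

		printf("found: %s\n", found ? found->chip->label : "none");
		return 0;
	}

Without the NULL check, the first entry would be dereferenced through a dangling chip pointer.
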
@@ -799,6 +799,7 @@ struct amdgpu_ring {
	unsigned		cond_exe_offs;
	u64			cond_exe_gpu_addr;
	volatile u32		*cond_exe_cpu_addr;
	int			vmid;
};

/*

@@ -936,7 +937,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring,
		    unsigned vm_id, uint64_t pd_addr,
		    uint32_t gds_base, uint32_t gds_size,
		    uint32_t gws_base, uint32_t gws_size,
		    uint32_t oa_base, uint32_t oa_size);
		    uint32_t oa_base, uint32_t oa_size,
		    bool vmid_switch);
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,

@@ -696,6 +696,17 @@ static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
	return result;
}

static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode_id type)
{
	CGS_FUNC_ADEV;
	if ((CGS_UCODE_ID_SMU == type) || (CGS_UCODE_ID_SMU_SK == type)) {
		release_firmware(adev->pm.fw);
		return 0;
	}
	/* cannot release other firmware because they are not created by cgs */
	return -EINVAL;
}

static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
					enum cgs_ucode_id type,
					struct cgs_firmware_info *info)

@@ -1125,6 +1136,7 @@ static const struct cgs_ops amdgpu_cgs_ops = {
	amdgpu_cgs_pm_query_clock_limits,
	amdgpu_cgs_set_camera_voltages,
	amdgpu_cgs_get_firmware_info,
	amdgpu_cgs_rel_firmware,
	amdgpu_cgs_set_powergating_state,
	amdgpu_cgs_set_clockgating_state,
	amdgpu_cgs_get_active_displays_info,

@@ -827,8 +827,10 @@ static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
 */
static void amdgpu_atombios_fini(struct amdgpu_device *adev)
{
	if (adev->mode_info.atom_context)
	if (adev->mode_info.atom_context) {
		kfree(adev->mode_info.atom_context->scratch);
		kfree(adev->mode_info.atom_context->iio);
	}
	kfree(adev->mode_info.atom_context);
	adev->mode_info.atom_context = NULL;
	kfree(adev->mode_info.atom_card_info);

@@ -1325,6 +1327,11 @@ static int amdgpu_fini(struct amdgpu_device *adev)
		adev->ip_block_status[i].valid = false;
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (adev->ip_blocks[i].funcs->late_fini)
			adev->ip_blocks[i].funcs->late_fini((void *)adev);
	}

	return 0;
}

@@ -1513,8 +1520,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
		amdgpu_atombios_has_gpu_virtualization_table(adev);

	/* Post card if necessary */
	if (!amdgpu_card_posted(adev) ||
	    adev->virtualization.supports_sr_iov) {
	if (!amdgpu_card_posted(adev)) {
		if (!adev->bios) {
			dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;

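The new loop gives every IP block an optional late_fini stage, walked in reverse init order like the other teardown passes in this function. The shape of that dispatch, reduced to standalone C (the hook and blocks are invented for illustration):

	#include <stdio.h>

	struct ip_funcs {
		void (*late_fini)(void *handle);
	};

	static void pp_late_fini(void *handle)
	{
		(void)handle;	/* unused in this toy */
		puts("powerplay late_fini");
	}

	static const struct ip_funcs blocks[] = {
		{ NULL },		/* block without a late_fini hook */
		{ pp_late_fini },
	};

	int main(void)
	{
		int n = sizeof(blocks) / sizeof(blocks[0]);

		/* reverse init order; hooks are optional */
		for (int i = n - 1; i >= 0; i--)
			if (blocks[i].late_fini)
				blocks[i].late_fini(NULL);
		return 0;
	}
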
@@ -122,6 +122,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
	bool skip_preamble, need_ctx_switch;
	unsigned patch_offset = ~0;
	struct amdgpu_vm *vm;
	int vmid = 0, old_vmid = ring->vmid;
	struct fence *hwf;
	uint64_t ctx;

@@ -135,9 +136,11 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
	if (job) {
		vm = job->vm;
		ctx = job->ctx;
		vmid = job->vm_id;
	} else {
		vm = NULL;
		ctx = 0;
		vmid = 0;
	}

	if (!ring->ready) {

@@ -163,7 +166,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
		r = amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr,
				    job->gds_base, job->gds_size,
				    job->gws_base, job->gws_size,
				    job->oa_base, job->oa_size);
				    job->oa_base, job->oa_size,
				    (ring->current_ctx == ctx) && (old_vmid != vmid));
		if (r) {
			amdgpu_ring_undo(ring);
			return r;

@@ -180,7 +184,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
	need_ctx_switch = ring->current_ctx != ctx;
	for (i = 0; i < num_ibs; ++i) {
		ib = &ibs[i];

		/* drop preamble IBs if we don't have a context switch */
		if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && skip_preamble)
			continue;

@@ -188,6 +191,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
		amdgpu_ring_emit_ib(ring, ib, job ? job->vm_id : 0,
				    need_ctx_switch);
		need_ctx_switch = false;
		ring->vmid = vmid;
	}

	if (ring->funcs->emit_hdp_invalidate)

@@ -198,6 +202,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
		dev_err(adev->dev, "failed to emit fence (%d)\n", r);
		if (job && job->vm_id)
			amdgpu_vm_reset_id(adev, job->vm_id);
		ring->vmid = old_vmid;
		amdgpu_ring_undo(ring);
		return r;
	}

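Together with the amdgpu_vm_flush() hunks below, the ring now remembers the last VMID it executed with, and the flush path is told explicitly when the same context comes back on a different VMID (on error the saved old_vmid is restored). The predicate in isolation, with illustrative values:

	#include <stdbool.h>
	#include <stdio.h>

	static bool vmid_switch(unsigned long current_ctx, unsigned long ctx,
				int old_vmid, int vmid)
	{
		return (current_ctx == ctx) && (old_vmid != vmid);
	}

	int main(void)
	{
		/* same context resubmitted on a different VMID: sync needed */
		printf("%d\n", vmid_switch(7, 7, 1, 2));	/* 1 */
		/* context change is handled by the existing paths: no extra sync */
		printf("%d\n", vmid_switch(7, 8, 1, 2));	/* 0 */
		return 0;
	}
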
@@ -183,13 +183,6 @@ static int amdgpu_pp_sw_fini(void *handle)
	if (ret)
		return ret;

#ifdef CONFIG_DRM_AMD_POWERPLAY
	if (adev->pp_enabled) {
		amdgpu_pm_sysfs_fini(adev);
		amd_powerplay_fini(adev->powerplay.pp_handle);
	}
#endif

	return ret;
}

@@ -223,6 +216,22 @@ static int amdgpu_pp_hw_fini(void *handle)
	return ret;
}

static void amdgpu_pp_late_fini(void *handle)
{
#ifdef CONFIG_DRM_AMD_POWERPLAY
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->pp_enabled) {
		amdgpu_pm_sysfs_fini(adev);
		amd_powerplay_fini(adev->powerplay.pp_handle);
	}

	if (adev->powerplay.ip_funcs->late_fini)
		adev->powerplay.ip_funcs->late_fini(
			adev->powerplay.pp_handle);
#endif
}

static int amdgpu_pp_suspend(void *handle)
{
	int ret = 0;

@@ -311,6 +320,7 @@ const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
	.sw_fini = amdgpu_pp_sw_fini,
	.hw_init = amdgpu_pp_hw_init,
	.hw_fini = amdgpu_pp_hw_fini,
	.late_fini = amdgpu_pp_late_fini,
	.suspend = amdgpu_pp_suspend,
	.resume = amdgpu_pp_resume,
	.is_idle = amdgpu_pp_is_idle,

@@ -343,6 +343,7 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
	ring->ring = NULL;
	ring->ring_obj = NULL;

	amdgpu_wb_free(ring->adev, ring->cond_exe_offs);
	amdgpu_wb_free(ring->adev, ring->fence_offs);
	amdgpu_wb_free(ring->adev, ring->rptr_offs);
	amdgpu_wb_free(ring->adev, ring->wptr_offs);

@@ -115,6 +115,7 @@ int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
		return r;
	}
	r = amdgpu_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
	memset(sa_manager->cpu_ptr, 0, sa_manager->size);
	amdgpu_bo_unreserve(sa_manager->bo);
	return r;
}

@@ -253,19 +253,20 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
	int r;

	if (adev->uvd.vcpu_bo == NULL)
		return 0;
	kfree(adev->uvd.saved_bo);

	amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);

	r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
	if (!r) {
		amdgpu_bo_kunmap(adev->uvd.vcpu_bo);
		amdgpu_bo_unpin(adev->uvd.vcpu_bo);
		amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
	}
	if (adev->uvd.vcpu_bo) {
		r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
		if (!r) {
			amdgpu_bo_kunmap(adev->uvd.vcpu_bo);
			amdgpu_bo_unpin(adev->uvd.vcpu_bo);
			amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
		}

	amdgpu_bo_unref(&adev->uvd.vcpu_bo);
		amdgpu_bo_unref(&adev->uvd.vcpu_bo);
	}

	amdgpu_ring_fini(&adev->uvd.ring);

@@ -298,7 +298,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring,
		    unsigned vm_id, uint64_t pd_addr,
		    uint32_t gds_base, uint32_t gds_size,
		    uint32_t gws_base, uint32_t gws_size,
		    uint32_t oa_base, uint32_t oa_size)
		    uint32_t oa_base, uint32_t oa_size,
		    bool vmid_switch)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];

@@ -312,8 +313,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring,
	int r;

	if (ring->funcs->emit_pipeline_sync && (
	    pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed ||
	    ring->type == AMDGPU_RING_TYPE_COMPUTE))
	    pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed || vmid_switch))
		amdgpu_ring_emit_pipeline_sync(ring);

	if (ring->funcs->emit_vm_flush &&

@@ -6221,6 +6221,9 @@ static int ci_dpm_sw_fini(void *handle)
	ci_dpm_fini(adev);
	mutex_unlock(&adev->pm.mutex);

	release_firmware(adev->pm.fw);
	adev->pm.fw = NULL;

	return 0;
}

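This release-and-clear pair is the idiom behind all of the firmware-leak fixes that follow (cik_sdma, the fiji/iceland/tonga dpm blocks, gfx_v7/v8 and both sdma versions): every pointer handed out by request_firmware() is dropped exactly once and then NULLed, so a repeated fini is harmless. Reduced to standalone C, with free() standing in for release_firmware(), which likewise accepts NULL:

	#include <stdio.h>
	#include <stdlib.h>

	struct fw_blob { void *data; };

	static void release_blob(struct fw_blob *fw)
	{
		free(fw->data);
		fw->data = NULL;	/* a second call is now a no-op */
	}

	int main(void)
	{
		struct fw_blob fw = { malloc(16) };

		release_blob(&fw);
		release_blob(&fw);	/* no double free */
		printf("data = %p\n", fw.data);
		return 0;
	}
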
@@ -66,6 +66,16 @@ MODULE_FIRMWARE("radeon/mullins_sdma1.bin");

u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);


static void cik_sdma_free_microcode(struct amdgpu_device *adev)
{
	int i;
	for (i = 0; i < adev->sdma.num_instances; i++) {
		release_firmware(adev->sdma.instance[i].fw);
		adev->sdma.instance[i].fw = NULL;
	}
}

/*
 * sDMA - System DMA
 * Starting with CIK, the GPU has new asynchronous

@@ -419,6 +429,8 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
		/* Initialize the ring buffer's read and write pointers */
		WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
		WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
		WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);

		/* set the wb address whether it's enabled or not */
		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],

@@ -446,7 +458,12 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);

		ring->ready = true;
	}

	cik_sdma_enable(adev, true);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		r = amdgpu_ring_test_ring(ring);
		if (r) {
			ring->ready = false;

@@ -529,8 +546,8 @@ static int cik_sdma_start(struct amdgpu_device *adev)
	if (r)
		return r;

	/* unhalt the MEs */
	cik_sdma_enable(adev, true);
	/* halt the engine before programming */
	cik_sdma_enable(adev, false);

	/* start the gfx rings and rlc compute queues */
	r = cik_sdma_gfx_resume(adev);

@@ -998,6 +1015,7 @@ static int cik_sdma_sw_fini(void *handle)
	for (i = 0; i < adev->sdma.num_instances; i++)
		amdgpu_ring_fini(&adev->sdma.instance[i].ring);

	cik_sdma_free_microcode(adev);
	return 0;
}

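The paired hunks in cik_sdma_start() and cik_sdma_gfx_resume() invert the old order: the engine is now halted while the ring registers are programmed and only unhalted afterwards, right before the ring tests. A toy model of that rule (illustrative, not kernel API):

	#include <assert.h>
	#include <stdbool.h>

	struct engine { bool running; unsigned int rb_base; };

	static void engine_enable(struct engine *e, bool on)
	{
		e->running = on;
	}

	static void program_ring(struct engine *e, unsigned int base)
	{
		assert(!e->running);	/* must be halted while reprogramming */
		e->rb_base = base;
	}

	int main(void)
	{
		struct engine e = { .running = true };

		engine_enable(&e, false);	/* halt first */
		program_ring(&e, 0x1000);
		engine_enable(&e, true);	/* then unhalt and test */
		return 0;
	}

Programming ring registers while the engine runs risks the DMA engine fetching from a half-configured ring; the same reordering is applied to sdma_v2_4 and sdma_v3_0 below.
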
@@ -72,6 +72,11 @@ static int fiji_dpm_sw_init(void *handle)

static int fiji_dpm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	release_firmware(adev->pm.fw);
	adev->pm.fw = NULL;

	return 0;
}

@@ -991,6 +991,22 @@ out:
	return err;
}

static void gfx_v7_0_free_microcode(struct amdgpu_device *adev)
{
	release_firmware(adev->gfx.pfp_fw);
	adev->gfx.pfp_fw = NULL;
	release_firmware(adev->gfx.me_fw);
	adev->gfx.me_fw = NULL;
	release_firmware(adev->gfx.ce_fw);
	adev->gfx.ce_fw = NULL;
	release_firmware(adev->gfx.mec_fw);
	adev->gfx.mec_fw = NULL;
	release_firmware(adev->gfx.mec2_fw);
	adev->gfx.mec2_fw = NULL;
	release_firmware(adev->gfx.rlc_fw);
	adev->gfx.rlc_fw = NULL;
}

/**
 * gfx_v7_0_tiling_mode_table_init - init the hw tiling table
 *

@@ -4489,6 +4505,7 @@ static int gfx_v7_0_sw_fini(void *handle)
	gfx_v7_0_cp_compute_fini(adev);
	gfx_v7_0_rlc_fini(adev);
	gfx_v7_0_mec_fini(adev);
	gfx_v7_0_free_microcode(adev);

	return 0;
}

@@ -836,6 +836,26 @@ err1:
	return r;
}


static void gfx_v8_0_free_microcode(struct amdgpu_device *adev) {
	release_firmware(adev->gfx.pfp_fw);
	adev->gfx.pfp_fw = NULL;
	release_firmware(adev->gfx.me_fw);
	adev->gfx.me_fw = NULL;
	release_firmware(adev->gfx.ce_fw);
	adev->gfx.ce_fw = NULL;
	release_firmware(adev->gfx.rlc_fw);
	adev->gfx.rlc_fw = NULL;
	release_firmware(adev->gfx.mec_fw);
	adev->gfx.mec_fw = NULL;
	if ((adev->asic_type != CHIP_STONEY) &&
	    (adev->asic_type != CHIP_TOPAZ))
		release_firmware(adev->gfx.mec2_fw);
	adev->gfx.mec2_fw = NULL;

	kfree(adev->gfx.rlc.register_list_format);
}

static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;

@@ -1983,7 +2003,7 @@ static int gfx_v8_0_sw_fini(void *handle)

	gfx_v8_0_rlc_fini(adev);

	kfree(adev->gfx.rlc.register_list_format);
	gfx_v8_0_free_microcode(adev);

	return 0;
}

@@ -3974,11 +3994,15 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
		amdgpu_ring_write(ring, 0x3a00161a);
		amdgpu_ring_write(ring, 0x0000002e);
		break;
	case CHIP_TOPAZ:
	case CHIP_CARRIZO:
		amdgpu_ring_write(ring, 0x00000002);
		amdgpu_ring_write(ring, 0x00000000);
		break;
	case CHIP_TOPAZ:
		amdgpu_ring_write(ring, adev->gfx.config.num_rbs == 1 ?
				  0x00000000 : 0x00000002);
		amdgpu_ring_write(ring, 0x00000000);
		break;
	case CHIP_STONEY:
		amdgpu_ring_write(ring, 0x00000000);
		amdgpu_ring_write(ring, 0x00000000);

@@ -72,6 +72,11 @@ static int iceland_dpm_sw_init(void *handle)

static int iceland_dpm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	release_firmware(adev->pm.fw);
	adev->pm.fw = NULL;

	return 0;
}

@@ -105,6 +105,15 @@ static void sdma_v2_4_init_golden_registers(struct amdgpu_device *adev)
	}
}

static void sdma_v2_4_free_microcode(struct amdgpu_device *adev)
{
	int i;
	for (i = 0; i < adev->sdma.num_instances; i++) {
		release_firmware(adev->sdma.instance[i].fw);
		adev->sdma.instance[i].fw = NULL;
	}
}

/**
 * sdma_v2_4_init_microcode - load ucode images from disk
 *

@@ -461,6 +470,8 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
		/* Initialize the ring buffer's read and write pointers */
		WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
		WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
		WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);

		/* set the wb address whether it's enabled or not */
		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],

@@ -489,7 +500,11 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);

		ring->ready = true;
	}

	sdma_v2_4_enable(adev, true);
	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		r = amdgpu_ring_test_ring(ring);
		if (r) {
			ring->ready = false;

@@ -580,8 +595,8 @@ static int sdma_v2_4_start(struct amdgpu_device *adev)
		return -EINVAL;
	}

	/* unhalt the MEs */
	sdma_v2_4_enable(adev, true);
	/* halt the engine before programming */
	sdma_v2_4_enable(adev, false);

	/* start the gfx rings and rlc compute queues */
	r = sdma_v2_4_gfx_resume(adev);

@@ -1012,6 +1027,7 @@ static int sdma_v2_4_sw_fini(void *handle)
	for (i = 0; i < adev->sdma.num_instances; i++)
		amdgpu_ring_fini(&adev->sdma.instance[i].ring);

	sdma_v2_4_free_microcode(adev);
	return 0;
}

@@ -236,6 +236,15 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
	}
}

static void sdma_v3_0_free_microcode(struct amdgpu_device *adev)
{
	int i;
	for (i = 0; i < adev->sdma.num_instances; i++) {
		release_firmware(adev->sdma.instance[i].fw);
		adev->sdma.instance[i].fw = NULL;
	}
}

/**
 * sdma_v3_0_init_microcode - load ucode images from disk
 *

@@ -672,6 +681,8 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
		/* Initialize the ring buffer's read and write pointers */
		WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
		WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
		WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);

		/* set the wb address whether it's enabled or not */
		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],

@@ -711,7 +722,15 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);

		ring->ready = true;
	}

	/* unhalt the MEs */
	sdma_v3_0_enable(adev, true);
	/* enable sdma ring preemption */
	sdma_v3_0_ctx_switch_enable(adev, true);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		r = amdgpu_ring_test_ring(ring);
		if (r) {
			ring->ready = false;

@@ -804,10 +823,9 @@ static int sdma_v3_0_start(struct amdgpu_device *adev)
		}
	}

	/* unhalt the MEs */
	sdma_v3_0_enable(adev, true);
	/* enable sdma ring preemption */
	sdma_v3_0_ctx_switch_enable(adev, true);
	/* disable sdma engine before programming it */
	sdma_v3_0_ctx_switch_enable(adev, false);
	sdma_v3_0_enable(adev, false);

	/* start the gfx rings and rlc compute queues */
	r = sdma_v3_0_gfx_resume(adev);

@@ -1247,6 +1265,7 @@ static int sdma_v3_0_sw_fini(void *handle)
	for (i = 0; i < adev->sdma.num_instances; i++)
		amdgpu_ring_fini(&adev->sdma.instance[i].ring);

	sdma_v3_0_free_microcode(adev);
	return 0;
}

@@ -71,6 +71,11 @@ static int tonga_dpm_sw_init(void *handle)

static int tonga_dpm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	release_firmware(adev->pm.fw);
	adev->pm.fw = NULL;

	return 0;
}

@@ -157,6 +157,7 @@ struct amd_ip_funcs {
	int (*hw_init)(void *handle);
	/* tears down the hw state */
	int (*hw_fini)(void *handle);
	void (*late_fini)(void *handle);
	/* handles IP specific hw/sw changes for suspend */
	int (*suspend)(void *handle);
	/* handles IP specific hw/sw changes for resume */

@@ -581,6 +581,9 @@ typedef int (*cgs_get_firmware_info)(struct cgs_device *cgs_device,
					enum cgs_ucode_id type,
					struct cgs_firmware_info *info);

typedef int (*cgs_rel_firmware)(struct cgs_device *cgs_device,
					enum cgs_ucode_id type);

typedef int(*cgs_set_powergating_state)(struct cgs_device *cgs_device,
					enum amd_ip_block_type block_type,
					enum amd_powergating_state state);

@@ -645,6 +648,7 @@ struct cgs_ops {
	cgs_set_camera_voltages_t set_camera_voltages;
	/* Firmware Info */
	cgs_get_firmware_info get_firmware_info;
	cgs_rel_firmware rel_firmware;
	/* cg pg interface*/
	cgs_set_powergating_state set_powergating_state;
	cgs_set_clockgating_state set_clockgating_state;

@@ -738,6 +742,8 @@ struct cgs_device
	CGS_CALL(set_camera_voltages,dev,mask,voltages)
#define cgs_get_firmware_info(dev, type, info)	\
	CGS_CALL(get_firmware_info, dev, type, info)
#define cgs_rel_firmware(dev, type)	\
	CGS_CALL(rel_firmware, dev, type)
#define cgs_set_powergating_state(dev, block_type, state) \
	CGS_CALL(set_powergating_state, dev, block_type, state)
#define cgs_set_clockgating_state(dev, block_type, state) \

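These three hunks are the complete wiring pattern for a new CGS entry point: a function-pointer typedef, a slot in cgs_ops (filled by amdgpu_cgs_rel_firmware above), and a CGS_CALL-style convenience macro. The same plumbing in a self-contained toy, with all names invented:

	#include <stdio.h>

	struct toy_device;

	typedef int (*toy_rel_firmware)(struct toy_device *dev, int type);

	struct toy_ops {
		toy_rel_firmware rel_firmware;
	};

	struct toy_device {
		const struct toy_ops *ops;
	};

	/* dispatch through the ops table, like CGS_CALL */
	#define TOY_CALL(func, dev, ...) ((dev)->ops->func(dev, __VA_ARGS__))
	#define toy_rel_fw(dev, type) TOY_CALL(rel_firmware, dev, type)

	static int impl_rel_firmware(struct toy_device *dev, int type)
	{
		(void)dev;
		printf("releasing firmware type %d\n", type);
		return 0;
	}

	static const struct toy_ops ops = { impl_rel_firmware };

	int main(void)
	{
		struct toy_device dev = { &ops };

		return toy_rel_fw(&dev, 1);
	}
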
@@ -73,11 +73,14 @@ static int pp_sw_init(void *handle)

	ret = hwmgr->hwmgr_func->backend_init(hwmgr);
	if (ret)
		goto err;
		goto err1;

	pr_info("amdgpu: powerplay initialized\n");

	return 0;
err1:
	if (hwmgr->pptable_func->pptable_fini)
		hwmgr->pptable_func->pptable_fini(hwmgr);
err:
	pr_err("amdgpu: powerplay initialization failed\n");
	return ret;

@@ -100,6 +103,9 @@ static int pp_sw_fini(void *handle)
	if (hwmgr->hwmgr_func->backend_fini != NULL)
		ret = hwmgr->hwmgr_func->backend_fini(hwmgr);

	if (hwmgr->pptable_func->pptable_fini)
		hwmgr->pptable_func->pptable_fini(hwmgr);

	return ret;
}

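pp_sw_init() now uses the standard two-label ladder: a failure in backend_init() jumps to err1, which tears down the pptable set up earlier and then falls through to the plain err report, while earlier failures skip the teardown. The shape of that ladder in a standalone sketch (the step functions are invented):

	#include <stdio.h>

	static int setup_table(void) { puts("table ready"); return 0; }
	static void teardown_table(void) { puts("table freed"); }
	static int backend_init(void) { return -1; /* simulate failure */ }

	static int sw_init(void)
	{
		int ret;

		ret = setup_table();
		if (ret)
			goto err;	/* nothing to undo yet */

		ret = backend_init();
		if (ret)
			goto err1;	/* undo the table, then report */

		return 0;
	err1:
		teardown_table();
	err:
		fprintf(stderr, "init failed\n");
		return ret;
	}

	int main(void)
	{
		return sw_init() ? 1 : 0;
	}
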
@@ -58,9 +58,6 @@ static void pem_fini(struct pp_eventmgr *eventmgr)
	pem_unregister_interrupts(eventmgr);

	pem_handle_event(eventmgr, AMD_PP_EVENT_UNINITIALIZE, &event_data);

	if (eventmgr != NULL)
		kfree(eventmgr);
}

int eventmgr_init(struct pp_instance *handle)

@@ -1830,7 +1830,7 @@ static uint16_t fiji_find_closest_vddci(struct pp_hwmgr *hwmgr, uint16_t vddci)

	PP_ASSERT_WITH_CODE(false,
			"VDDCI is larger than max VDDCI in VDDCI Voltage Table!",
			return vddci_table->entries[i].value);
			return vddci_table->entries[i-1].value);
}

static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,

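This fallback (repeated in phm_find_closest_vddci() below) only fires after the lookup loop has exhausted the table, at which point the index already sits one past the last entry: entries[i] would be an out-of-bounds read, and entries[i-1] is the largest valid value to clamp to. A standalone model of the off-by-one (table contents invented):

	#include <stdio.h>

	int main(void)
	{
		unsigned short table[] = { 800, 900, 1000 };	/* ascending mV */
		unsigned int count = sizeof(table) / sizeof(table[0]);
		unsigned short vddci = 1100;			/* above the max */
		unsigned int i;

		for (i = 0; i < count; i++)
			if (vddci <= table[i])
				break;

		/* loop exhausted: i == count, so table[i] is out of
		 * bounds; clamp to the largest real entry instead */
		printf("clamped to %u mV\n", table[i - 1]);
		return 0;
	}
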
@@ -93,6 +93,13 @@ int hwmgr_fini(struct pp_hwmgr *hwmgr)
	if (hwmgr == NULL || hwmgr->ps == NULL)
		return -EINVAL;

	/* do hwmgr finish*/
	kfree(hwmgr->backend);

	kfree(hwmgr->start_thermal_controller.function_list);

	kfree(hwmgr->set_temperature_range.function_list);

	kfree(hwmgr->ps);
	kfree(hwmgr);
	return 0;

@@ -462,7 +469,7 @@ uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, u

	PP_ASSERT_WITH_CODE(false,
			"VDDCI is larger than max VDDCI in VDDCI Voltage Table!",
			return vddci_table->entries[i].value);
			return vddci_table->entries[i-1].value);
}

int phm_find_boot_level(void *table,

@@ -286,7 +286,7 @@ int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr)

		if (polaris10_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
				(uint8_t *)&data->power_tune_table,
				sizeof(struct SMU74_Discrete_PmFuses), data->sram_end))
				(sizeof(struct SMU74_Discrete_PmFuses) - 92), data->sram_end))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to download PmFuseTable Failed!",
					return -EINVAL);

Some files were not shown because too many files have changed in this diff.