Merge 4.9-rc3 into tty-next

We want the serial/tty fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Committed by Greg Kroah-Hartman on 2016-10-30 06:42:10 -04:00
commit 5131dcd781
288 changed files with 2601 additions and 1917 deletions


@ -1864,10 +1864,11 @@ S: The Netherlands
N: Martin Kepplinger N: Martin Kepplinger
E: martink@posteo.de E: martink@posteo.de
E: martin.kepplinger@theobroma-systems.com E: martin.kepplinger@ginzinger.com
W: http://www.martinkepplinger.com W: http://www.martinkepplinger.com
D: mma8452 accelerators iio driver D: mma8452 accelerators iio driver
D: Kernel cleanups D: pegasus_notetaker input driver
D: Kernel fixes and cleanups
S: Garnisonstraße 26 S: Garnisonstraße 26
S: 4020 Linz S: 4020 Linz
S: Austria S: Austria


@ -309,3 +309,4 @@ Version History
with a reshape in progress. with a reshape in progress.
1.9.0 Add support for RAID level takeover/reshape/region size 1.9.0 Add support for RAID level takeover/reshape/region size
and set size reduction. and set size reduction.
1.9.1 Fix activation of existing RAID 4/10 mapped devices


@ -24,7 +24,7 @@ Example:
reg = <0x61840000 0x4000>; reg = <0x61840000 0x4000>;
clock { clock {
compatible = "socionext,uniphier-ld20-clock"; compatible = "socionext,uniphier-ld11-clock";
#clock-cells = <1>; #clock-cells = <1>;
}; };
@ -43,8 +43,8 @@ Provided clocks:
21: USB3 ch1 PHY1 21: USB3 ch1 PHY1
Media I/O (MIO) clock Media I/O (MIO) clock, SD clock
--------------------- -------------------------------
Required properties: Required properties:
- compatible: should be one of the following: - compatible: should be one of the following:
@ -52,10 +52,10 @@ Required properties:
"socionext,uniphier-ld4-mio-clock" - for LD4 SoC. "socionext,uniphier-ld4-mio-clock" - for LD4 SoC.
"socionext,uniphier-pro4-mio-clock" - for Pro4 SoC. "socionext,uniphier-pro4-mio-clock" - for Pro4 SoC.
"socionext,uniphier-sld8-mio-clock" - for sLD8 SoC. "socionext,uniphier-sld8-mio-clock" - for sLD8 SoC.
"socionext,uniphier-pro5-mio-clock" - for Pro5 SoC. "socionext,uniphier-pro5-sd-clock" - for Pro5 SoC.
"socionext,uniphier-pxs2-mio-clock" - for PXs2/LD6b SoC. "socionext,uniphier-pxs2-sd-clock" - for PXs2/LD6b SoC.
"socionext,uniphier-ld11-mio-clock" - for LD11 SoC. "socionext,uniphier-ld11-mio-clock" - for LD11 SoC.
"socionext,uniphier-ld20-mio-clock" - for LD20 SoC. "socionext,uniphier-ld20-sd-clock" - for LD20 SoC.
- #clock-cells: should be 1. - #clock-cells: should be 1.
Example: Example:
@ -66,7 +66,7 @@ Example:
reg = <0x59810000 0x800>; reg = <0x59810000 0x800>;
clock { clock {
compatible = "socionext,uniphier-ld20-mio-clock"; compatible = "socionext,uniphier-ld11-mio-clock";
#clock-cells = <1>; #clock-cells = <1>;
}; };
@ -112,7 +112,7 @@ Example:
reg = <0x59820000 0x200>; reg = <0x59820000 0x200>;
clock { clock {
compatible = "socionext,uniphier-ld20-peri-clock"; compatible = "socionext,uniphier-ld11-peri-clock";
#clock-cells = <1>; #clock-cells = <1>;
}; };


@ -6,25 +6,25 @@ System reset
Required properties: Required properties:
- compatible: should be one of the following: - compatible: should be one of the following:
"socionext,uniphier-sld3-reset" - for PH1-sLD3 SoC. "socionext,uniphier-sld3-reset" - for sLD3 SoC.
"socionext,uniphier-ld4-reset" - for PH1-LD4 SoC. "socionext,uniphier-ld4-reset" - for LD4 SoC.
"socionext,uniphier-pro4-reset" - for PH1-Pro4 SoC. "socionext,uniphier-pro4-reset" - for Pro4 SoC.
"socionext,uniphier-sld8-reset" - for PH1-sLD8 SoC. "socionext,uniphier-sld8-reset" - for sLD8 SoC.
"socionext,uniphier-pro5-reset" - for PH1-Pro5 SoC. "socionext,uniphier-pro5-reset" - for Pro5 SoC.
"socionext,uniphier-pxs2-reset" - for ProXstream2/PH1-LD6b SoC. "socionext,uniphier-pxs2-reset" - for PXs2/LD6b SoC.
"socionext,uniphier-ld11-reset" - for PH1-LD11 SoC. "socionext,uniphier-ld11-reset" - for LD11 SoC.
"socionext,uniphier-ld20-reset" - for PH1-LD20 SoC. "socionext,uniphier-ld20-reset" - for LD20 SoC.
- #reset-cells: should be 1. - #reset-cells: should be 1.
Example: Example:
sysctrl@61840000 { sysctrl@61840000 {
compatible = "socionext,uniphier-ld20-sysctrl", compatible = "socionext,uniphier-ld11-sysctrl",
"simple-mfd", "syscon"; "simple-mfd", "syscon";
reg = <0x61840000 0x4000>; reg = <0x61840000 0x4000>;
reset { reset {
compatible = "socionext,uniphier-ld20-reset"; compatible = "socionext,uniphier-ld11-reset";
#reset-cells = <1>; #reset-cells = <1>;
}; };
@ -32,30 +32,30 @@ Example:
}; };
Media I/O (MIO) reset Media I/O (MIO) reset, SD reset
--------------------- -------------------------------
Required properties: Required properties:
- compatible: should be one of the following: - compatible: should be one of the following:
"socionext,uniphier-sld3-mio-reset" - for PH1-sLD3 SoC. "socionext,uniphier-sld3-mio-reset" - for sLD3 SoC.
"socionext,uniphier-ld4-mio-reset" - for PH1-LD4 SoC. "socionext,uniphier-ld4-mio-reset" - for LD4 SoC.
"socionext,uniphier-pro4-mio-reset" - for PH1-Pro4 SoC. "socionext,uniphier-pro4-mio-reset" - for Pro4 SoC.
"socionext,uniphier-sld8-mio-reset" - for PH1-sLD8 SoC. "socionext,uniphier-sld8-mio-reset" - for sLD8 SoC.
"socionext,uniphier-pro5-mio-reset" - for PH1-Pro5 SoC. "socionext,uniphier-pro5-sd-reset" - for Pro5 SoC.
"socionext,uniphier-pxs2-mio-reset" - for ProXstream2/PH1-LD6b SoC. "socionext,uniphier-pxs2-sd-reset" - for PXs2/LD6b SoC.
"socionext,uniphier-ld11-mio-reset" - for PH1-LD11 SoC. "socionext,uniphier-ld11-mio-reset" - for LD11 SoC.
"socionext,uniphier-ld20-mio-reset" - for PH1-LD20 SoC. "socionext,uniphier-ld20-sd-reset" - for LD20 SoC.
- #reset-cells: should be 1. - #reset-cells: should be 1.
Example: Example:
mioctrl@59810000 { mioctrl@59810000 {
compatible = "socionext,uniphier-ld20-mioctrl", compatible = "socionext,uniphier-ld11-mioctrl",
"simple-mfd", "syscon"; "simple-mfd", "syscon";
reg = <0x59810000 0x800>; reg = <0x59810000 0x800>;
reset { reset {
compatible = "socionext,uniphier-ld20-mio-reset"; compatible = "socionext,uniphier-ld11-mio-reset";
#reset-cells = <1>; #reset-cells = <1>;
}; };
@ -68,24 +68,24 @@ Peripheral reset
Required properties: Required properties:
- compatible: should be one of the following: - compatible: should be one of the following:
"socionext,uniphier-ld4-peri-reset" - for PH1-LD4 SoC. "socionext,uniphier-ld4-peri-reset" - for LD4 SoC.
"socionext,uniphier-pro4-peri-reset" - for PH1-Pro4 SoC. "socionext,uniphier-pro4-peri-reset" - for Pro4 SoC.
"socionext,uniphier-sld8-peri-reset" - for PH1-sLD8 SoC. "socionext,uniphier-sld8-peri-reset" - for sLD8 SoC.
"socionext,uniphier-pro5-peri-reset" - for PH1-Pro5 SoC. "socionext,uniphier-pro5-peri-reset" - for Pro5 SoC.
"socionext,uniphier-pxs2-peri-reset" - for ProXstream2/PH1-LD6b SoC. "socionext,uniphier-pxs2-peri-reset" - for PXs2/LD6b SoC.
"socionext,uniphier-ld11-peri-reset" - for PH1-LD11 SoC. "socionext,uniphier-ld11-peri-reset" - for LD11 SoC.
"socionext,uniphier-ld20-peri-reset" - for PH1-LD20 SoC. "socionext,uniphier-ld20-peri-reset" - for LD20 SoC.
- #reset-cells: should be 1. - #reset-cells: should be 1.
Example: Example:
perictrl@59820000 { perictrl@59820000 {
compatible = "socionext,uniphier-ld20-perictrl", compatible = "socionext,uniphier-ld11-perictrl",
"simple-mfd", "syscon"; "simple-mfd", "syscon";
reg = <0x59820000 0x200>; reg = <0x59820000 0x200>;
reset { reset {
compatible = "socionext,uniphier-ld20-peri-reset"; compatible = "socionext,uniphier-ld11-peri-reset";
#reset-cells = <1>; #reset-cells = <1>;
}; };


@ -1,7 +1,9 @@
Binding for Cadence UART Controller Binding for Cadence UART Controller
Required properties: Required properties:
- compatible : should be "cdns,uart-r1p8", or "xlnx,xuartps" - compatible :
Use "xlnx,xuartps","cdns,uart-r1p8" for Zynq-7xxx SoC.
Use "xlnx,zynqmp-uart","cdns,uart-r1p12" for Zynq Ultrascale+ MPSoC.
- reg: Should contain UART controller registers location and length. - reg: Should contain UART controller registers location and length.
- interrupts: Should contain UART controller interrupts. - interrupts: Should contain UART controller interrupts.
- clocks: Must contain phandles to the UART clocks - clocks: Must contain phandles to the UART clocks


@ -9,6 +9,14 @@ Required properties:
- "renesas,scifb-r8a73a4" for R8A73A4 (R-Mobile APE6) SCIFB compatible UART. - "renesas,scifb-r8a73a4" for R8A73A4 (R-Mobile APE6) SCIFB compatible UART.
- "renesas,scifa-r8a7740" for R8A7740 (R-Mobile A1) SCIFA compatible UART. - "renesas,scifa-r8a7740" for R8A7740 (R-Mobile A1) SCIFA compatible UART.
- "renesas,scifb-r8a7740" for R8A7740 (R-Mobile A1) SCIFB compatible UART. - "renesas,scifb-r8a7740" for R8A7740 (R-Mobile A1) SCIFB compatible UART.
- "renesas,scif-r8a7743" for R8A7743 (RZ/G1M) SCIF compatible UART.
- "renesas,scifa-r8a7743" for R8A7743 (RZ/G1M) SCIFA compatible UART.
- "renesas,scifb-r8a7743" for R8A7743 (RZ/G1M) SCIFB compatible UART.
- "renesas,hscif-r8a7743" for R8A7743 (RZ/G1M) HSCIF compatible UART.
- "renesas,scif-r8a7745" for R8A7745 (RZ/G1E) SCIF compatible UART.
- "renesas,scifa-r8a7745" for R8A7745 (RZ/G1E) SCIFA compatible UART.
- "renesas,scifb-r8a7745" for R8A7745 (RZ/G1E) SCIFB compatible UART.
- "renesas,hscif-r8a7745" for R8A7745 (RZ/G1E) HSCIF compatible UART.
- "renesas,scif-r8a7778" for R8A7778 (R-Car M1) SCIF compatible UART. - "renesas,scif-r8a7778" for R8A7778 (R-Car M1) SCIF compatible UART.
- "renesas,scif-r8a7779" for R8A7779 (R-Car H1) SCIF compatible UART. - "renesas,scif-r8a7779" for R8A7779 (R-Car H1) SCIF compatible UART.
- "renesas,scif-r8a7790" for R8A7790 (R-Car H2) SCIF compatible UART. - "renesas,scif-r8a7790" for R8A7790 (R-Car H2) SCIF compatible UART.


@ -28,10 +28,7 @@ Refer to phy/phy-bindings.txt for generic phy consumer properties
- g-use-dma: enable dma usage in gadget driver. - g-use-dma: enable dma usage in gadget driver.
- g-rx-fifo-size: size of rx fifo size in gadget mode. - g-rx-fifo-size: size of rx fifo size in gadget mode.
- g-np-tx-fifo-size: size of non-periodic tx fifo size in gadget mode. - g-np-tx-fifo-size: size of non-periodic tx fifo size in gadget mode.
- g-tx-fifo-size: size of periodic tx fifo per endpoint (except ep0) in gadget mode.
Deprecated properties:
- g-tx-fifo-size: size of periodic tx fifo per endpoint (except ep0)
in gadget mode.
Example: Example:


@ -6,7 +6,7 @@ Note that it only applies to the new descriptor-based interface. For a
description of the deprecated integer-based GPIO interface please refer to description of the deprecated integer-based GPIO interface please refer to
gpio-legacy.txt (actually, there is no real mapping possible with the old gpio-legacy.txt (actually, there is no real mapping possible with the old
interface; you just fetch an integer from somewhere and request the interface; you just fetch an integer from somewhere and request the
corresponding GPIO. corresponding GPIO).
All platforms can enable the GPIO library, but if the platform strictly All platforms can enable the GPIO library, but if the platform strictly
requires GPIO functionality to be present, it needs to select GPIOLIB from its requires GPIO functionality to be present, it needs to select GPIOLIB from its
@ -162,6 +162,9 @@ The driver controlling "foo.0" will then be able to obtain its GPIOs as follows:
Since the "led" GPIOs are mapped as active-high, this example will switch their Since the "led" GPIOs are mapped as active-high, this example will switch their
signals to 1, i.e. enabling the LEDs. And for the "power" GPIO, which is mapped signals to 1, i.e. enabling the LEDs. And for the "power" GPIO, which is mapped
as active-low, its actual signal will be 0 after this code. Contrary to the legacy as active-low, its actual signal will be 0 after this code. Contrary to the
integer GPIO interface, the active-low property is handled during mapping and is legacy integer GPIO interface, the active-low property is handled during
thus transparent to GPIO consumers. mapping and is thus transparent to GPIO consumers.
A set of functions such as gpiod_set_value() is available to work with
the new descriptor-oriented interface.
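
For illustration, a minimal consumer-side sketch of the descriptor-based calls mentioned above. The callback name, the "led"/"power" connection IDs and the error handling are assumptions taken from the board-mapping example this file refers to, not code from the patch:

#include <linux/gpio/consumer.h>
#include <linux/err.h>

/* Sketch only: assumes a device with "led" and "power" GPIO mappings
 * as in the board mapping example referred to above. */
static int foo_power_up(struct device *dev)
{
	struct gpio_desc *led, *power;

	led = gpiod_get_index(dev, "led", 0, GPIOD_OUT_LOW);
	if (IS_ERR(led))
		return PTR_ERR(led);

	power = gpiod_get(dev, "power", GPIOD_OUT_LOW);
	if (IS_ERR(power)) {
		gpiod_put(led);
		return PTR_ERR(power);
	}

	/*
	 * Logical value 1 means "asserted".  Because "power" is mapped
	 * active-low, gpiod_set_value(power, 1) drives the physical line
	 * to 0; the polarity is handled by the mapping, exactly as the
	 * paragraph above describes.
	 */
	gpiod_set_value(led, 1);
	gpiod_set_value(power, 1);

	gpiod_put(led);
	gpiod_put(power);
	return 0;
}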


@ -1442,6 +1442,7 @@ F: drivers/cpufreq/mvebu-cpufreq.c
F: arch/arm/configs/mvebu_*_defconfig F: arch/arm/configs/mvebu_*_defconfig
ARM/Marvell Berlin SoC support ARM/Marvell Berlin SoC support
M: Jisheng Zhang <jszhang@marvell.com>
M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained S: Maintained
@ -5287,6 +5288,12 @@ M: Joe Perches <joe@perches.com>
S: Maintained S: Maintained
F: scripts/get_maintainer.pl F: scripts/get_maintainer.pl
GENWQE (IBM Generic Workqueue Card)
M: Frank Haverkamp <haver@linux.vnet.ibm.com>
M: Gabriel Krisman Bertazi <krisman@linux.vnet.ibm.com>
S: Supported
F: drivers/misc/genwqe/
GFS2 FILE SYSTEM GFS2 FILE SYSTEM
M: Steven Whitehouse <swhiteho@redhat.com> M: Steven Whitehouse <swhiteho@redhat.com>
M: Bob Peterson <rpeterso@redhat.com> M: Bob Peterson <rpeterso@redhat.com>
@ -8100,6 +8107,7 @@ S: Maintained
F: drivers/media/dvb-frontends/mn88473* F: drivers/media/dvb-frontends/mn88473*
MODULE SUPPORT MODULE SUPPORT
M: Jessica Yu <jeyu@redhat.com>
M: Rusty Russell <rusty@rustcorp.com.au> M: Rusty Russell <rusty@rustcorp.com.au>
S: Maintained S: Maintained
F: include/linux/module.h F: include/linux/module.h


@ -1,7 +1,7 @@
VERSION = 4 VERSION = 4
PATCHLEVEL = 9 PATCHLEVEL = 9
SUBLEVEL = 0 SUBLEVEL = 0
EXTRAVERSION = -rc2 EXTRAVERSION = -rc3
NAME = Psychotic Stoned Sheep NAME = Psychotic Stoned Sheep
# *DOCUMENTATION* # *DOCUMENTATION*


@ -41,6 +41,8 @@ config ARC
select PERF_USE_VMALLOC select PERF_USE_VMALLOC
select HAVE_DEBUG_STACKOVERFLOW select HAVE_DEBUG_STACKOVERFLOW
select HAVE_GENERIC_DMA_COHERENT select HAVE_GENERIC_DMA_COHERENT
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZMA
config MIGHT_HAVE_PCI config MIGHT_HAVE_PCI
bool bool
@ -186,14 +188,6 @@ if SMP
config ARC_HAS_COH_CACHES config ARC_HAS_COH_CACHES
def_bool n def_bool n
config ARC_MCIP
bool "ARConnect Multicore IP (MCIP) Support "
depends on ISA_ARCV2
help
This IP block enables SMP in ARC-HS38 cores.
It provides for cross-core interrupts, multi-core debug
hardware semaphores, shared memory,....
config NR_CPUS config NR_CPUS
int "Maximum number of CPUs (2-4096)" int "Maximum number of CPUs (2-4096)"
range 2 4096 range 2 4096
@ -211,6 +205,15 @@ config ARC_SMP_HALT_ON_RESET
endif #SMP endif #SMP
config ARC_MCIP
bool "ARConnect Multicore IP (MCIP) Support "
depends on ISA_ARCV2
default y if SMP
help
This IP block enables SMP in ARC-HS38 cores.
It provides for cross-core interrupts, multi-core debug
hardware semaphores, shared memory,....
menuconfig ARC_CACHE menuconfig ARC_CACHE
bool "Enable Cache Support" bool "Enable Cache Support"
default y default y
@ -537,14 +540,6 @@ config ARC_DBG_TLB_PARANOIA
bool "Paranoia Checks in Low Level TLB Handlers" bool "Paranoia Checks in Low Level TLB Handlers"
default n default n
config ARC_DBG_TLB_MISS_COUNT
bool "Profile TLB Misses"
default n
select DEBUG_FS
help
Counts number of I and D TLB Misses and exports them via Debugfs
The counters can be cleared via Debugfs as well
endif endif
config ARC_UBOOT_SUPPORT config ARC_UBOOT_SUPPORT


@ -50,9 +50,6 @@ atleast_gcc44 := $(call cc-ifversion, -ge, 0404, y)
cflags-$(atleast_gcc44) += -fsection-anchors cflags-$(atleast_gcc44) += -fsection-anchors
cflags-$(CONFIG_ARC_HAS_LLSC) += -mlock
cflags-$(CONFIG_ARC_HAS_SWAPE) += -mswape
ifdef CONFIG_ISA_ARCV2 ifdef CONFIG_ISA_ARCV2
ifndef CONFIG_ARC_HAS_LL64 ifndef CONFIG_ARC_HAS_LL64


@ -14,9 +14,15 @@ UIMAGE_ENTRYADDR = $(LINUX_START_TEXT)
suffix-y := bin suffix-y := bin
suffix-$(CONFIG_KERNEL_GZIP) := gz suffix-$(CONFIG_KERNEL_GZIP) := gz
suffix-$(CONFIG_KERNEL_LZMA) := lzma
targets += uImage uImage.bin uImage.gz targets += uImage
extra-y += vmlinux.bin vmlinux.bin.gz targets += uImage.bin
targets += uImage.gz
targets += uImage.lzma
extra-y += vmlinux.bin
extra-y += vmlinux.bin.gz
extra-y += vmlinux.bin.lzma
$(obj)/vmlinux.bin: vmlinux FORCE $(obj)/vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy) $(call if_changed,objcopy)
@ -24,12 +30,18 @@ $(obj)/vmlinux.bin: vmlinux FORCE
$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
$(call if_changed,gzip) $(call if_changed,gzip)
$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
$(call if_changed,lzma)
$(obj)/uImage.bin: $(obj)/vmlinux.bin FORCE $(obj)/uImage.bin: $(obj)/vmlinux.bin FORCE
$(call if_changed,uimage,none) $(call if_changed,uimage,none)
$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE $(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE
$(call if_changed,uimage,gzip) $(call if_changed,uimage,gzip)
$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma FORCE
$(call if_changed,uimage,lzma)
$(obj)/uImage: $(obj)/uImage.$(suffix-y) $(obj)/uImage: $(obj)/uImage.$(suffix-y)
@ln -sf $(notdir $<) $@ @ln -sf $(notdir $<) $@
@echo ' Image $@ is ready' @echo ' Image $@ is ready'


@ -349,10 +349,11 @@ struct cpuinfo_arc {
struct cpuinfo_arc_bpu bpu; struct cpuinfo_arc_bpu bpu;
struct bcr_identity core; struct bcr_identity core;
struct bcr_isa isa; struct bcr_isa isa;
const char *details, *name;
unsigned int vec_base; unsigned int vec_base;
struct cpuinfo_arc_ccm iccm, dccm; struct cpuinfo_arc_ccm iccm, dccm;
struct { struct {
unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, pad1:3, unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, swape:1, pad1:2,
fpu_sp:1, fpu_dp:1, pad2:6, fpu_sp:1, fpu_dp:1, pad2:6,
debug:1, ap:1, smart:1, rtt:1, pad3:4, debug:1, ap:1, smart:1, rtt:1, pad3:4,
timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4; timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4;


@ -53,7 +53,7 @@ extern void arc_cache_init(void);
extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len); extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
extern void read_decode_cache_bcr(void); extern void read_decode_cache_bcr(void);
extern int ioc_exists; extern int ioc_enable;
extern unsigned long perip_base, perip_end; extern unsigned long perip_base, perip_end;
#endif /* !__ASSEMBLY__ */ #endif /* !__ASSEMBLY__ */


@ -54,7 +54,7 @@ extern int elf_check_arch(const struct elf32_hdr *);
* the loader. We need to make sure that it is out of the way of the program * the loader. We need to make sure that it is out of the way of the program
* that it will "exec", and that there is sufficient room for the brk. * that it will "exec", and that there is sufficient room for the brk.
*/ */
#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) #define ELF_ET_DYN_BASE (2UL * TASK_SIZE / 3)
/* /*
* When the program starts, a1 contains a pointer to a function to be * When the program starts, a1 contains a pointer to a function to be
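
The 2UL in the replacement line matters because 2 * TASK_SIZE is otherwise evaluated as a signed 32-bit int and can overflow before the division. A small host-side sketch of that arithmetic follows; the TASK_SIZE value is made up purely for illustration and is not the real ARC definition:

#include <stdio.h>

#define TASK_SIZE 0x60000000	/* illustrative value only */

int main(void)
{
	/* 2 * TASK_SIZE overflows a 32-bit int (undefined behaviour,
	 * typically wrapping negative), so the divided result is not a
	 * usable load base. */
	long bad = 2 * TASK_SIZE / 3;

	/* With 2UL the whole expression is computed as unsigned long. */
	unsigned long good = 2UL * TASK_SIZE / 3;

	printf("without UL: %ld\nwith UL:    %#lx\n", bad, good);
	return 0;
}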


@ -55,6 +55,22 @@ struct mcip_cmd {
#define IDU_M_DISTRI_DEST 0x2 #define IDU_M_DISTRI_DEST 0x2
}; };
struct mcip_bcr {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int pad3:8,
idu:1, llm:1, num_cores:6,
iocoh:1, gfrc:1, dbg:1, pad2:1,
msg:1, sem:1, ipi:1, pad:1,
ver:8;
#else
unsigned int ver:8,
pad:1, ipi:1, sem:1, msg:1,
pad2:1, dbg:1, gfrc:1, iocoh:1,
num_cores:6, llm:1, idu:1,
pad3:8;
#endif
};
/* /*
* MCIP programming model * MCIP programming model
* *


@ -18,6 +18,7 @@
struct mod_arch_specific { struct mod_arch_specific {
void *unw_info; void *unw_info;
int unw_sec_idx; int unw_sec_idx;
const char *secstr;
}; };
#endif #endif


@ -27,11 +27,6 @@ struct id_to_str {
const char *str; const char *str;
}; };
struct cpuinfo_data {
struct id_to_str info;
int up_range;
};
extern int root_mountflags, end_mem; extern int root_mountflags, end_mem;
void setup_processor(void); void setup_processor(void);
@ -43,5 +38,6 @@ void __init setup_arch_memory(void);
#define IS_USED_RUN(v) ((v) ? "" : "(not used) ") #define IS_USED_RUN(v) ((v) ? "" : "(not used) ")
#define IS_USED_CFG(cfg) IS_USED_RUN(IS_ENABLED(cfg)) #define IS_USED_CFG(cfg) IS_USED_RUN(IS_ENABLED(cfg))
#define IS_AVAIL2(v, s, cfg) IS_AVAIL1(v, s), IS_AVAIL1(v, IS_USED_CFG(cfg)) #define IS_AVAIL2(v, s, cfg) IS_AVAIL1(v, s), IS_AVAIL1(v, IS_USED_CFG(cfg))
#define IS_AVAIL3(v, v2, s) IS_AVAIL1(v, s), IS_AVAIL1(v, IS_DISABLED_RUN(v2))
#endif /* __ASMARC_SETUP_H */ #endif /* __ASMARC_SETUP_H */


@ -17,6 +17,7 @@ int sys_clone_wrapper(int, int, int, int, int);
int sys_cacheflush(uint32_t, uint32_t uint32_t); int sys_cacheflush(uint32_t, uint32_t uint32_t);
int sys_arc_settls(void *); int sys_arc_settls(void *);
int sys_arc_gettls(void); int sys_arc_gettls(void);
int sys_arc_usr_cmpxchg(int *, int, int);
#include <asm-generic/syscalls.h> #include <asm-generic/syscalls.h>


@ -27,18 +27,19 @@
#define NR_syscalls __NR_syscalls #define NR_syscalls __NR_syscalls
/* Generic syscall (fs/filesystems.c - lost in asm-generic/unistd.h */
#define __NR_sysfs (__NR_arch_specific_syscall + 3)
/* ARC specific syscall */ /* ARC specific syscall */
#define __NR_cacheflush (__NR_arch_specific_syscall + 0) #define __NR_cacheflush (__NR_arch_specific_syscall + 0)
#define __NR_arc_settls (__NR_arch_specific_syscall + 1) #define __NR_arc_settls (__NR_arch_specific_syscall + 1)
#define __NR_arc_gettls (__NR_arch_specific_syscall + 2) #define __NR_arc_gettls (__NR_arch_specific_syscall + 2)
#define __NR_arc_usr_cmpxchg (__NR_arch_specific_syscall + 4)
__SYSCALL(__NR_cacheflush, sys_cacheflush) __SYSCALL(__NR_cacheflush, sys_cacheflush)
__SYSCALL(__NR_arc_settls, sys_arc_settls) __SYSCALL(__NR_arc_settls, sys_arc_settls)
__SYSCALL(__NR_arc_gettls, sys_arc_gettls) __SYSCALL(__NR_arc_gettls, sys_arc_gettls)
__SYSCALL(__NR_arc_usr_cmpxchg, sys_arc_usr_cmpxchg)
/* Generic syscall (fs/filesystems.c - lost in asm-generic/unistd.h */
#define __NR_sysfs (__NR_arch_specific_syscall + 3)
__SYSCALL(__NR_sysfs, sys_sysfs) __SYSCALL(__NR_sysfs, sys_sysfs)
#undef __SYSCALL #undef __SYSCALL


@ -15,11 +15,12 @@
#include <asm/mcip.h> #include <asm/mcip.h>
#include <asm/setup.h> #include <asm/setup.h>
static char smp_cpuinfo_buf[128];
static int idu_detected;
static DEFINE_RAW_SPINLOCK(mcip_lock); static DEFINE_RAW_SPINLOCK(mcip_lock);
#ifdef CONFIG_SMP
static char smp_cpuinfo_buf[128];
static void mcip_setup_per_cpu(int cpu) static void mcip_setup_per_cpu(int cpu)
{ {
smp_ipi_irq_setup(cpu, IPI_IRQ); smp_ipi_irq_setup(cpu, IPI_IRQ);
@ -86,21 +87,7 @@ static void mcip_ipi_clear(int irq)
static void mcip_probe_n_setup(void) static void mcip_probe_n_setup(void)
{ {
struct mcip_bcr { struct mcip_bcr mp;
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int pad3:8,
idu:1, llm:1, num_cores:6,
iocoh:1, gfrc:1, dbg:1, pad2:1,
msg:1, sem:1, ipi:1, pad:1,
ver:8;
#else
unsigned int ver:8,
pad:1, ipi:1, sem:1, msg:1,
pad2:1, dbg:1, gfrc:1, iocoh:1,
num_cores:6, llm:1, idu:1,
pad3:8;
#endif
} mp;
READ_BCR(ARC_REG_MCIP_BCR, mp); READ_BCR(ARC_REG_MCIP_BCR, mp);
@ -114,7 +101,6 @@ static void mcip_probe_n_setup(void)
IS_AVAIL1(mp.gfrc, "GFRC")); IS_AVAIL1(mp.gfrc, "GFRC"));
cpuinfo_arc700[0].extn.gfrc = mp.gfrc; cpuinfo_arc700[0].extn.gfrc = mp.gfrc;
idu_detected = mp.idu;
if (mp.dbg) { if (mp.dbg) {
__mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, 0xf); __mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, 0xf);
@ -130,6 +116,8 @@ struct plat_smp_ops plat_smp_ops = {
.ipi_clear = mcip_ipi_clear, .ipi_clear = mcip_ipi_clear,
}; };
#endif
/*************************************************************************** /***************************************************************************
* ARCv2 Interrupt Distribution Unit (IDU) * ARCv2 Interrupt Distribution Unit (IDU)
* *
@ -295,8 +283,11 @@ idu_of_init(struct device_node *intc, struct device_node *parent)
/* Read IDU BCR to confirm nr_irqs */ /* Read IDU BCR to confirm nr_irqs */
int nr_irqs = of_irq_count(intc); int nr_irqs = of_irq_count(intc);
int i, irq; int i, irq;
struct mcip_bcr mp;
if (!idu_detected) READ_BCR(ARC_REG_MCIP_BCR, mp);
if (!mp.idu)
panic("IDU not detected, but DeviceTree using it"); panic("IDU not detected, but DeviceTree using it");
pr_info("MCIP: IDU referenced from Devicetree %d irqs\n", nr_irqs); pr_info("MCIP: IDU referenced from Devicetree %d irqs\n", nr_irqs);


@ -30,17 +30,9 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
char *secstr, struct module *mod) char *secstr, struct module *mod)
{ {
#ifdef CONFIG_ARC_DW2_UNWIND #ifdef CONFIG_ARC_DW2_UNWIND
int i;
mod->arch.unw_sec_idx = 0; mod->arch.unw_sec_idx = 0;
mod->arch.unw_info = NULL; mod->arch.unw_info = NULL;
mod->arch.secstr = secstr;
for (i = 1; i < hdr->e_shnum; i++) {
if (strcmp(secstr+sechdrs[i].sh_name, ".eh_frame") == 0) {
mod->arch.unw_sec_idx = i;
break;
}
}
#endif #endif
return 0; return 0;
} }
@ -59,29 +51,33 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
unsigned int relsec, /* sec index for relo sec */ unsigned int relsec, /* sec index for relo sec */
struct module *module) struct module *module)
{ {
int i, n; int i, n, relo_type;
Elf32_Rela *rel_entry = (void *)sechdrs[relsec].sh_addr; Elf32_Rela *rel_entry = (void *)sechdrs[relsec].sh_addr;
Elf32_Sym *sym_entry, *sym_sec; Elf32_Sym *sym_entry, *sym_sec;
Elf32_Addr relocation; Elf32_Addr relocation, location, tgt_addr;
Elf32_Addr location; unsigned int tgtsec;
Elf32_Addr sec_to_patch;
int relo_type;
sec_to_patch = sechdrs[sechdrs[relsec].sh_info].sh_addr; /*
* @relsec has relocations e.g. .rela.init.text
* @tgtsec is section to patch e.g. .init.text
*/
tgtsec = sechdrs[relsec].sh_info;
tgt_addr = sechdrs[tgtsec].sh_addr;
sym_sec = (Elf32_Sym *) sechdrs[symindex].sh_addr; sym_sec = (Elf32_Sym *) sechdrs[symindex].sh_addr;
n = sechdrs[relsec].sh_size / sizeof(*rel_entry); n = sechdrs[relsec].sh_size / sizeof(*rel_entry);
pr_debug("\n========== Module Sym reloc ===========================\n"); pr_debug("\nSection to fixup %s @%x\n",
pr_debug("Section to fixup %x\n", sec_to_patch); module->arch.secstr + sechdrs[tgtsec].sh_name, tgt_addr);
pr_debug("=========================================================\n"); pr_debug("=========================================================\n");
pr_debug("rela->r_off | rela->addend | sym->st_value | ADDR | VALUE\n"); pr_debug("r_off\tr_add\tst_value ADDRESS VALUE\n");
pr_debug("=========================================================\n"); pr_debug("=========================================================\n");
/* Loop thru entries in relocation section */ /* Loop thru entries in relocation section */
for (i = 0; i < n; i++) { for (i = 0; i < n; i++) {
const char *s;
/* This is where to make the change */ /* This is where to make the change */
location = sec_to_patch + rel_entry[i].r_offset; location = tgt_addr + rel_entry[i].r_offset;
/* This is the symbol it is referring to. Note that all /* This is the symbol it is referring to. Note that all
undefined symbols have been resolved. */ undefined symbols have been resolved. */
@ -89,10 +85,15 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
relocation = sym_entry->st_value + rel_entry[i].r_addend; relocation = sym_entry->st_value + rel_entry[i].r_addend;
pr_debug("\t%x\t\t%x\t\t%x %x %x [%s]\n", if (sym_entry->st_name == 0 && ELF_ST_TYPE (sym_entry->st_info) == STT_SECTION) {
rel_entry[i].r_offset, rel_entry[i].r_addend, s = module->arch.secstr + sechdrs[sym_entry->st_shndx].sh_name;
sym_entry->st_value, location, relocation, } else {
strtab + sym_entry->st_name); s = strtab + sym_entry->st_name;
}
pr_debug(" %x\t%x\t%x %x %x [%s]\n",
rel_entry[i].r_offset, rel_entry[i].r_addend,
sym_entry->st_value, location, relocation, s);
/* This assumes modules are built with -mlong-calls /* This assumes modules are built with -mlong-calls
* so any branches/jumps are absolute 32 bit jmps * so any branches/jumps are absolute 32 bit jmps
@ -111,6 +112,10 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
goto relo_err; goto relo_err;
} }
if (strcmp(module->arch.secstr+sechdrs[tgtsec].sh_name, ".eh_frame") == 0)
module->arch.unw_sec_idx = tgtsec;
return 0; return 0;
relo_err: relo_err:


@ -41,6 +41,39 @@ SYSCALL_DEFINE0(arc_gettls)
return task_thread_info(current)->thr_ptr; return task_thread_info(current)->thr_ptr;
} }
SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
{
int uval;
int ret;
/*
* This is only for old cores lacking LLOCK/SCOND, which by defintion
* can't possibly be SMP. Thus doesn't need to be SMP safe.
* And this also helps reduce the overhead for serializing in
* the UP case
*/
WARN_ON_ONCE(IS_ENABLED(CONFIG_SMP));
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
preempt_disable();
ret = __get_user(uval, uaddr);
if (ret)
goto done;
if (uval != expected)
ret = -EAGAIN;
else
ret = __put_user(new, uaddr);
done:
preempt_enable();
return ret;
}
void arch_cpu_idle(void) void arch_cpu_idle(void)
{ {
/* sleep, but enable all interrupts before committing */ /* sleep, but enable all interrupts before committing */
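
As a rough sketch of how user space might sit on top of the sys_arc_usr_cmpxchg() call added above: the wrapper and the lock-word example below are hypothetical, and the fallback syscall number is only a guess for illustration; the expected/new/-EAGAIN semantics are the ones visible in this hunk.

#include <errno.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_arc_usr_cmpxchg
/* Placeholder for illustration; the real number is
 * __NR_arch_specific_syscall + 4, per the unistd.h change above. */
#define __NR_arc_usr_cmpxchg 248
#endif

/* Ask the kernel to change *addr from expected to new_val.
 * Returns 0 on success; on a compare mismatch the raw syscall
 * returns -EAGAIN, which syscall() reports as -1 with errno == EAGAIN. */
static int arc_cmpxchg(int *addr, int expected, int new_val)
{
	return syscall(__NR_arc_usr_cmpxchg, addr, expected, new_val);
}

/* Hypothetical spinlock built on the call: loop until we are the
 * caller that flips the word from 0 to 1. */
static void lock_acquire(int *lock_word)
{
	while (arc_cmpxchg(lock_word, 0, 1) != 0 && errno == EAGAIN)
		;
}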


@ -40,6 +40,29 @@ struct task_struct *_current_task[NR_CPUS]; /* For stack switching */
struct cpuinfo_arc cpuinfo_arc700[NR_CPUS]; struct cpuinfo_arc cpuinfo_arc700[NR_CPUS];
static const struct id_to_str arc_cpu_rel[] = {
#ifdef CONFIG_ISA_ARCOMPACT
{ 0x34, "R4.10"},
{ 0x35, "R4.11"},
#else
{ 0x51, "R2.0" },
{ 0x52, "R2.1" },
{ 0x53, "R3.0" },
#endif
{ 0x00, NULL }
};
static const struct id_to_str arc_cpu_nm[] = {
#ifdef CONFIG_ISA_ARCOMPACT
{ 0x20, "ARC 600" },
{ 0x30, "ARC 770" }, /* 750 identified seperately */
#else
{ 0x40, "ARC EM" },
{ 0x50, "ARC HS38" },
#endif
{ 0x00, "Unknown" }
};
static void read_decode_ccm_bcr(struct cpuinfo_arc *cpu) static void read_decode_ccm_bcr(struct cpuinfo_arc *cpu)
{ {
if (is_isa_arcompact()) { if (is_isa_arcompact()) {
@ -92,11 +115,26 @@ static void read_arc_build_cfg_regs(void)
struct bcr_timer timer; struct bcr_timer timer;
struct bcr_generic bcr; struct bcr_generic bcr;
struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
const struct id_to_str *tbl;
FIX_PTR(cpu); FIX_PTR(cpu);
READ_BCR(AUX_IDENTITY, cpu->core); READ_BCR(AUX_IDENTITY, cpu->core);
READ_BCR(ARC_REG_ISA_CFG_BCR, cpu->isa); READ_BCR(ARC_REG_ISA_CFG_BCR, cpu->isa);
for (tbl = &arc_cpu_rel[0]; tbl->id != 0; tbl++) {
if (cpu->core.family == tbl->id) {
cpu->details = tbl->str;
break;
}
}
for (tbl = &arc_cpu_nm[0]; tbl->id != 0; tbl++) {
if ((cpu->core.family & 0xF0) == tbl->id)
break;
}
cpu->name = tbl->str;
READ_BCR(ARC_REG_TIMERS_BCR, timer); READ_BCR(ARC_REG_TIMERS_BCR, timer);
cpu->extn.timer0 = timer.t0; cpu->extn.timer0 = timer.t0;
cpu->extn.timer1 = timer.t1; cpu->extn.timer1 = timer.t1;
@ -111,6 +149,9 @@ static void read_arc_build_cfg_regs(void)
cpu->extn.swap = read_aux_reg(ARC_REG_SWAP_BCR) ? 1 : 0; /* 1,3 */ cpu->extn.swap = read_aux_reg(ARC_REG_SWAP_BCR) ? 1 : 0; /* 1,3 */
cpu->extn.crc = read_aux_reg(ARC_REG_CRC_BCR) ? 1 : 0; cpu->extn.crc = read_aux_reg(ARC_REG_CRC_BCR) ? 1 : 0;
cpu->extn.minmax = read_aux_reg(ARC_REG_MIXMAX_BCR) > 1 ? 1 : 0; /* 2 */ cpu->extn.minmax = read_aux_reg(ARC_REG_MIXMAX_BCR) > 1 ? 1 : 0; /* 2 */
cpu->extn.swape = (cpu->core.family >= 0x34) ? 1 :
IS_ENABLED(CONFIG_ARC_HAS_SWAPE);
READ_BCR(ARC_REG_XY_MEM_BCR, cpu->extn_xymem); READ_BCR(ARC_REG_XY_MEM_BCR, cpu->extn_xymem);
/* Read CCM BCRs for boot reporting even if not enabled in Kconfig */ /* Read CCM BCRs for boot reporting even if not enabled in Kconfig */
@ -160,64 +201,38 @@ static void read_arc_build_cfg_regs(void)
cpu->extn.rtt = bcr.ver ? 1 : 0; cpu->extn.rtt = bcr.ver ? 1 : 0;
cpu->extn.debug = cpu->extn.ap | cpu->extn.smart | cpu->extn.rtt; cpu->extn.debug = cpu->extn.ap | cpu->extn.smart | cpu->extn.rtt;
/* some hacks for lack of feature BCR info in old ARC700 cores */
if (is_isa_arcompact()) {
if (!cpu->isa.ver) /* ISA BCR absent, use Kconfig info */
cpu->isa.atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC);
else
cpu->isa.atomic = cpu->isa.atomic1;
cpu->isa.be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
/* there's no direct way to distinguish 750 vs. 770 */
if (unlikely(cpu->core.family < 0x34 || cpu->mmu.ver < 3))
cpu->name = "ARC750";
}
} }
static const struct cpuinfo_data arc_cpu_tbl[] = {
#ifdef CONFIG_ISA_ARCOMPACT
{ {0x20, "ARC 600" }, 0x2F},
{ {0x30, "ARC 700" }, 0x33},
{ {0x34, "ARC 700 R4.10"}, 0x34},
{ {0x35, "ARC 700 R4.11"}, 0x35},
#else
{ {0x50, "ARC HS38 R2.0"}, 0x51},
{ {0x52, "ARC HS38 R2.1"}, 0x52},
{ {0x53, "ARC HS38 R3.0"}, 0x53},
#endif
{ {0x00, NULL } }
};
static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len) static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
{ {
struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id]; struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
struct bcr_identity *core = &cpu->core; struct bcr_identity *core = &cpu->core;
const struct cpuinfo_data *tbl; int i, n = 0;
char *isa_nm;
int i, be, atomic;
int n = 0;
FIX_PTR(cpu); FIX_PTR(cpu);
if (is_isa_arcompact()) {
isa_nm = "ARCompact";
be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
atomic = cpu->isa.atomic1;
if (!cpu->isa.ver) /* ISA BCR absent, use Kconfig info */
atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC);
} else {
isa_nm = "ARCv2";
be = cpu->isa.be;
atomic = cpu->isa.atomic;
}
n += scnprintf(buf + n, len - n, n += scnprintf(buf + n, len - n,
"\nIDENTITY\t: ARCVER [%#02x] ARCNUM [%#02x] CHIPID [%#4x]\n", "\nIDENTITY\t: ARCVER [%#02x] ARCNUM [%#02x] CHIPID [%#4x]\n",
core->family, core->cpu_id, core->chip_id); core->family, core->cpu_id, core->chip_id);
for (tbl = &arc_cpu_tbl[0]; tbl->info.id != 0; tbl++) { n += scnprintf(buf + n, len - n, "processor [%d]\t: %s %s (%s ISA) %s\n",
if ((core->family >= tbl->info.id) && cpu_id, cpu->name, cpu->details,
(core->family <= tbl->up_range)) { is_isa_arcompact() ? "ARCompact" : "ARCv2",
n += scnprintf(buf + n, len - n, IS_AVAIL1(cpu->isa.be, "[Big-Endian]"));
"processor [%d]\t: %s (%s ISA) %s\n",
cpu_id, tbl->info.str, isa_nm,
IS_AVAIL1(be, "[Big-Endian]"));
break;
}
}
if (tbl->info.id == 0)
n += scnprintf(buf + n, len - n, "UNKNOWN ARC Processor\n");
n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s\nISA Extn\t: ", n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s\nISA Extn\t: ",
IS_AVAIL1(cpu->extn.timer0, "Timer0 "), IS_AVAIL1(cpu->extn.timer0, "Timer0 "),
@ -226,7 +241,7 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
CONFIG_ARC_HAS_RTC)); CONFIG_ARC_HAS_RTC));
n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s", n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s",
IS_AVAIL2(atomic, "atomic ", CONFIG_ARC_HAS_LLSC), IS_AVAIL2(cpu->isa.atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
IS_AVAIL2(cpu->isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64), IS_AVAIL2(cpu->isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64),
IS_AVAIL1(cpu->isa.unalign, "unalign (not used)")); IS_AVAIL1(cpu->isa.unalign, "unalign (not used)"));
@ -253,7 +268,7 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
IS_AVAIL1(cpu->extn.swap, "swap "), IS_AVAIL1(cpu->extn.swap, "swap "),
IS_AVAIL1(cpu->extn.minmax, "minmax "), IS_AVAIL1(cpu->extn.minmax, "minmax "),
IS_AVAIL1(cpu->extn.crc, "crc "), IS_AVAIL1(cpu->extn.crc, "crc "),
IS_AVAIL2(1, "swape", CONFIG_ARC_HAS_SWAPE)); IS_AVAIL2(cpu->extn.swape, "swape", CONFIG_ARC_HAS_SWAPE));
if (cpu->bpu.ver) if (cpu->bpu.ver)
n += scnprintf(buf + n, len - n, n += scnprintf(buf + n, len - n,
@ -272,9 +287,7 @@ static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
FIX_PTR(cpu); FIX_PTR(cpu);
n += scnprintf(buf + n, len - n, n += scnprintf(buf + n, len - n, "Vector Table\t: %#x\n", cpu->vec_base);
"Vector Table\t: %#x\nPeripherals\t: %#lx:%#lx\n",
cpu->vec_base, perip_base, perip_end);
if (cpu->extn.fpu_sp || cpu->extn.fpu_dp) if (cpu->extn.fpu_sp || cpu->extn.fpu_dp)
n += scnprintf(buf + n, len - n, "FPU\t\t: %s%s\n", n += scnprintf(buf + n, len - n, "FPU\t\t: %s%s\n",
@ -507,7 +520,7 @@ static void *c_start(struct seq_file *m, loff_t *pos)
* way to pass it w/o having to kmalloc/free a 2 byte string. * way to pass it w/o having to kmalloc/free a 2 byte string.
* Encode cpu-id as 0xFFcccc, which is decoded by show routine. * Encode cpu-id as 0xFFcccc, which is decoded by show routine.
*/ */
return *pos < num_possible_cpus() ? cpu_to_ptr(*pos) : NULL; return *pos < nr_cpu_ids ? cpu_to_ptr(*pos) : NULL;
} }
static void *c_next(struct seq_file *m, void *v, loff_t *pos) static void *c_next(struct seq_file *m, void *v, loff_t *pos)


@ -237,113 +237,3 @@ void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
if (!user_mode(regs)) if (!user_mode(regs))
show_stacktrace(current, regs); show_stacktrace(current, regs);
} }
#ifdef CONFIG_DEBUG_FS
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/namei.h>
#include <linux/debugfs.h>
static struct dentry *test_dentry;
static struct dentry *test_dir;
static struct dentry *test_u32_dentry;
static u32 clr_on_read = 1;
#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
u32 numitlb, numdtlb, num_pte_not_present;
static int fill_display_data(char *kbuf)
{
size_t num = 0;
num += sprintf(kbuf + num, "I-TLB Miss %x\n", numitlb);
num += sprintf(kbuf + num, "D-TLB Miss %x\n", numdtlb);
num += sprintf(kbuf + num, "PTE not present %x\n", num_pte_not_present);
if (clr_on_read)
numitlb = numdtlb = num_pte_not_present = 0;
return num;
}
static int tlb_stats_open(struct inode *inode, struct file *file)
{
file->private_data = (void *)__get_free_page(GFP_KERNEL);
return 0;
}
/* called on user read(): display the counters */
static ssize_t tlb_stats_output(struct file *file, /* file descriptor */
char __user *user_buf, /* user buffer */
size_t len, /* length of buffer */
loff_t *offset) /* offset in the file */
{
size_t num;
char *kbuf = (char *)file->private_data;
/* All of the data can he shoved in one iteration */
if (*offset != 0)
return 0;
num = fill_display_data(kbuf);
/* simple_read_from_buffer() is helper for copy to user space
It copies up to @2 (num) bytes from kernel buffer @4 (kbuf) at offset
@3 (offset) into the user space address starting at @1 (user_buf).
@5 (len) is max size of user buffer
*/
return simple_read_from_buffer(user_buf, num, offset, kbuf, len);
}
/* called on user write : clears the counters */
static ssize_t tlb_stats_clear(struct file *file, const char __user *user_buf,
size_t length, loff_t *offset)
{
numitlb = numdtlb = num_pte_not_present = 0;
return length;
}
static int tlb_stats_close(struct inode *inode, struct file *file)
{
free_page((unsigned long)(file->private_data));
return 0;
}
static const struct file_operations tlb_stats_file_ops = {
.read = tlb_stats_output,
.write = tlb_stats_clear,
.open = tlb_stats_open,
.release = tlb_stats_close
};
#endif
static int __init arc_debugfs_init(void)
{
test_dir = debugfs_create_dir("arc", NULL);
#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
test_dentry = debugfs_create_file("tlb_stats", 0444, test_dir, NULL,
&tlb_stats_file_ops);
#endif
test_u32_dentry =
debugfs_create_u32("clr_on_read", 0444, test_dir, &clr_on_read);
return 0;
}
module_init(arc_debugfs_init);
static void __exit arc_debugfs_exit(void)
{
debugfs_remove(test_u32_dentry);
debugfs_remove(test_dentry);
debugfs_remove(test_dir);
}
module_exit(arc_debugfs_exit);
#endif


@ -22,8 +22,8 @@
#include <asm/setup.h> #include <asm/setup.h>
static int l2_line_sz; static int l2_line_sz;
int ioc_exists; static int ioc_exists;
volatile int slc_enable = 1, ioc_enable = 1; int slc_enable = 1, ioc_enable = 1;
unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */ unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
unsigned long perip_end = 0xFFFFFFFF; /* legacy value */ unsigned long perip_end = 0xFFFFFFFF; /* legacy value */
@ -53,18 +53,15 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len)
PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache"); PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache"); PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");
if (!is_isa_arcv2())
return buf;
p = &cpuinfo_arc700[c].slc; p = &cpuinfo_arc700[c].slc;
if (p->ver) if (p->ver)
n += scnprintf(buf + n, len - n, n += scnprintf(buf + n, len - n,
"SLC\t\t: %uK, %uB Line%s\n", "SLC\t\t: %uK, %uB Line%s\n",
p->sz_k, p->line_len, IS_USED_RUN(slc_enable)); p->sz_k, p->line_len, IS_USED_RUN(slc_enable));
if (ioc_exists) n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
n += scnprintf(buf + n, len - n, "IOC\t\t:%s\n", perip_base,
IS_DISABLED_RUN(ioc_enable)); IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency "));
return buf; return buf;
} }
@ -113,8 +110,10 @@ static void read_decode_cache_bcr_arcv2(int cpu)
} }
READ_BCR(ARC_REG_CLUSTER_BCR, cbcr); READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
if (cbcr.c && ioc_enable) if (cbcr.c)
ioc_exists = 1; ioc_exists = 1;
else
ioc_enable = 0;
/* HS 2.0 didn't have AUX_VOL */ /* HS 2.0 didn't have AUX_VOL */
if (cpuinfo_arc700[cpu].core.family > 0x51) { if (cpuinfo_arc700[cpu].core.family > 0x51) {
@ -1002,7 +1001,7 @@ void arc_cache_init(void)
read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_DISABLE); read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_DISABLE);
} }
if (is_isa_arcv2() && ioc_exists) { if (is_isa_arcv2() && ioc_enable) {
/* IO coherency base - 0x8z */ /* IO coherency base - 0x8z */
write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000); write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);
/* IO coherency aperture size - 512Mb: 0x8z-0xAz */ /* IO coherency aperture size - 512Mb: 0x8z-0xAz */


@ -45,7 +45,7 @@ static void *arc_dma_alloc(struct device *dev, size_t size,
* -For coherent data, Read/Write to buffers terminate early in cache * -For coherent data, Read/Write to buffers terminate early in cache
* (vs. always going to memory - thus are faster) * (vs. always going to memory - thus are faster)
*/ */
if ((is_isa_arcv2() && ioc_exists) || if ((is_isa_arcv2() && ioc_enable) ||
(attrs & DMA_ATTR_NON_CONSISTENT)) (attrs & DMA_ATTR_NON_CONSISTENT))
need_coh = 0; need_coh = 0;
@ -97,7 +97,7 @@ static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
int is_non_coh = 1; int is_non_coh = 1;
is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT) || is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT) ||
(is_isa_arcv2() && ioc_exists); (is_isa_arcv2() && ioc_enable);
if (PageHighMem(page) || !is_non_coh) if (PageHighMem(page) || !is_non_coh)
iounmap((void __force __iomem *)vaddr); iounmap((void __force __iomem *)vaddr);


@ -793,16 +793,16 @@ char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
char super_pg[64] = ""; char super_pg[64] = "";
if (p_mmu->s_pg_sz_m) if (p_mmu->s_pg_sz_m)
scnprintf(super_pg, 64, "%dM Super Page%s, ", scnprintf(super_pg, 64, "%dM Super Page %s",
p_mmu->s_pg_sz_m, p_mmu->s_pg_sz_m,
IS_USED_CFG(CONFIG_TRANSPARENT_HUGEPAGE)); IS_USED_CFG(CONFIG_TRANSPARENT_HUGEPAGE));
n += scnprintf(buf + n, len - n, n += scnprintf(buf + n, len - n,
"MMU [v%x]\t: %dk PAGE, %sJTLB %d (%dx%d), uDTLB %d, uITLB %d %s%s\n", "MMU [v%x]\t: %dk PAGE, %sJTLB %d (%dx%d), uDTLB %d, uITLB %d%s%s\n",
p_mmu->ver, p_mmu->pg_sz_k, super_pg, p_mmu->ver, p_mmu->pg_sz_k, super_pg,
p_mmu->sets * p_mmu->ways, p_mmu->sets, p_mmu->ways, p_mmu->sets * p_mmu->ways, p_mmu->sets, p_mmu->ways,
p_mmu->u_dtlb, p_mmu->u_itlb, p_mmu->u_dtlb, p_mmu->u_itlb,
IS_AVAIL2(p_mmu->pae, "PAE40 ", CONFIG_ARC_HAS_PAE40)); IS_AVAIL2(p_mmu->pae, ", PAE40 ", CONFIG_ARC_HAS_PAE40));
return buf; return buf;
} }


@ -237,15 +237,6 @@ ex_saved_reg1:
2: 2:
#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
and.f 0, r0, _PAGE_PRESENT
bz 1f
ld r3, [num_pte_not_present]
add r3, r3, 1
st r3, [num_pte_not_present]
1:
#endif
.endm .endm
;----------------------------------------------------------------- ;-----------------------------------------------------------------
@ -309,12 +300,6 @@ ENTRY(EV_TLBMissI)
TLBMISS_FREEUP_REGS TLBMISS_FREEUP_REGS
#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
ld r0, [@numitlb]
add r0, r0, 1
st r0, [@numitlb]
#endif
;---------------------------------------------------------------- ;----------------------------------------------------------------
; Get the PTE corresponding to V-addr accessed, r2 is setup with EFA ; Get the PTE corresponding to V-addr accessed, r2 is setup with EFA
LOAD_FAULT_PTE LOAD_FAULT_PTE
@ -349,12 +334,6 @@ ENTRY(EV_TLBMissD)
TLBMISS_FREEUP_REGS TLBMISS_FREEUP_REGS
#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
ld r0, [@numdtlb]
add r0, r0, 1
st r0, [@numdtlb]
#endif
;---------------------------------------------------------------- ;----------------------------------------------------------------
; Get the PTE corresponding to V-addr accessed ; Get the PTE corresponding to V-addr accessed
; If PTE exists, it will setup, r0 = PTE, r1 = Ptr to PTE, r2 = EFA ; If PTE exists, it will setup, r0 = PTE, r1 = Ptr to PTE, r2 = EFA


@ -239,14 +239,25 @@
arm,primecell-periphid = <0x10480180>; arm,primecell-periphid = <0x10480180>;
max-frequency = <100000000>; max-frequency = <100000000>;
bus-width = <4>; bus-width = <4>;
cap-sd-highspeed;
cap-mmc-highspeed; cap-mmc-highspeed;
sd-uhs-sdr12;
sd-uhs-sdr25;
/* All direction control is used */
st,sig-dir-cmd;
st,sig-dir-dat0;
st,sig-dir-dat2;
st,sig-dir-dat31;
st,sig-pin-fbclk;
full-pwr-cycle;
vmmc-supply = <&ab8500_ldo_aux3_reg>; vmmc-supply = <&ab8500_ldo_aux3_reg>;
vqmmc-supply = <&vmmci>; vqmmc-supply = <&vmmci>;
pinctrl-names = "default", "sleep"; pinctrl-names = "default", "sleep";
pinctrl-0 = <&sdi0_default_mode>; pinctrl-0 = <&sdi0_default_mode>;
pinctrl-1 = <&sdi0_sleep_mode>; pinctrl-1 = <&sdi0_sleep_mode>;
cd-gpios = <&gpio6 26 GPIO_ACTIVE_LOW>; // 218 /* GPIO218 MMC_CD */
cd-gpios = <&gpio6 26 GPIO_ACTIVE_LOW>;
status = "okay"; status = "okay";
}; };
@ -549,7 +560,7 @@
/* VMMCI level-shifter enable */ /* VMMCI level-shifter enable */
snowball_cfg3 { snowball_cfg3 {
pins = "GPIO217_AH12"; pins = "GPIO217_AH12";
ste,config = <&gpio_out_lo>; ste,config = <&gpio_out_hi>;
}; };
/* VMMCI level-shifter voltage select */ /* VMMCI level-shifter voltage select */
snowball_cfg4 { snowball_cfg4 {


@ -184,11 +184,11 @@
}; };
&mio_clk { &mio_clk {
compatible = "socionext,uniphier-pro5-mio-clock"; compatible = "socionext,uniphier-pro5-sd-clock";
}; };
&mio_rst { &mio_rst {
compatible = "socionext,uniphier-pro5-mio-reset"; compatible = "socionext,uniphier-pro5-sd-reset";
}; };
&peri_clk { &peri_clk {


@ -197,11 +197,11 @@
}; };
&mio_clk { &mio_clk {
compatible = "socionext,uniphier-pxs2-mio-clock"; compatible = "socionext,uniphier-pxs2-sd-clock";
}; };
&mio_rst { &mio_rst {
compatible = "socionext,uniphier-pxs2-mio-reset"; compatible = "socionext,uniphier-pxs2-sd-reset";
}; };
&peri_clk { &peri_clk {


@ -70,7 +70,7 @@
global_timer: timer@40002200 { global_timer: timer@40002200 {
compatible = "arm,cortex-a9-global-timer"; compatible = "arm,cortex-a9-global-timer";
reg = <0x40002200 0x20>; reg = <0x40002200 0x20>;
interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>; interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
interrupt-parent = <&intc>; interrupt-parent = <&intc>;
clocks = <&clks VF610_CLK_PLATFORM_BUS>; clocks = <&clks VF610_CLK_PLATFORM_BUS>;
}; };


@ -850,6 +850,7 @@ CONFIG_PWM_SUN4I=y
CONFIG_PWM_TEGRA=y CONFIG_PWM_TEGRA=y
CONFIG_PWM_VT8500=y CONFIG_PWM_VT8500=y
CONFIG_PHY_HIX5HD2_SATA=y CONFIG_PHY_HIX5HD2_SATA=y
CONFIG_E1000E=y
CONFIG_PWM_STI=y CONFIG_PWM_STI=y
CONFIG_PWM_BCM2835=y CONFIG_PWM_BCM2835=y
CONFIG_PWM_BRCMSTB=m CONFIG_PWM_BRCMSTB=m


@ -408,7 +408,7 @@ static struct genpd_onecell_data imx_gpc_onecell_data = {
static int imx_gpc_genpd_init(struct device *dev, struct regulator *pu_reg) static int imx_gpc_genpd_init(struct device *dev, struct regulator *pu_reg)
{ {
struct clk *clk; struct clk *clk;
int i; int i, ret;
imx6q_pu_domain.reg = pu_reg; imx6q_pu_domain.reg = pu_reg;
@ -430,13 +430,22 @@ static int imx_gpc_genpd_init(struct device *dev, struct regulator *pu_reg)
if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS)) if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS))
return 0; return 0;
pm_genpd_init(&imx6q_pu_domain.base, NULL, false); for (i = 0; i < ARRAY_SIZE(imx_gpc_domains); i++)
return of_genpd_add_provider_onecell(dev->of_node, pm_genpd_init(imx_gpc_domains[i], NULL, false);
&imx_gpc_onecell_data);
ret = of_genpd_add_provider_onecell(dev->of_node,
&imx_gpc_onecell_data);
if (ret)
goto power_off;
return 0;
power_off:
imx6q_pm_pu_power_off(&imx6q_pu_domain.base);
clk_err: clk_err:
while (i--) while (i--)
clk_put(imx6q_pu_domain.clk[i]); clk_put(imx6q_pu_domain.clk[i]);
imx6q_pu_domain.reg = NULL;
return -EINVAL; return -EINVAL;
} }


@ -173,7 +173,7 @@ static void __init imx6q_enet_phy_init(void)
ksz9021rn_phy_fixup); ksz9021rn_phy_fixup);
phy_register_fixup_for_uid(PHY_ID_KSZ9031, MICREL_PHY_ID_MASK, phy_register_fixup_for_uid(PHY_ID_KSZ9031, MICREL_PHY_ID_MASK,
ksz9031rn_phy_fixup); ksz9031rn_phy_fixup);
phy_register_fixup_for_uid(PHY_ID_AR8031, 0xffffffff, phy_register_fixup_for_uid(PHY_ID_AR8031, 0xffffffef,
ar8031_phy_fixup); ar8031_phy_fixup);
phy_register_fixup_for_uid(PHY_ID_AR8035, 0xffffffef, phy_register_fixup_for_uid(PHY_ID_AR8035, 0xffffffef,
ar8035_phy_fixup); ar8035_phy_fixup);


@ -23,6 +23,7 @@ config MACH_MVEBU_V7
select CACHE_L2X0 select CACHE_L2X0
select ARM_CPU_SUSPEND select ARM_CPU_SUSPEND
select MACH_MVEBU_ANY select MACH_MVEBU_ANY
select MVEBU_CLK_COREDIV
config MACH_ARMADA_370 config MACH_ARMADA_370
bool "Marvell Armada 370 boards" bool "Marvell Armada 370 boards"
@ -32,7 +33,6 @@ config MACH_ARMADA_370
select CPU_PJ4B select CPU_PJ4B
select MACH_MVEBU_V7 select MACH_MVEBU_V7
select PINCTRL_ARMADA_370 select PINCTRL_ARMADA_370
select MVEBU_CLK_COREDIV
help help
Say 'Y' here if you want your kernel to support boards based Say 'Y' here if you want your kernel to support boards based
on the Marvell Armada 370 SoC with device tree. on the Marvell Armada 370 SoC with device tree.
@ -50,7 +50,6 @@ config MACH_ARMADA_375
select HAVE_SMP select HAVE_SMP
select MACH_MVEBU_V7 select MACH_MVEBU_V7
select PINCTRL_ARMADA_375 select PINCTRL_ARMADA_375
select MVEBU_CLK_COREDIV
help help
Say 'Y' here if you want your kernel to support boards based Say 'Y' here if you want your kernel to support boards based
on the Marvell Armada 375 SoC with device tree. on the Marvell Armada 375 SoC with device tree.
@ -68,7 +67,6 @@ config MACH_ARMADA_38X
select HAVE_SMP select HAVE_SMP
select MACH_MVEBU_V7 select MACH_MVEBU_V7
select PINCTRL_ARMADA_38X select PINCTRL_ARMADA_38X
select MVEBU_CLK_COREDIV
help help
Say 'Y' here if you want your kernel to support boards based Say 'Y' here if you want your kernel to support boards based
on the Marvell Armada 380/385 SoC with device tree. on the Marvell Armada 380/385 SoC with device tree.


@ -1,6 +1,7 @@
config ARCH_UNIPHIER config ARCH_UNIPHIER
bool "Socionext UniPhier SoCs" bool "Socionext UniPhier SoCs"
depends on ARCH_MULTI_V7 depends on ARCH_MULTI_V7
select ARCH_HAS_RESET_CONTROLLER
select ARM_AMBA select ARM_AMBA
select ARM_GLOBAL_TIMER select ARM_GLOBAL_TIMER
select ARM_GIC select ARM_GIC


@ -190,6 +190,7 @@ config ARCH_THUNDER
config ARCH_UNIPHIER config ARCH_UNIPHIER
bool "Socionext UniPhier SoC Family" bool "Socionext UniPhier SoC Family"
select ARCH_HAS_RESET_CONTROLLER
select PINCTRL select PINCTRL
help help
This enables support for Socionext UniPhier SoC family. This enables support for Socionext UniPhier SoC family.


@ -164,6 +164,8 @@
nand-ecc-mode = "hw"; nand-ecc-mode = "hw";
nand-ecc-strength = <8>; nand-ecc-strength = <8>;
nand-ecc-step-size = <512>; nand-ecc-step-size = <512>;
nand-bus-width = <16>;
brcm,nand-oob-sector-size = <16>;
#address-cells = <1>; #address-cells = <1>;
#size-cells = <1>; #size-cells = <1>;
}; };


@ -123,6 +123,7 @@
<1 14 0xf08>, /* Physical Non-Secure PPI */ <1 14 0xf08>, /* Physical Non-Secure PPI */
<1 11 0xf08>, /* Virtual PPI */ <1 11 0xf08>, /* Virtual PPI */
<1 10 0xf08>; /* Hypervisor PPI */ <1 10 0xf08>; /* Hypervisor PPI */
fsl,erratum-a008585;
}; };
pmu { pmu {


@ -195,6 +195,7 @@
<1 14 4>, /* Physical Non-Secure PPI, active-low */ <1 14 4>, /* Physical Non-Secure PPI, active-low */
<1 11 4>, /* Virtual PPI, active-low */ <1 11 4>, /* Virtual PPI, active-low */
<1 10 4>; /* Hypervisor PPI, active-low */ <1 10 4>; /* Hypervisor PPI, active-low */
fsl,erratum-a008585;
}; };
pmu { pmu {


@ -131,7 +131,7 @@
#address-cells = <0x1>; #address-cells = <0x1>;
#size-cells = <0x0>; #size-cells = <0x0>;
cell-index = <1>; cell-index = <1>;
clocks = <&cpm_syscon0 0 3>; clocks = <&cpm_syscon0 1 21>;
status = "disabled"; status = "disabled";
}; };


@ -116,7 +116,6 @@
cap-mmc-highspeed; cap-mmc-highspeed;
clock-frequency = <150000000>; clock-frequency = <150000000>;
disable-wp; disable-wp;
keep-power-in-suspend;
non-removable; non-removable;
num-slots = <1>; num-slots = <1>;
vmmc-supply = <&vcc_io>; vmmc-supply = <&vcc_io>;
@ -258,8 +257,6 @@
}; };
vcc_sd: SWITCH_REG1 { vcc_sd: SWITCH_REG1 {
regulator-always-on;
regulator-boot-on;
regulator-name = "vcc_sd"; regulator-name = "vcc_sd";
}; };


@ -152,8 +152,6 @@
gpio = <&gpio3 11 GPIO_ACTIVE_LOW>; gpio = <&gpio3 11 GPIO_ACTIVE_LOW>;
regulator-min-microvolt = <1800000>; regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>; regulator-max-microvolt = <3300000>;
regulator-always-on;
regulator-boot-on;
vin-supply = <&vcc_io>; vin-supply = <&vcc_io>;
}; };
@ -201,7 +199,6 @@
bus-width = <8>; bus-width = <8>;
cap-mmc-highspeed; cap-mmc-highspeed;
disable-wp; disable-wp;
keep-power-in-suspend;
mmc-pwrseq = <&emmc_pwrseq>; mmc-pwrseq = <&emmc_pwrseq>;
mmc-hs200-1_2v; mmc-hs200-1_2v;
mmc-hs200-1_8v; mmc-hs200-1_8v;
@ -350,7 +347,6 @@
clock-freq-min-max = <400000 50000000>; clock-freq-min-max = <400000 50000000>;
cap-sd-highspeed; cap-sd-highspeed;
card-detect-delay = <200>; card-detect-delay = <200>;
keep-power-in-suspend;
num-slots = <1>; num-slots = <1>;
pinctrl-names = "default"; pinctrl-names = "default";
pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>; pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>;


@ -257,18 +257,18 @@
reg = <0x59801000 0x400>; reg = <0x59801000 0x400>;
}; };
mioctrl@59810000 { sdctrl@59810000 {
compatible = "socionext,uniphier-mioctrl", compatible = "socionext,uniphier-ld20-sdctrl",
"simple-mfd", "syscon"; "simple-mfd", "syscon";
reg = <0x59810000 0x800>; reg = <0x59810000 0x800>;
mio_clk: clock { sd_clk: clock {
compatible = "socionext,uniphier-ld20-mio-clock"; compatible = "socionext,uniphier-ld20-sd-clock";
#clock-cells = <1>; #clock-cells = <1>;
}; };
mio_rst: reset { sd_rst: reset {
compatible = "socionext,uniphier-ld20-mio-reset"; compatible = "socionext,uniphier-ld20-sd-reset";
#reset-cells = <1>; #reset-cells = <1>;
}; };
}; };


@ -217,7 +217,7 @@ static inline void *phys_to_virt(phys_addr_t x)
#define _virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) #define _virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#else #else
#define __virt_to_pgoff(kaddr) (((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page)) #define __virt_to_pgoff(kaddr) (((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
#define __page_to_voff(kaddr) (((u64)(page) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page)) #define __page_to_voff(page) (((u64)(page) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
#define page_to_virt(page) ((void *)((__page_to_voff(page)) | PAGE_OFFSET)) #define page_to_virt(page) ((void *)((__page_to_voff(page)) | PAGE_OFFSET))
#define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START)) #define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))


@ -147,7 +147,7 @@ static int __init early_cpu_to_node(int cpu)
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to) static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{ {
return node_distance(from, to); return node_distance(early_cpu_to_node(from), early_cpu_to_node(to));
} }
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size,
@ -223,8 +223,11 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
void *nd; void *nd;
int tnid; int tnid;
pr_info("Initmem setup node %d [mem %#010Lx-%#010Lx]\n", if (start_pfn < end_pfn)
nid, start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1); pr_info("Initmem setup node %d [mem %#010Lx-%#010Lx]\n", nid,
start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1);
else
pr_info("Initmem setup node %d [<memory-less node>]\n", nid);
nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid); nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
nd = __va(nd_pa); nd = __va(nd_pa);


@ -3149,7 +3149,7 @@ static void print_dma_descriptors(struct cryptocop_int_operation *iop)
printk("print_dma_descriptors start\n"); printk("print_dma_descriptors start\n");
printk("iop:\n"); printk("iop:\n");
printk("\tsid: 0x%lld\n", iop->sid); printk("\tsid: 0x%llx\n", iop->sid);
printk("\tcdesc_out: 0x%p\n", iop->cdesc_out); printk("\tcdesc_out: 0x%p\n", iop->cdesc_out);
printk("\tcdesc_in: 0x%p\n", iop->cdesc_in); printk("\tcdesc_in: 0x%p\n", iop->cdesc_in);


@ -31,7 +31,6 @@ struct thread_info {
int cpu; /* cpu we're on */ int cpu; /* cpu we're on */
int preempt_count; /* 0 => preemptable, <0 => BUG */ int preempt_count; /* 0 => preemptable, <0 => BUG */
mm_segment_t addr_limit; mm_segment_t addr_limit;
struct restart_block restart_block;
}; };
/* /*
@ -44,9 +43,6 @@ struct thread_info {
.cpu = 0, \ .cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \ .preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \ .addr_limit = KERNEL_DS, \
.restart_block = { \
.fn = do_no_restart_syscall, \
}, \
} }
#define init_thread_info (init_thread_union.thread_info) #define init_thread_info (init_thread_union.thread_info)


@ -79,7 +79,7 @@ restore_sigcontext(struct sigcontext *usc, int *pd0)
unsigned int er0; unsigned int er0;
/* Always make any pending restarted system calls return -EINTR */ /* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall; current->restart_block.fn = do_no_restart_syscall;
/* restore passed registers */ /* restore passed registers */
#define COPY(r) do { err |= get_user(regs->r, &usc->sc_##r); } while (0) #define COPY(r) do { err |= get_user(regs->r, &usc->sc_##r); } while (0)


@ -26,7 +26,7 @@ extern u64 pnv_first_deep_stop_state;
std r0,0(r1); \ std r0,0(r1); \
ptesync; \ ptesync; \
ld r0,0(r1); \ ld r0,0(r1); \
1: cmp cr0,r0,r0; \ 1: cmpd cr0,r0,r0; \
bne 1b; \ bne 1b; \
IDLE_INST; \ IDLE_INST; \
b . b .


@ -93,6 +93,10 @@
ld reg,PACAKBASE(r13); /* get high part of &label */ \ ld reg,PACAKBASE(r13); /* get high part of &label */ \
ori reg,reg,(FIXED_SYMBOL_ABS_ADDR(label))@l; ori reg,reg,(FIXED_SYMBOL_ABS_ADDR(label))@l;
#define __LOAD_HANDLER(reg, label) \
ld reg,PACAKBASE(r13); \
ori reg,reg,(ABS_ADDR(label))@l;
/* Exception register prefixes */ /* Exception register prefixes */
#define EXC_HV H #define EXC_HV H
#define EXC_STD #define EXC_STD
@ -208,6 +212,18 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
#define kvmppc_interrupt kvmppc_interrupt_pr #define kvmppc_interrupt kvmppc_interrupt_pr
#endif #endif
#ifdef CONFIG_RELOCATABLE
#define BRANCH_TO_COMMON(reg, label) \
__LOAD_HANDLER(reg, label); \
mtctr reg; \
bctr
#else
#define BRANCH_TO_COMMON(reg, label) \
b label
#endif
#define __KVM_HANDLER_PROLOG(area, n) \ #define __KVM_HANDLER_PROLOG(area, n) \
BEGIN_FTR_SECTION_NESTED(947) \ BEGIN_FTR_SECTION_NESTED(947) \
ld r10,area+EX_CFAR(r13); \ ld r10,area+EX_CFAR(r13); \


@ -52,11 +52,23 @@ static inline int mm_is_core_local(struct mm_struct *mm)
return cpumask_subset(mm_cpumask(mm), return cpumask_subset(mm_cpumask(mm),
topology_sibling_cpumask(smp_processor_id())); topology_sibling_cpumask(smp_processor_id()));
} }
static inline int mm_is_thread_local(struct mm_struct *mm)
{
return cpumask_equal(mm_cpumask(mm),
cpumask_of(smp_processor_id()));
}
#else #else
static inline int mm_is_core_local(struct mm_struct *mm) static inline int mm_is_core_local(struct mm_struct *mm)
{ {
return 1; return 1;
} }
static inline int mm_is_thread_local(struct mm_struct *mm)
{
return 1;
}
#endif #endif
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */


@ -95,19 +95,35 @@ __start_interrupts:
/* No virt vectors corresponding with 0x0..0x100 */ /* No virt vectors corresponding with 0x0..0x100 */
EXC_VIRT_NONE(0x4000, 0x4100) EXC_VIRT_NONE(0x4000, 0x4100)
#ifdef CONFIG_PPC_P7_NAP
/*
* If running native on arch 2.06 or later, check if we are waking up
* from nap/sleep/winkle, and branch to idle handler.
*/
#define IDLETEST(n) \
BEGIN_FTR_SECTION ; \
mfspr r10,SPRN_SRR1 ; \
rlwinm. r10,r10,47-31,30,31 ; \
beq- 1f ; \
cmpwi cr3,r10,2 ; \
BRANCH_TO_COMMON(r10, system_reset_idle_common) ; \
1: \
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#else
#define IDLETEST NOTEST
#endif
EXC_REAL_BEGIN(system_reset, 0x100, 0x200) EXC_REAL_BEGIN(system_reset, 0x100, 0x200)
SET_SCRATCH0(r13) SET_SCRATCH0(r13)
#ifdef CONFIG_PPC_P7_NAP EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
BEGIN_FTR_SECTION IDLETEST, 0x100)
/* Running native on arch 2.06 or later, check if we are
* waking up from nap/sleep/winkle.
*/
mfspr r13,SPRN_SRR1
rlwinm. r13,r13,47-31,30,31
beq 9f
cmpwi cr3,r13,2 EXC_REAL_END(system_reset, 0x100, 0x200)
GET_PACA(r13) EXC_VIRT_NONE(0x4100, 0x4200)
#ifdef CONFIG_PPC_P7_NAP
EXC_COMMON_BEGIN(system_reset_idle_common)
bl pnv_restore_hyp_resource bl pnv_restore_hyp_resource
li r0,PNV_THREAD_RUNNING li r0,PNV_THREAD_RUNNING
@ -130,14 +146,8 @@ BEGIN_FTR_SECTION
blt cr3,2f blt cr3,2f
b pnv_wakeup_loss b pnv_wakeup_loss
2: b pnv_wakeup_noloss 2: b pnv_wakeup_noloss
#endif
9:
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif /* CONFIG_PPC_P7_NAP */
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
NOTEST, 0x100)
EXC_REAL_END(system_reset, 0x100, 0x200)
EXC_VIRT_NONE(0x4100, 0x4200)
EXC_COMMON(system_reset_common, 0x100, system_reset_exception) EXC_COMMON(system_reset_common, 0x100, system_reset_exception)
#ifdef CONFIG_PPC_PSERIES #ifdef CONFIG_PPC_PSERIES
@ -817,10 +827,8 @@ EXC_VIRT(trap_0b, 0x4b00, 0x4c00, 0xb00)
TRAMP_KVM(PACA_EXGEN, 0xb00) TRAMP_KVM(PACA_EXGEN, 0xb00)
EXC_COMMON(trap_0b_common, 0xb00, unknown_exception) EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
#define LOAD_SYSCALL_HANDLER(reg) \
#define LOAD_SYSCALL_HANDLER(reg) \ __LOAD_HANDLER(reg, system_call_common)
ld reg,PACAKBASE(r13); \
ori reg,reg,(ABS_ADDR(system_call_common))@l;
/* Syscall routine is used twice, in reloc-off and reloc-on paths */ /* Syscall routine is used twice, in reloc-off and reloc-on paths */
#define SYSCALL_PSERIES_1 \ #define SYSCALL_PSERIES_1 \


@ -275,7 +275,7 @@ int hw_breakpoint_handler(struct die_args *args)
if (!stepped) { if (!stepped) {
WARN(1, "Unable to handle hardware breakpoint. Breakpoint at " WARN(1, "Unable to handle hardware breakpoint. Breakpoint at "
"0x%lx will be disabled.", info->address); "0x%lx will be disabled.", info->address);
perf_event_disable(bp); perf_event_disable_inatomic(bp);
goto out; goto out;
} }
/* /*


@ -90,6 +90,7 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
* Threads will spin in HMT_LOW until the lock bit is cleared. * Threads will spin in HMT_LOW until the lock bit is cleared.
* r14 - pointer to core_idle_state * r14 - pointer to core_idle_state
* r15 - used to load contents of core_idle_state * r15 - used to load contents of core_idle_state
* r9 - used as a temporary variable
*/ */
core_idle_lock_held: core_idle_lock_held:
@ -99,6 +100,8 @@ core_idle_lock_held:
bne 3b bne 3b
HMT_MEDIUM HMT_MEDIUM
lwarx r15,0,r14 lwarx r15,0,r14
andi. r9,r15,PNV_CORE_IDLE_LOCK_BIT
bne core_idle_lock_held
blr blr
/* /*
@ -163,12 +166,6 @@ _GLOBAL(pnv_powersave_common)
std r9,_MSR(r1) std r9,_MSR(r1)
std r1,PACAR1(r13) std r1,PACAR1(r13)
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/* Tell KVM we're entering idle */
li r4,KVM_HWTHREAD_IN_IDLE
stb r4,HSTATE_HWTHREAD_STATE(r13)
#endif
/* /*
* Go to real mode to do the nap, as required by the architecture. * Go to real mode to do the nap, as required by the architecture.
* Also, we need to be in real mode before setting hwthread_state, * Also, we need to be in real mode before setting hwthread_state,
@ -185,6 +182,26 @@ _GLOBAL(pnv_powersave_common)
.globl pnv_enter_arch207_idle_mode .globl pnv_enter_arch207_idle_mode
pnv_enter_arch207_idle_mode: pnv_enter_arch207_idle_mode:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/* Tell KVM we're entering idle */
li r4,KVM_HWTHREAD_IN_IDLE
/******************************************************/
/* N O T E W E L L ! ! ! N O T E W E L L */
/* The following store to HSTATE_HWTHREAD_STATE(r13) */
/* MUST occur in real mode, i.e. with the MMU off, */
/* and the MMU must stay off until we clear this flag */
/* and test HSTATE_HWTHREAD_REQ(r13) in the system */
/* reset interrupt vector in exceptions-64s.S. */
/* The reason is that another thread can switch the */
/* MMU to a guest context whenever this flag is set */
/* to KVM_HWTHREAD_IN_IDLE, and if the MMU was on, */
/* that would potentially cause this thread to start */
/* executing instructions from guest memory in */
/* hypervisor mode, leading to a host crash or data */
/* corruption, or worse. */
/******************************************************/
stb r4,HSTATE_HWTHREAD_STATE(r13)
#endif
stb r3,PACA_THREAD_IDLE_STATE(r13) stb r3,PACA_THREAD_IDLE_STATE(r13)
cmpwi cr3,r3,PNV_THREAD_SLEEP cmpwi cr3,r3,PNV_THREAD_SLEEP
bge cr3,2f bge cr3,2f
@ -250,6 +267,12 @@ enter_winkle:
* r3 - requested stop state * r3 - requested stop state
*/ */
power_enter_stop: power_enter_stop:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/* Tell KVM we're entering idle */
li r4,KVM_HWTHREAD_IN_IDLE
/* DO THIS IN REAL MODE! See comment above. */
stb r4,HSTATE_HWTHREAD_STATE(r13)
#endif
/* /*
* Check if the requested state is a deep idle state. * Check if the requested state is a deep idle state.
*/ */


@ -1012,7 +1012,7 @@ void restore_tm_state(struct pt_regs *regs)
/* Ensure that restore_math() will restore */ /* Ensure that restore_math() will restore */
if (msr_diff & MSR_FP) if (msr_diff & MSR_FP)
current->thread.load_fp = 1; current->thread.load_fp = 1;
#ifdef CONFIG_ALIVEC #ifdef CONFIG_ALTIVEC
if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC) if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC)
current->thread.load_vec = 1; current->thread.load_vec = 1;
#endif #endif


@ -23,6 +23,7 @@
#include <asm/ppc-opcode.h> #include <asm/ppc-opcode.h>
#include <asm/pnv-pci.h> #include <asm/pnv-pci.h>
#include <asm/opal.h> #include <asm/opal.h>
#include <asm/smp.h>
#include "book3s_xics.h" #include "book3s_xics.h"


@ -175,7 +175,7 @@ void radix__flush_tlb_mm(struct mm_struct *mm)
if (unlikely(pid == MMU_NO_CONTEXT)) if (unlikely(pid == MMU_NO_CONTEXT))
goto no_context; goto no_context;
if (!mm_is_core_local(mm)) { if (!mm_is_thread_local(mm)) {
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
if (lock_tlbie) if (lock_tlbie)
@ -201,7 +201,7 @@ void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
if (unlikely(pid == MMU_NO_CONTEXT)) if (unlikely(pid == MMU_NO_CONTEXT))
goto no_context; goto no_context;
if (!mm_is_core_local(mm)) { if (!mm_is_thread_local(mm)) {
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
if (lock_tlbie) if (lock_tlbie)
@ -226,7 +226,7 @@ void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
pid = mm ? mm->context.id : 0; pid = mm ? mm->context.id : 0;
if (unlikely(pid == MMU_NO_CONTEXT)) if (unlikely(pid == MMU_NO_CONTEXT))
goto bail; goto bail;
if (!mm_is_core_local(mm)) { if (!mm_is_thread_local(mm)) {
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
if (lock_tlbie) if (lock_tlbie)
@ -321,7 +321,7 @@ void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
{ {
unsigned long pid; unsigned long pid;
unsigned long addr; unsigned long addr;
int local = mm_is_core_local(mm); int local = mm_is_thread_local(mm);
unsigned long ap = mmu_get_ap(psize); unsigned long ap = mmu_get_ap(psize);
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
unsigned long page_size = 1UL << mmu_psize_defs[psize].shift; unsigned long page_size = 1UL << mmu_psize_defs[psize].shift;


@ -12,9 +12,7 @@
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
unsigned long return_address(int depth); #define ftrace_return_address(n) __builtin_return_address(n)
#define ftrace_return_address(n) return_address(n)
void _mcount(void); void _mcount(void);
void ftrace_caller(void); void ftrace_caller(void);


@ -192,7 +192,7 @@ struct task_struct;
struct mm_struct; struct mm_struct;
struct seq_file; struct seq_file;
typedef int (*dump_trace_func_t)(void *data, unsigned long address); typedef int (*dump_trace_func_t)(void *data, unsigned long address, int reliable);
void dump_trace(dump_trace_func_t func, void *data, void dump_trace(dump_trace_func_t func, void *data,
struct task_struct *task, unsigned long sp); struct task_struct *task, unsigned long sp);


@ -9,6 +9,9 @@
#include <uapi/asm/unistd.h> #include <uapi/asm/unistd.h>
#define __IGNORE_time #define __IGNORE_time
#define __IGNORE_pkey_mprotect
#define __IGNORE_pkey_alloc
#define __IGNORE_pkey_free
#define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_SYS_ALARM #define __ARCH_WANT_SYS_ALARM


@ -2014,12 +2014,12 @@ void show_code(struct pt_regs *regs)
*ptr++ = '\t'; *ptr++ = '\t';
ptr += print_insn(ptr, code + start, addr); ptr += print_insn(ptr, code + start, addr);
start += opsize; start += opsize;
printk("%s", buffer); pr_cont("%s", buffer);
ptr = buffer; ptr = buffer;
ptr += sprintf(ptr, "\n "); ptr += sprintf(ptr, "\n ");
hops++; hops++;
} }
printk("\n"); pr_cont("\n");
} }
void print_fn_code(unsigned char *code, unsigned long len) void print_fn_code(unsigned char *code, unsigned long len)


@ -38,10 +38,10 @@ __dump_trace(dump_trace_func_t func, void *data, unsigned long sp,
if (sp < low || sp > high - sizeof(*sf)) if (sp < low || sp > high - sizeof(*sf))
return sp; return sp;
sf = (struct stack_frame *) sp; sf = (struct stack_frame *) sp;
if (func(data, sf->gprs[8], 0))
return sp;
/* Follow the backchain. */ /* Follow the backchain. */
while (1) { while (1) {
if (func(data, sf->gprs[8]))
return sp;
low = sp; low = sp;
sp = sf->back_chain; sp = sf->back_chain;
if (!sp) if (!sp)
@ -49,6 +49,8 @@ __dump_trace(dump_trace_func_t func, void *data, unsigned long sp,
if (sp <= low || sp > high - sizeof(*sf)) if (sp <= low || sp > high - sizeof(*sf))
return sp; return sp;
sf = (struct stack_frame *) sp; sf = (struct stack_frame *) sp;
if (func(data, sf->gprs[8], 1))
return sp;
} }
/* Zero backchain detected, check for interrupt frame. */ /* Zero backchain detected, check for interrupt frame. */
sp = (unsigned long) (sf + 1); sp = (unsigned long) (sf + 1);
@ -56,7 +58,7 @@ __dump_trace(dump_trace_func_t func, void *data, unsigned long sp,
return sp; return sp;
regs = (struct pt_regs *) sp; regs = (struct pt_regs *) sp;
if (!user_mode(regs)) { if (!user_mode(regs)) {
if (func(data, regs->psw.addr)) if (func(data, regs->psw.addr, 1))
return sp; return sp;
} }
low = sp; low = sp;
@ -85,33 +87,12 @@ void dump_trace(dump_trace_func_t func, void *data, struct task_struct *task,
} }
EXPORT_SYMBOL_GPL(dump_trace); EXPORT_SYMBOL_GPL(dump_trace);
struct return_address_data { static int show_address(void *data, unsigned long address, int reliable)
unsigned long address;
int depth;
};
static int __return_address(void *data, unsigned long address)
{ {
struct return_address_data *rd = data; if (reliable)
printk(" [<%016lx>] %pSR \n", address, (void *)address);
if (rd->depth--) else
return 0; printk("([<%016lx>] %pSR)\n", address, (void *)address);
rd->address = address;
return 1;
}
unsigned long return_address(int depth)
{
struct return_address_data rd = { .depth = depth + 2 };
dump_trace(__return_address, &rd, NULL, current_stack_pointer());
return rd.address;
}
EXPORT_SYMBOL_GPL(return_address);
static int show_address(void *data, unsigned long address)
{
printk("([<%016lx>] %pSR)\n", address, (void *)address);
return 0; return 0;
} }
@ -138,14 +119,14 @@ void show_stack(struct task_struct *task, unsigned long *sp)
else else
stack = (unsigned long *)task->thread.ksp; stack = (unsigned long *)task->thread.ksp;
} }
printk(KERN_DEFAULT "Stack:\n");
for (i = 0; i < 20; i++) { for (i = 0; i < 20; i++) {
if (((addr_t) stack & (THREAD_SIZE-1)) == 0) if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
break; break;
if ((i * sizeof(long) % 32) == 0) if (i % 4 == 0)
printk("%s ", i == 0 ? "" : "\n"); printk(KERN_DEFAULT " ");
printk("%016lx ", *stack++); pr_cont("%016lx%c", *stack++, i % 4 == 3 ? '\n' : ' ');
} }
printk("\n");
show_trace(task, (unsigned long)sp); show_trace(task, (unsigned long)sp);
} }
@ -163,13 +144,13 @@ void show_registers(struct pt_regs *regs)
mode = user_mode(regs) ? "User" : "Krnl"; mode = user_mode(regs) ? "User" : "Krnl";
printk("%s PSW : %p %p", mode, (void *)regs->psw.mask, (void *)regs->psw.addr); printk("%s PSW : %p %p", mode, (void *)regs->psw.mask, (void *)regs->psw.addr);
if (!user_mode(regs)) if (!user_mode(regs))
printk(" (%pSR)", (void *)regs->psw.addr); pr_cont(" (%pSR)", (void *)regs->psw.addr);
printk("\n"); pr_cont("\n");
printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x " printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
"P:%x AS:%x CC:%x PM:%x", psw->r, psw->t, psw->i, psw->e, "P:%x AS:%x CC:%x PM:%x", psw->r, psw->t, psw->i, psw->e,
psw->key, psw->m, psw->w, psw->p, psw->as, psw->cc, psw->pm); psw->key, psw->m, psw->w, psw->p, psw->as, psw->cc, psw->pm);
printk(" RI:%x EA:%x", psw->ri, psw->eaba); pr_cont(" RI:%x EA:%x\n", psw->ri, psw->eaba);
printk("\n%s GPRS: %016lx %016lx %016lx %016lx\n", mode, printk("%s GPRS: %016lx %016lx %016lx %016lx\n", mode,
regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]); regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
printk(" %016lx %016lx %016lx %016lx\n", printk(" %016lx %016lx %016lx %016lx\n",
regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]); regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
@ -205,14 +186,14 @@ void die(struct pt_regs *regs, const char *str)
printk("%s: %04x ilc:%d [#%d] ", str, regs->int_code & 0xffff, printk("%s: %04x ilc:%d [#%d] ", str, regs->int_code & 0xffff,
regs->int_code >> 17, ++die_counter); regs->int_code >> 17, ++die_counter);
#ifdef CONFIG_PREEMPT #ifdef CONFIG_PREEMPT
printk("PREEMPT "); pr_cont("PREEMPT ");
#endif #endif
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
printk("SMP "); pr_cont("SMP ");
#endif #endif
if (debug_pagealloc_enabled()) if (debug_pagealloc_enabled())
printk("DEBUG_PAGEALLOC"); pr_cont("DEBUG_PAGEALLOC");
printk("\n"); pr_cont("\n");
notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV); notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
print_modules(); print_modules();
show_regs(regs); show_regs(regs);


@ -222,7 +222,7 @@ static int __init service_level_perf_register(void)
} }
arch_initcall(service_level_perf_register); arch_initcall(service_level_perf_register);
static int __perf_callchain_kernel(void *data, unsigned long address) static int __perf_callchain_kernel(void *data, unsigned long address, int reliable)
{ {
struct perf_callchain_entry_ctx *entry = data; struct perf_callchain_entry_ctx *entry = data;


@ -27,12 +27,12 @@ static int __save_address(void *data, unsigned long address, int nosched)
return 1; return 1;
} }
static int save_address(void *data, unsigned long address) static int save_address(void *data, unsigned long address, int reliable)
{ {
return __save_address(data, address, 0); return __save_address(data, address, 0);
} }
static int save_address_nosched(void *data, unsigned long address) static int save_address_nosched(void *data, unsigned long address, int reliable)
{ {
return __save_address(data, address, 1); return __save_address(data, address, 1);
} }


@ -217,6 +217,7 @@ static __init int setup_hugepagesz(char *opt)
} else if (MACHINE_HAS_EDAT2 && size == PUD_SIZE) { } else if (MACHINE_HAS_EDAT2 && size == PUD_SIZE) {
hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT); hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
} else { } else {
hugetlb_bad_size();
pr_err("hugepagesz= specifies an unsupported page size %s\n", pr_err("hugepagesz= specifies an unsupported page size %s\n",
string); string);
return 0; return 0;


@ -151,36 +151,40 @@ void __init free_initrd_mem(unsigned long start, unsigned long end)
#ifdef CONFIG_MEMORY_HOTPLUG #ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size, bool for_device) int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
{ {
unsigned long normal_end_pfn = PFN_DOWN(memblock_end_of_DRAM()); unsigned long zone_start_pfn, zone_end_pfn, nr_pages;
unsigned long dma_end_pfn = PFN_DOWN(MAX_DMA_ADDRESS);
unsigned long start_pfn = PFN_DOWN(start); unsigned long start_pfn = PFN_DOWN(start);
unsigned long size_pages = PFN_DOWN(size); unsigned long size_pages = PFN_DOWN(size);
unsigned long nr_pages; pg_data_t *pgdat = NODE_DATA(nid);
int rc, zone_enum; struct zone *zone;
int rc, i;
rc = vmem_add_mapping(start, size); rc = vmem_add_mapping(start, size);
if (rc) if (rc)
return rc; return rc;
while (size_pages > 0) { for (i = 0; i < MAX_NR_ZONES; i++) {
if (start_pfn < dma_end_pfn) { zone = pgdat->node_zones + i;
nr_pages = (start_pfn + size_pages > dma_end_pfn) ? if (zone_idx(zone) != ZONE_MOVABLE) {
dma_end_pfn - start_pfn : size_pages; /* Add range within existing zone limits, if possible */
zone_enum = ZONE_DMA; zone_start_pfn = zone->zone_start_pfn;
} else if (start_pfn < normal_end_pfn) { zone_end_pfn = zone->zone_start_pfn +
nr_pages = (start_pfn + size_pages > normal_end_pfn) ? zone->spanned_pages;
normal_end_pfn - start_pfn : size_pages;
zone_enum = ZONE_NORMAL;
} else { } else {
nr_pages = size_pages; /* Add remaining range to ZONE_MOVABLE */
zone_enum = ZONE_MOVABLE; zone_start_pfn = start_pfn;
zone_end_pfn = start_pfn + size_pages;
} }
rc = __add_pages(nid, NODE_DATA(nid)->node_zones + zone_enum, if (start_pfn < zone_start_pfn || start_pfn >= zone_end_pfn)
start_pfn, size_pages); continue;
nr_pages = (start_pfn + size_pages > zone_end_pfn) ?
zone_end_pfn - start_pfn : size_pages;
rc = __add_pages(nid, zone, start_pfn, nr_pages);
if (rc) if (rc)
break; break;
start_pfn += nr_pages; start_pfn += nr_pages;
size_pages -= nr_pages; size_pages -= nr_pages;
if (!size_pages)
break;
} }
if (rc) if (rc)
vmem_remove_mapping(start, size); vmem_remove_mapping(start, size);


@ -13,7 +13,7 @@
#include <linux/init.h> #include <linux/init.h>
#include <asm/processor.h> #include <asm/processor.h>
static int __s390_backtrace(void *data, unsigned long address) static int __s390_backtrace(void *data, unsigned long address, int reliable)
{ {
unsigned int *depth = data; unsigned int *depth = data;


@ -5,8 +5,8 @@
OBJECT_FILES_NON_STANDARD_entry_$(BITS).o := y OBJECT_FILES_NON_STANDARD_entry_$(BITS).o := y
OBJECT_FILES_NON_STANDARD_entry_64_compat.o := y OBJECT_FILES_NON_STANDARD_entry_64_compat.o := y
CFLAGS_syscall_64.o += -Wno-override-init CFLAGS_syscall_64.o += $(call cc-option,-Wno-override-init,)
CFLAGS_syscall_32.o += -Wno-override-init CFLAGS_syscall_32.o += $(call cc-option,-Wno-override-init,)
obj-y := entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o obj-y := entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o
obj-y += common.o obj-y += common.o


@ -3607,10 +3607,14 @@ __init int intel_pmu_init(void)
/* /*
* Quirk: v2 perfmon does not report fixed-purpose events, so * Quirk: v2 perfmon does not report fixed-purpose events, so
* assume at least 3 events: * assume at least 3 events, when not running in a hypervisor:
*/ */
if (version > 1) if (version > 1) {
x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3); int assume = 3 * !boot_cpu_has(X86_FEATURE_HYPERVISOR);
x86_pmu.num_counters_fixed =
max((int)edx.split.num_counters_fixed, assume);
}
if (boot_cpu_has(X86_FEATURE_PDCM)) { if (boot_cpu_has(X86_FEATURE_PDCM)) {
u64 capabilities; u64 capabilities;


@ -48,7 +48,8 @@
* Scope: Core * Scope: Core
* MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter * MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter
* perf code: 0x02 * perf code: 0x02
* Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,SKL * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW
* SKL,KNL
* Scope: Core * Scope: Core
* MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter * MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
* perf code: 0x03 * perf code: 0x03
@ -56,15 +57,16 @@
* Scope: Core * Scope: Core
* MSR_PKG_C2_RESIDENCY: Package C2 Residency Counter. * MSR_PKG_C2_RESIDENCY: Package C2 Residency Counter.
* perf code: 0x00 * perf code: 0x00
* Available model: SNB,IVB,HSW,BDW,SKL * Available model: SNB,IVB,HSW,BDW,SKL,KNL
* Scope: Package (physical package) * Scope: Package (physical package)
* MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter. * MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter.
* perf code: 0x01 * perf code: 0x01
* Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL
* Scope: Package (physical package) * Scope: Package (physical package)
* MSR_PKG_C6_RESIDENCY: Package C6 Residency Counter. * MSR_PKG_C6_RESIDENCY: Package C6 Residency Counter.
* perf code: 0x02 * perf code: 0x02
* Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,SKL * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW
* SKL,KNL
* Scope: Package (physical package) * Scope: Package (physical package)
* MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter. * MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter.
* perf code: 0x03 * perf code: 0x03
@ -118,6 +120,7 @@ struct cstate_model {
/* Quirk flags */ /* Quirk flags */
#define SLM_PKG_C6_USE_C7_MSR (1UL << 0) #define SLM_PKG_C6_USE_C7_MSR (1UL << 0)
#define KNL_CORE_C6_MSR (1UL << 1)
struct perf_cstate_msr { struct perf_cstate_msr {
u64 msr; u64 msr;
@ -488,6 +491,18 @@ static const struct cstate_model slm_cstates __initconst = {
.quirks = SLM_PKG_C6_USE_C7_MSR, .quirks = SLM_PKG_C6_USE_C7_MSR,
}; };
static const struct cstate_model knl_cstates __initconst = {
.core_events = BIT(PERF_CSTATE_CORE_C6_RES),
.pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) |
BIT(PERF_CSTATE_PKG_C3_RES) |
BIT(PERF_CSTATE_PKG_C6_RES),
.quirks = KNL_CORE_C6_MSR,
};
#define X86_CSTATES_MODEL(model, states) \ #define X86_CSTATES_MODEL(model, states) \
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long) &(states) } { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long) &(states) }
@ -523,6 +538,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_MOBILE, snb_cstates), X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_MOBILE, snb_cstates),
X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates), X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates),
X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNL, knl_cstates),
{ }, { },
}; };
MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match); MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
@ -558,6 +575,11 @@ static int __init cstate_probe(const struct cstate_model *cm)
if (cm->quirks & SLM_PKG_C6_USE_C7_MSR) if (cm->quirks & SLM_PKG_C6_USE_C7_MSR)
pkg_msr[PERF_CSTATE_PKG_C6_RES].msr = MSR_PKG_C7_RESIDENCY; pkg_msr[PERF_CSTATE_PKG_C6_RES].msr = MSR_PKG_C7_RESIDENCY;
/* KNL has different MSR for CORE C6 */
if (cm->quirks & KNL_CORE_C6_MSR)
pkg_msr[PERF_CSTATE_CORE_C6_RES].msr = MSR_KNL_CORE_C6_RESIDENCY;
has_cstate_core = cstate_probe_msr(cm->core_events, has_cstate_core = cstate_probe_msr(cm->core_events,
PERF_CSTATE_CORE_EVENT_MAX, PERF_CSTATE_CORE_EVENT_MAX,
core_msr, core_events_attrs); core_msr, core_events_attrs);


@ -351,4 +351,10 @@ extern void arch_phys_wc_del(int handle);
#define arch_phys_wc_add arch_phys_wc_add #define arch_phys_wc_add arch_phys_wc_add
#endif #endif
#ifdef CONFIG_X86_PAT
extern int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size);
extern void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size);
#define arch_io_reserve_memtype_wc arch_io_reserve_memtype_wc
#endif
#endif /* _ASM_X86_IO_H */ #endif /* _ASM_X86_IO_H */


@ -454,6 +454,7 @@ static void __init acpi_sci_ioapic_setup(u8 bus_irq, u16 polarity, u16 trigger,
polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK; polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK;
mp_override_legacy_irq(bus_irq, polarity, trigger, gsi); mp_override_legacy_irq(bus_irq, polarity, trigger, gsi);
acpi_penalize_sci_irq(bus_irq, trigger, polarity);
/* /*
* stash over-ride to indicate we've been here * stash over-ride to indicate we've been here


@ -429,7 +429,7 @@ int __init save_microcode_in_initrd_amd(void)
* We need the physical address of the container for both bitness since * We need the physical address of the container for both bitness since
* boot_params.hdr.ramdisk_image is a physical address. * boot_params.hdr.ramdisk_image is a physical address.
*/ */
cont = __pa(container); cont = __pa_nodebug(container);
cont_va = container; cont_va = container;
#endif #endif


@ -18,8 +18,10 @@
#ifdef CC_USING_FENTRY #ifdef CC_USING_FENTRY
# define function_hook __fentry__ # define function_hook __fentry__
EXPORT_SYMBOL(__fentry__)
#else #else
# define function_hook mcount # define function_hook mcount
EXPORT_SYMBOL(mcount)
#endif #endif
/* All cases save the original rbp (8 bytes) */ /* All cases save the original rbp (8 bytes) */
@ -295,7 +297,6 @@ trace:
jmp fgraph_trace jmp fgraph_trace
END(function_hook) END(function_hook)
#endif /* CONFIG_DYNAMIC_FTRACE */ #endif /* CONFIG_DYNAMIC_FTRACE */
EXPORT_SYMBOL(function_hook)
#endif /* CONFIG_FUNCTION_TRACER */ #endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER #ifdef CONFIG_FUNCTION_GRAPH_TRACER


@ -625,8 +625,6 @@ static void amd_disable_seq_and_redirect_scrub(struct pci_dev *dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3, DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3,
amd_disable_seq_and_redirect_scrub); amd_disable_seq_and_redirect_scrub);
#endif
#if defined(CONFIG_X86_64) && defined(CONFIG_X86_MCE) #if defined(CONFIG_X86_64) && defined(CONFIG_X86_MCE)
#include <linux/jump_label.h> #include <linux/jump_label.h>
#include <asm/string_64.h> #include <asm/string_64.h>
@ -657,3 +655,4 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, quirk_intel_brickland_xeon_
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, quirk_intel_brickland_xeon_ras_cap); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, quirk_intel_brickland_xeon_ras_cap);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2083, quirk_intel_purley_xeon_ras_cap); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2083, quirk_intel_purley_xeon_ras_cap);
#endif #endif
#endif


@ -1221,11 +1221,16 @@ void __init setup_arch(char **cmdline_p)
*/ */
get_smp_config(); get_smp_config();
/*
* Systems w/o ACPI and mptables might not have it mapped the local
* APIC yet, but prefill_possible_map() might need to access it.
*/
init_apic_mappings();
prefill_possible_map(); prefill_possible_map();
init_cpu_to_node(); init_cpu_to_node();
init_apic_mappings();
io_apic_init_mappings(); io_apic_init_mappings();
kvm_guest_init(); kvm_guest_init();


@ -47,7 +47,14 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
get_stack_info(first_frame, state->task, &state->stack_info, get_stack_info(first_frame, state->task, &state->stack_info,
&state->stack_mask); &state->stack_mask);
if (!__kernel_text_address(*first_frame)) /*
* The caller can provide the address of the first frame directly
* (first_frame) or indirectly (regs->sp) to indicate which stack frame
* to start unwinding at. Skip ahead until we reach it.
*/
if (!unwind_done(state) &&
(!on_stack(&state->stack_info, first_frame, sizeof(long)) ||
!__kernel_text_address(*first_frame)))
unwind_next_frame(state); unwind_next_frame(state);
} }
EXPORT_SYMBOL_GPL(__unwind_start); EXPORT_SYMBOL_GPL(__unwind_start);


@ -104,10 +104,10 @@ void __init kernel_randomize_memory(void)
* consistent with the vaddr_start/vaddr_end variables. * consistent with the vaddr_start/vaddr_end variables.
*/ */
BUILD_BUG_ON(vaddr_start >= vaddr_end); BUILD_BUG_ON(vaddr_start >= vaddr_end);
BUILD_BUG_ON(config_enabled(CONFIG_X86_ESPFIX64) && BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_ESPFIX64) &&
vaddr_end >= EFI_VA_START); vaddr_end >= EFI_VA_START);
BUILD_BUG_ON((config_enabled(CONFIG_X86_ESPFIX64) || BUILD_BUG_ON((IS_ENABLED(CONFIG_X86_ESPFIX64) ||
config_enabled(CONFIG_EFI)) && IS_ENABLED(CONFIG_EFI)) &&
vaddr_end >= __START_KERNEL_map); vaddr_end >= __START_KERNEL_map);
BUILD_BUG_ON(vaddr_end > __START_KERNEL_map); BUILD_BUG_ON(vaddr_end > __START_KERNEL_map);


@ -730,6 +730,20 @@ void io_free_memtype(resource_size_t start, resource_size_t end)
free_memtype(start, end); free_memtype(start, end);
} }
int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size)
{
enum page_cache_mode type = _PAGE_CACHE_MODE_WC;
return io_reserve_memtype(start, start + size, &type);
}
EXPORT_SYMBOL(arch_io_reserve_memtype_wc);
void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size)
{
io_free_memtype(start, start + size);
}
EXPORT_SYMBOL(arch_io_free_memtype_wc);
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot) unsigned long size, pgprot_t vma_prot)
{ {


@ -1837,6 +1837,7 @@ static void __init init_hvm_pv_info(void)
xen_domain_type = XEN_HVM_DOMAIN; xen_domain_type = XEN_HVM_DOMAIN;
} }
#endif
static int xen_cpu_up_prepare(unsigned int cpu) static int xen_cpu_up_prepare(unsigned int cpu)
{ {
@ -1887,6 +1888,7 @@ static int xen_cpu_up_online(unsigned int cpu)
return 0; return 0;
} }
#ifdef CONFIG_XEN_PVHVM
#ifdef CONFIG_KEXEC_CORE #ifdef CONFIG_KEXEC_CORE
static void xen_hvm_shutdown(void) static void xen_hvm_shutdown(void)
{ {


@ -133,6 +133,26 @@ retry:
} }
EXPORT_SYMBOL_GPL(badblocks_check); EXPORT_SYMBOL_GPL(badblocks_check);
static void badblocks_update_acked(struct badblocks *bb)
{
u64 *p = bb->page;
int i;
bool unacked = false;
if (!bb->unacked_exist)
return;
for (i = 0; i < bb->count ; i++) {
if (!BB_ACK(p[i])) {
unacked = true;
break;
}
}
if (!unacked)
bb->unacked_exist = 0;
}
/** /**
* badblocks_set() - Add a range of bad blocks to the table. * badblocks_set() - Add a range of bad blocks to the table.
* @bb: the badblocks structure that holds all badblock information * @bb: the badblocks structure that holds all badblock information
@ -294,6 +314,8 @@ int badblocks_set(struct badblocks *bb, sector_t s, int sectors,
bb->changed = 1; bb->changed = 1;
if (!acknowledged) if (!acknowledged)
bb->unacked_exist = 1; bb->unacked_exist = 1;
else
badblocks_update_acked(bb);
write_sequnlock_irqrestore(&bb->lock, flags); write_sequnlock_irqrestore(&bb->lock, flags);
return rv; return rv;
@ -401,6 +423,7 @@ int badblocks_clear(struct badblocks *bb, sector_t s, int sectors)
} }
} }
badblocks_update_acked(bb);
bb->changed = 1; bb->changed = 1;
out: out:
write_sequnlock_irq(&bb->lock); write_sequnlock_irq(&bb->lock);


@ -342,6 +342,34 @@ static void flush_data_end_io(struct request *rq, int error)
struct request_queue *q = rq->q; struct request_queue *q = rq->q;
struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL); struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
/*
* Updating q->in_flight[] here for making this tag usable
* early. Because in blk_queue_start_tag(),
* q->in_flight[BLK_RW_ASYNC] is used to limit async I/O and
* reserve tags for sync I/O.
*
* More importantly this way can avoid the following I/O
* deadlock:
*
* - suppose there are 40 fua requests comming to flush queue
* and queue depth is 31
* - 30 rqs are scheduled then blk_queue_start_tag() can't alloc
* tag for async I/O any more
* - all the 30 rqs are completed before FLUSH_PENDING_TIMEOUT
* and flush_data_end_io() is called
* - the other rqs still can't go ahead if not updating
* q->in_flight[BLK_RW_ASYNC] here, meantime these rqs
* are held in flush data queue and make no progress of
* handling post flush rq
* - only after the post flush rq is handled, all these rqs
* can be completed
*/
elv_completed_request(q, rq);
/* for avoiding double accounting */
rq->cmd_flags &= ~REQ_STARTED;
/* /*
* After populating an empty queue, kick it to avoid stall. Read * After populating an empty queue, kick it to avoid stall. Read
* the comment in flush_end_io(). * the comment in flush_end_io().


@ -1217,9 +1217,9 @@ static struct request *blk_mq_map_request(struct request_queue *q,
blk_mq_set_alloc_data(&alloc_data, q, 0, ctx, hctx); blk_mq_set_alloc_data(&alloc_data, q, 0, ctx, hctx);
rq = __blk_mq_alloc_request(&alloc_data, op, op_flags); rq = __blk_mq_alloc_request(&alloc_data, op, op_flags);
hctx->queued++; data->hctx = alloc_data.hctx;
data->hctx = hctx; data->ctx = alloc_data.ctx;
data->ctx = ctx; data->hctx->queued++;
return rq; return rq;
} }


@ -46,6 +46,7 @@
#include "acdispat.h" #include "acdispat.h"
#include "acnamesp.h" #include "acnamesp.h"
#include "actables.h" #include "actables.h"
#include "acinterp.h"
#define _COMPONENT ACPI_DISPATCHER #define _COMPONENT ACPI_DISPATCHER
ACPI_MODULE_NAME("dsinit") ACPI_MODULE_NAME("dsinit")
@ -214,23 +215,17 @@ acpi_ds_initialize_objects(u32 table_index,
/* Walk entire namespace from the supplied root */ /* Walk entire namespace from the supplied root */
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/* /*
* We don't use acpi_walk_namespace since we do not want to acquire * We don't use acpi_walk_namespace since we do not want to acquire
* the namespace reader lock. * the namespace reader lock.
*/ */
status = status =
acpi_ns_walk_namespace(ACPI_TYPE_ANY, start_node, ACPI_UINT32_MAX, acpi_ns_walk_namespace(ACPI_TYPE_ANY, start_node, ACPI_UINT32_MAX,
ACPI_NS_WALK_UNLOCK, acpi_ds_init_one_object, 0, acpi_ds_init_one_object, NULL, &info,
NULL, &info, NULL); NULL);
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status, "During WalkNamespace")); ACPI_EXCEPTION((AE_INFO, status, "During WalkNamespace"));
} }
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
status = acpi_get_table_by_index(table_index, &table); status = acpi_get_table_by_index(table_index, &table);
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {


@ -99,14 +99,11 @@ acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
"Method auto-serialization parse [%4.4s] %p\n", "Method auto-serialization parse [%4.4s] %p\n",
acpi_ut_get_node_name(node), node)); acpi_ut_get_node_name(node), node));
acpi_ex_enter_interpreter();
/* Create/Init a root op for the method parse tree */ /* Create/Init a root op for the method parse tree */
op = acpi_ps_alloc_op(AML_METHOD_OP, obj_desc->method.aml_start); op = acpi_ps_alloc_op(AML_METHOD_OP, obj_desc->method.aml_start);
if (!op) { if (!op) {
status = AE_NO_MEMORY; return_ACPI_STATUS(AE_NO_MEMORY);
goto unlock;
} }
acpi_ps_set_name(op, node->name.integer); acpi_ps_set_name(op, node->name.integer);
@ -118,8 +115,7 @@ acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
acpi_ds_create_walk_state(node->owner_id, NULL, NULL, NULL); acpi_ds_create_walk_state(node->owner_id, NULL, NULL, NULL);
if (!walk_state) { if (!walk_state) {
acpi_ps_free_op(op); acpi_ps_free_op(op);
status = AE_NO_MEMORY; return_ACPI_STATUS(AE_NO_MEMORY);
goto unlock;
} }
status = acpi_ds_init_aml_walk(walk_state, op, node, status = acpi_ds_init_aml_walk(walk_state, op, node,
@ -138,8 +134,6 @@ acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
status = acpi_ps_parse_aml(walk_state); status = acpi_ps_parse_aml(walk_state);
acpi_ps_delete_parse_tree(op); acpi_ps_delete_parse_tree(op);
unlock:
acpi_ex_exit_interpreter();
return_ACPI_STATUS(status); return_ACPI_STATUS(status);
} }
@ -730,26 +724,6 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
acpi_ds_method_data_delete_all(walk_state); acpi_ds_method_data_delete_all(walk_state);
/*
* If method is serialized, release the mutex and restore the
* current sync level for this thread
*/
if (method_desc->method.mutex) {
/* Acquisition Depth handles recursive calls */
method_desc->method.mutex->mutex.acquisition_depth--;
if (!method_desc->method.mutex->mutex.acquisition_depth) {
walk_state->thread->current_sync_level =
method_desc->method.mutex->mutex.
original_sync_level;
acpi_os_release_mutex(method_desc->method.
mutex->mutex.os_mutex);
method_desc->method.mutex->mutex.thread_id = 0;
}
}
/* /*
* Delete any namespace objects created anywhere within the * Delete any namespace objects created anywhere within the
* namespace by the execution of this method. Unless: * namespace by the execution of this method. Unless:
@ -786,6 +760,26 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
~ACPI_METHOD_MODIFIED_NAMESPACE; ~ACPI_METHOD_MODIFIED_NAMESPACE;
} }
} }
/*
* If method is serialized, release the mutex and restore the
* current sync level for this thread
*/
if (method_desc->method.mutex) {
/* Acquisition Depth handles recursive calls */
method_desc->method.mutex->mutex.acquisition_depth--;
if (!method_desc->method.mutex->mutex.acquisition_depth) {
walk_state->thread->current_sync_level =
method_desc->method.mutex->mutex.
original_sync_level;
acpi_os_release_mutex(method_desc->method.
mutex->mutex.os_mutex);
method_desc->method.mutex->mutex.thread_id = 0;
}
}
} }
/* Decrement the thread count on the method */ /* Decrement the thread count on the method */


@ -607,11 +607,9 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
} }
} }
acpi_ex_exit_interpreter();
status = status =
acpi_ev_initialize_region acpi_ev_initialize_region
(acpi_ns_get_attached_object(node), FALSE); (acpi_ns_get_attached_object(node), FALSE);
acpi_ex_enter_interpreter();
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
/* /*


@ -45,6 +45,7 @@
#include "accommon.h" #include "accommon.h"
#include "acevents.h" #include "acevents.h"
#include "acnamesp.h" #include "acnamesp.h"
#include "acinterp.h"
#define _COMPONENT ACPI_EVENTS #define _COMPONENT ACPI_EVENTS
ACPI_MODULE_NAME("evrgnini") ACPI_MODULE_NAME("evrgnini")
@ -597,9 +598,11 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
} }
} }
acpi_ex_exit_interpreter();
status = status =
acpi_ev_execute_reg_method(region_obj, acpi_ev_execute_reg_method(region_obj,
ACPI_REG_CONNECT); ACPI_REG_CONNECT);
acpi_ex_enter_interpreter();
if (acpi_ns_locked) { if (acpi_ns_locked) {
status = status =


@ -137,7 +137,9 @@ unlock:
ACPI_DEBUG_PRINT((ACPI_DB_INFO, ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"**** Begin Table Object Initialization\n")); "**** Begin Table Object Initialization\n"));
acpi_ex_enter_interpreter();
status = acpi_ds_initialize_objects(table_index, node); status = acpi_ds_initialize_objects(table_index, node);
acpi_ex_exit_interpreter();
ACPI_DEBUG_PRINT((ACPI_DB_INFO, ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"**** Completed Table Object Initialization\n")); "**** Completed Table Object Initialization\n"));


@ -662,7 +662,7 @@ static int ghes_proc(struct ghes *ghes)
ghes_do_proc(ghes, ghes->estatus); ghes_do_proc(ghes, ghes->estatus);
out: out:
ghes_clear_estatus(ghes); ghes_clear_estatus(ghes);
return 0; return rc;
} }
static void ghes_add_timer(struct ghes *ghes) static void ghes_add_timer(struct ghes *ghes)

View File

@ -87,6 +87,7 @@ struct acpi_pci_link {
static LIST_HEAD(acpi_link_list); static LIST_HEAD(acpi_link_list);
static DEFINE_MUTEX(acpi_link_lock); static DEFINE_MUTEX(acpi_link_lock);
static int sci_irq = -1, sci_penalty;
/* -------------------------------------------------------------------------- /* --------------------------------------------------------------------------
PCI Link Device Management PCI Link Device Management
@ -496,25 +497,13 @@ static int acpi_irq_get_penalty(int irq)
{ {
int penalty = 0; int penalty = 0;
/* if (irq == sci_irq)
* Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict penalty += sci_penalty;
* with PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be
* use for PCI IRQs.
*/
if (irq == acpi_gbl_FADT.sci_interrupt) {
u32 type = irq_get_trigger_type(irq) & IRQ_TYPE_SENSE_MASK;
if (type != IRQ_TYPE_LEVEL_LOW)
penalty += PIRQ_PENALTY_ISA_ALWAYS;
else
penalty += PIRQ_PENALTY_PCI_USING;
}
if (irq < ACPI_MAX_ISA_IRQS) if (irq < ACPI_MAX_ISA_IRQS)
return penalty + acpi_isa_irq_penalty[irq]; return penalty + acpi_isa_irq_penalty[irq];
penalty += acpi_irq_pci_sharing_penalty(irq); return penalty + acpi_irq_pci_sharing_penalty(irq);
return penalty;
} }
int __init acpi_irq_penalty_init(void) int __init acpi_irq_penalty_init(void)
@ -619,6 +608,10 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
acpi_device_bid(link->device)); acpi_device_bid(link->device));
return -ENODEV; return -ENODEV;
} else { } else {
if (link->irq.active < ACPI_MAX_ISA_IRQS)
acpi_isa_irq_penalty[link->irq.active] +=
PIRQ_PENALTY_PCI_USING;
printk(KERN_WARNING PREFIX "%s [%s] enabled at IRQ %d\n", printk(KERN_WARNING PREFIX "%s [%s] enabled at IRQ %d\n",
acpi_device_name(link->device), acpi_device_name(link->device),
acpi_device_bid(link->device), link->irq.active); acpi_device_bid(link->device), link->irq.active);
@ -849,7 +842,7 @@ static int __init acpi_irq_penalty_update(char *str, int used)
continue; continue;
if (used) if (used)
new_penalty = acpi_irq_get_penalty(irq) + new_penalty = acpi_isa_irq_penalty[irq] +
PIRQ_PENALTY_ISA_USED; PIRQ_PENALTY_ISA_USED;
else else
new_penalty = 0; new_penalty = 0;
@ -871,7 +864,7 @@ static int __init acpi_irq_penalty_update(char *str, int used)
void acpi_penalize_isa_irq(int irq, int active) void acpi_penalize_isa_irq(int irq, int active)
{ {
if ((irq >= 0) && (irq < ARRAY_SIZE(acpi_isa_irq_penalty))) if ((irq >= 0) && (irq < ARRAY_SIZE(acpi_isa_irq_penalty)))
acpi_isa_irq_penalty[irq] = acpi_irq_get_penalty(irq) + acpi_isa_irq_penalty[irq] +=
(active ? PIRQ_PENALTY_ISA_USED : PIRQ_PENALTY_PCI_USING); (active ? PIRQ_PENALTY_ISA_USED : PIRQ_PENALTY_PCI_USING);
} }
@ -881,6 +874,17 @@ bool acpi_isa_irq_available(int irq)
acpi_irq_get_penalty(irq) < PIRQ_PENALTY_ISA_ALWAYS); acpi_irq_get_penalty(irq) < PIRQ_PENALTY_ISA_ALWAYS);
} }
void acpi_penalize_sci_irq(int irq, int trigger, int polarity)
{
sci_irq = irq;
if (trigger == ACPI_MADT_TRIGGER_LEVEL &&
polarity == ACPI_MADT_POLARITY_ACTIVE_LOW)
sci_penalty = PIRQ_PENALTY_PCI_USING;
else
sci_penalty = PIRQ_PENALTY_ISA_ALWAYS;
}
/* /*
* Over-ride default table to reserve additional IRQs for use by ISA * Over-ride default table to reserve additional IRQs for use by ISA
* e.g. acpi_irq_isa=5 * e.g. acpi_irq_isa=5


@ -1002,7 +1002,7 @@ static int binder_dec_node(struct binder_node *node, int strong, int internal)
static struct binder_ref *binder_get_ref(struct binder_proc *proc, static struct binder_ref *binder_get_ref(struct binder_proc *proc,
uint32_t desc) u32 desc, bool need_strong_ref)
{ {
struct rb_node *n = proc->refs_by_desc.rb_node; struct rb_node *n = proc->refs_by_desc.rb_node;
struct binder_ref *ref; struct binder_ref *ref;
@ -1010,12 +1010,16 @@ static struct binder_ref *binder_get_ref(struct binder_proc *proc,
while (n) { while (n) {
ref = rb_entry(n, struct binder_ref, rb_node_desc); ref = rb_entry(n, struct binder_ref, rb_node_desc);
if (desc < ref->desc) if (desc < ref->desc) {
n = n->rb_left; n = n->rb_left;
else if (desc > ref->desc) } else if (desc > ref->desc) {
n = n->rb_right; n = n->rb_right;
else } else if (need_strong_ref && !ref->strong) {
binder_user_error("tried to use weak ref as strong ref\n");
return NULL;
} else {
return ref; return ref;
}
} }
return NULL; return NULL;
} }
@ -1285,7 +1289,10 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
} break; } break;
case BINDER_TYPE_HANDLE: case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: { case BINDER_TYPE_WEAK_HANDLE: {
struct binder_ref *ref = binder_get_ref(proc, fp->handle); struct binder_ref *ref;
ref = binder_get_ref(proc, fp->handle,
fp->type == BINDER_TYPE_HANDLE);
if (ref == NULL) { if (ref == NULL) {
pr_err("transaction release %d bad handle %d\n", pr_err("transaction release %d bad handle %d\n",
@ -1380,7 +1387,7 @@ static void binder_transaction(struct binder_proc *proc,
if (tr->target.handle) { if (tr->target.handle) {
struct binder_ref *ref; struct binder_ref *ref;
ref = binder_get_ref(proc, tr->target.handle); ref = binder_get_ref(proc, tr->target.handle, true);
if (ref == NULL) { if (ref == NULL) {
binder_user_error("%d:%d got transaction to invalid handle\n", binder_user_error("%d:%d got transaction to invalid handle\n",
proc->pid, thread->pid); proc->pid, thread->pid);
@ -1577,7 +1584,9 @@ static void binder_transaction(struct binder_proc *proc,
fp->type = BINDER_TYPE_HANDLE; fp->type = BINDER_TYPE_HANDLE;
else else
fp->type = BINDER_TYPE_WEAK_HANDLE; fp->type = BINDER_TYPE_WEAK_HANDLE;
fp->binder = 0;
fp->handle = ref->desc; fp->handle = ref->desc;
fp->cookie = 0;
binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE, binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
&thread->todo); &thread->todo);
@ -1589,7 +1598,10 @@ static void binder_transaction(struct binder_proc *proc,
} break; } break;
case BINDER_TYPE_HANDLE: case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: { case BINDER_TYPE_WEAK_HANDLE: {
struct binder_ref *ref = binder_get_ref(proc, fp->handle); struct binder_ref *ref;
ref = binder_get_ref(proc, fp->handle,
fp->type == BINDER_TYPE_HANDLE);
if (ref == NULL) { if (ref == NULL) {
binder_user_error("%d:%d got transaction with invalid handle, %d\n", binder_user_error("%d:%d got transaction with invalid handle, %d\n",
@ -1624,7 +1636,9 @@ static void binder_transaction(struct binder_proc *proc,
return_error = BR_FAILED_REPLY; return_error = BR_FAILED_REPLY;
goto err_binder_get_ref_for_node_failed; goto err_binder_get_ref_for_node_failed;
} }
fp->binder = 0;
fp->handle = new_ref->desc; fp->handle = new_ref->desc;
fp->cookie = 0;
binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL); binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
trace_binder_transaction_ref_to_ref(t, ref, trace_binder_transaction_ref_to_ref(t, ref,
new_ref); new_ref);
@ -1678,6 +1692,7 @@ static void binder_transaction(struct binder_proc *proc,
binder_debug(BINDER_DEBUG_TRANSACTION, binder_debug(BINDER_DEBUG_TRANSACTION,
" fd %d -> %d\n", fp->handle, target_fd); " fd %d -> %d\n", fp->handle, target_fd);
/* TODO: fput? */ /* TODO: fput? */
fp->binder = 0;
fp->handle = target_fd; fp->handle = target_fd;
} break; } break;
@ -1800,7 +1815,9 @@ static int binder_thread_write(struct binder_proc *proc,
ref->desc); ref->desc);
} }
} else } else
ref = binder_get_ref(proc, target); ref = binder_get_ref(proc, target,
cmd == BC_ACQUIRE ||
cmd == BC_RELEASE);
if (ref == NULL) { if (ref == NULL) {
binder_user_error("%d:%d refcount change on invalid ref %d\n", binder_user_error("%d:%d refcount change on invalid ref %d\n",
proc->pid, thread->pid, target); proc->pid, thread->pid, target);
@ -1996,7 +2013,7 @@ static int binder_thread_write(struct binder_proc *proc,
if (get_user(cookie, (binder_uintptr_t __user *)ptr)) if (get_user(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT; return -EFAULT;
ptr += sizeof(binder_uintptr_t); ptr += sizeof(binder_uintptr_t);
ref = binder_get_ref(proc, target); ref = binder_get_ref(proc, target, false);
if (ref == NULL) { if (ref == NULL) {
binder_user_error("%d:%d %s invalid ref %d\n", binder_user_error("%d:%d %s invalid ref %d\n",
proc->pid, thread->pid, proc->pid, thread->pid,


@ -1418,30 +1418,33 @@ static int ahci_init_msi(struct pci_dev *pdev, unsigned int n_ports,
* Message mode could be enforced. In this case assume that advantage * Message mode could be enforced. In this case assume that advantage
* of multipe MSIs is negated and use single MSI mode instead. * of multipe MSIs is negated and use single MSI mode instead.
*/ */
nvec = pci_alloc_irq_vectors(pdev, n_ports, INT_MAX, if (n_ports > 1) {
PCI_IRQ_MSIX | PCI_IRQ_MSI); nvec = pci_alloc_irq_vectors(pdev, n_ports, INT_MAX,
if (nvec > 0) { PCI_IRQ_MSIX | PCI_IRQ_MSI);
if (!(readl(hpriv->mmio + HOST_CTL) & HOST_MRSM)) { if (nvec > 0) {
hpriv->get_irq_vector = ahci_get_irq_vector; if (!(readl(hpriv->mmio + HOST_CTL) & HOST_MRSM)) {
hpriv->flags |= AHCI_HFLAG_MULTI_MSI; hpriv->get_irq_vector = ahci_get_irq_vector;
return nvec; hpriv->flags |= AHCI_HFLAG_MULTI_MSI;
return nvec;
}
/*
* Fallback to single MSI mode if the controller
* enforced MRSM mode.
*/
printk(KERN_INFO
"ahci: MRSM is on, fallback to single MSI\n");
pci_free_irq_vectors(pdev);
} }
/* /*
* Fallback to single MSI mode if the controller enforced MRSM * -ENOSPC indicated we don't have enough vectors. Don't bother
* mode. * trying a single vectors for any other error:
*/ */
printk(KERN_INFO "ahci: MRSM is on, fallback to single MSI\n"); if (nvec < 0 && nvec != -ENOSPC)
pci_free_irq_vectors(pdev); return nvec;
} }
/*
* -ENOSPC indicated we don't have enough vectors. Don't bother trying
* a single vectors for any other error:
*/
if (nvec < 0 && nvec != -ENOSPC)
return nvec;
/* /*
* If the host is not capable of supporting per-port vectors, fall * If the host is not capable of supporting per-port vectors, fall
* back to single MSI before finally attempting single MSI-X. * back to single MSI before finally attempting single MSI-X.
@ -1617,7 +1620,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* legacy intx interrupts */ /* legacy intx interrupts */
pci_intx(pdev, 1); pci_intx(pdev, 1);
} }
hpriv->irq = pdev->irq; hpriv->irq = pci_irq_vector(pdev, 0);
if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss) if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
host->flags |= ATA_HOST_PARALLEL_SCAN; host->flags |= ATA_HOST_PARALLEL_SCAN;


@ -213,14 +213,16 @@ config DEBUG_DEVRES
If you are unsure about this, Say N here. If you are unsure about this, Say N here.
config DEBUG_TEST_DRIVER_REMOVE config DEBUG_TEST_DRIVER_REMOVE
bool "Test driver remove calls during probe" bool "Test driver remove calls during probe (UNSTABLE)"
depends on DEBUG_KERNEL depends on DEBUG_KERNEL
help help
Say Y here if you want the Driver core to test driver remove functions Say Y here if you want the Driver core to test driver remove functions
by calling probe, remove, probe. This tests the remove path without by calling probe, remove, probe. This tests the remove path without
having to unbind the driver or unload the driver module. having to unbind the driver or unload the driver module.
If you are unsure about this, say N here. This option is expected to find errors and may render your system
unusable. You should say N here unless you are explicitly looking to
test this functionality.
config SYS_HYPERVISOR config SYS_HYPERVISOR
bool bool


@ -2954,7 +2954,7 @@ DAC960_DetectController(struct pci_dev *PCI_Device,
case DAC960_PD_Controller: case DAC960_PD_Controller:
if (!request_region(Controller->IO_Address, 0x80, if (!request_region(Controller->IO_Address, 0x80,
Controller->FullModelName)) { Controller->FullModelName)) {
DAC960_Error("IO port 0x%d busy for Controller at\n", DAC960_Error("IO port 0x%lx busy for Controller at\n",
Controller, Controller->IO_Address); Controller, Controller->IO_Address);
goto Failure; goto Failure;
} }
@ -2990,7 +2990,7 @@ DAC960_DetectController(struct pci_dev *PCI_Device,
case DAC960_P_Controller: case DAC960_P_Controller:
if (!request_region(Controller->IO_Address, 0x80, if (!request_region(Controller->IO_Address, 0x80,
Controller->FullModelName)){ Controller->FullModelName)){
DAC960_Error("IO port 0x%d busy for Controller at\n", DAC960_Error("IO port 0x%lx busy for Controller at\n",
Controller, Controller->IO_Address); Controller, Controller->IO_Address);
goto Failure; goto Failure;
} }


@ -164,7 +164,7 @@ static void sock_shutdown(struct nbd_device *nbd)
spin_lock(&nbd->sock_lock); spin_lock(&nbd->sock_lock);
if (!nbd->sock) { if (!nbd->sock) {
spin_unlock_irq(&nbd->sock_lock); spin_unlock(&nbd->sock_lock);
return; return;
} }


@ -111,6 +111,7 @@ config OMAP_OCP2SCP
config QCOM_EBI2 config QCOM_EBI2
bool "Qualcomm External Bus Interface 2 (EBI2)" bool "Qualcomm External Bus Interface 2 (EBI2)"
depends on HAS_IOMEM depends on HAS_IOMEM
depends on ARCH_QCOM || COMPILE_TEST
help help
Say y here to enable support for the Qualcomm External Bus Say y here to enable support for the Qualcomm External Bus
Interface 2, which can be used to connect things like NAND Flash, Interface 2, which can be used to connect things like NAND Flash,

Some files were not shown because too many files have changed in this diff.