Merge 4.9-rc3 into char-misc-next
We need the binder patches in here so that other submitted patches can build on them and apply properly.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit d4608a83f7

CREDITS | 5
@@ -1864,10 +1864,11 @@ S: The Netherlands

N: Martin Kepplinger
E: martink@posteo.de
E: martin.kepplinger@theobroma-systems.com
E: martin.kepplinger@ginzinger.com
W: http://www.martinkepplinger.com
D: mma8452 accelerators iio driver
D: Kernel cleanups
D: pegasus_notetaker input driver
D: Kernel fixes and cleanups
S: Garnisonstraße 26
S: 4020 Linz
S: Austria

@@ -309,3 +309,4 @@ Version History
        with a reshape in progress.
1.9.0   Add support for RAID level takeover/reshape/region size
        and set size reduction.
1.9.1   Fix activation of existing RAID 4/10 mapped devices

@@ -24,7 +24,7 @@ Example:
                reg = <0x61840000 0x4000>;

                clock {
                        compatible = "socionext,uniphier-ld20-clock";
                        compatible = "socionext,uniphier-ld11-clock";
                        #clock-cells = <1>;
                };

@@ -43,8 +43,8 @@ Provided clocks:
21: USB3 ch1 PHY1


Media I/O (MIO) clock
---------------------
Media I/O (MIO) clock, SD clock
-------------------------------

Required properties:
- compatible: should be one of the following:

@@ -52,10 +52,10 @@ Required properties:
    "socionext,uniphier-ld4-mio-clock" - for LD4 SoC.
    "socionext,uniphier-pro4-mio-clock" - for Pro4 SoC.
    "socionext,uniphier-sld8-mio-clock" - for sLD8 SoC.
    "socionext,uniphier-pro5-mio-clock" - for Pro5 SoC.
    "socionext,uniphier-pxs2-mio-clock" - for PXs2/LD6b SoC.
    "socionext,uniphier-pro5-sd-clock" - for Pro5 SoC.
    "socionext,uniphier-pxs2-sd-clock" - for PXs2/LD6b SoC.
    "socionext,uniphier-ld11-mio-clock" - for LD11 SoC.
    "socionext,uniphier-ld20-mio-clock" - for LD20 SoC.
    "socionext,uniphier-ld20-sd-clock" - for LD20 SoC.
- #clock-cells: should be 1.

Example:

@@ -66,7 +66,7 @@ Example:
                reg = <0x59810000 0x800>;

                clock {
                        compatible = "socionext,uniphier-ld20-mio-clock";
                        compatible = "socionext,uniphier-ld11-mio-clock";
                        #clock-cells = <1>;
                };

@@ -112,7 +112,7 @@ Example:
                reg = <0x59820000 0x200>;

                clock {
                        compatible = "socionext,uniphier-ld20-peri-clock";
                        compatible = "socionext,uniphier-ld11-peri-clock";
                        #clock-cells = <1>;
                };

@@ -6,25 +6,25 @@ System reset

Required properties:
- compatible: should be one of the following:
    "socionext,uniphier-sld3-reset" - for PH1-sLD3 SoC.
    "socionext,uniphier-ld4-reset" - for PH1-LD4 SoC.
    "socionext,uniphier-pro4-reset" - for PH1-Pro4 SoC.
    "socionext,uniphier-sld8-reset" - for PH1-sLD8 SoC.
    "socionext,uniphier-pro5-reset" - for PH1-Pro5 SoC.
    "socionext,uniphier-pxs2-reset" - for ProXstream2/PH1-LD6b SoC.
    "socionext,uniphier-ld11-reset" - for PH1-LD11 SoC.
    "socionext,uniphier-ld20-reset" - for PH1-LD20 SoC.
    "socionext,uniphier-sld3-reset" - for sLD3 SoC.
    "socionext,uniphier-ld4-reset" - for LD4 SoC.
    "socionext,uniphier-pro4-reset" - for Pro4 SoC.
    "socionext,uniphier-sld8-reset" - for sLD8 SoC.
    "socionext,uniphier-pro5-reset" - for Pro5 SoC.
    "socionext,uniphier-pxs2-reset" - for PXs2/LD6b SoC.
    "socionext,uniphier-ld11-reset" - for LD11 SoC.
    "socionext,uniphier-ld20-reset" - for LD20 SoC.
- #reset-cells: should be 1.

Example:

        sysctrl@61840000 {
                compatible = "socionext,uniphier-ld20-sysctrl",
                compatible = "socionext,uniphier-ld11-sysctrl",
                             "simple-mfd", "syscon";
                reg = <0x61840000 0x4000>;

                reset {
                        compatible = "socionext,uniphier-ld20-reset";
                        compatible = "socionext,uniphier-ld11-reset";
                        #reset-cells = <1>;
                };

@@ -32,30 +32,30 @@ Example:
        };


Media I/O (MIO) reset
---------------------
Media I/O (MIO) reset, SD reset
-------------------------------

Required properties:
- compatible: should be one of the following:
    "socionext,uniphier-sld3-mio-reset" - for PH1-sLD3 SoC.
    "socionext,uniphier-ld4-mio-reset" - for PH1-LD4 SoC.
    "socionext,uniphier-pro4-mio-reset" - for PH1-Pro4 SoC.
    "socionext,uniphier-sld8-mio-reset" - for PH1-sLD8 SoC.
    "socionext,uniphier-pro5-mio-reset" - for PH1-Pro5 SoC.
    "socionext,uniphier-pxs2-mio-reset" - for ProXstream2/PH1-LD6b SoC.
    "socionext,uniphier-ld11-mio-reset" - for PH1-LD11 SoC.
    "socionext,uniphier-ld20-mio-reset" - for PH1-LD20 SoC.
    "socionext,uniphier-sld3-mio-reset" - for sLD3 SoC.
    "socionext,uniphier-ld4-mio-reset" - for LD4 SoC.
    "socionext,uniphier-pro4-mio-reset" - for Pro4 SoC.
    "socionext,uniphier-sld8-mio-reset" - for sLD8 SoC.
    "socionext,uniphier-pro5-sd-reset" - for Pro5 SoC.
    "socionext,uniphier-pxs2-sd-reset" - for PXs2/LD6b SoC.
    "socionext,uniphier-ld11-mio-reset" - for LD11 SoC.
    "socionext,uniphier-ld20-sd-reset" - for LD20 SoC.
- #reset-cells: should be 1.

Example:

        mioctrl@59810000 {
                compatible = "socionext,uniphier-ld20-mioctrl",
                compatible = "socionext,uniphier-ld11-mioctrl",
                             "simple-mfd", "syscon";
                reg = <0x59810000 0x800>;

                reset {
                        compatible = "socionext,uniphier-ld20-mio-reset";
                        compatible = "socionext,uniphier-ld11-mio-reset";
                        #reset-cells = <1>;
                };

@@ -68,24 +68,24 @@ Peripheral reset

Required properties:
- compatible: should be one of the following:
    "socionext,uniphier-ld4-peri-reset" - for PH1-LD4 SoC.
    "socionext,uniphier-pro4-peri-reset" - for PH1-Pro4 SoC.
    "socionext,uniphier-sld8-peri-reset" - for PH1-sLD8 SoC.
    "socionext,uniphier-pro5-peri-reset" - for PH1-Pro5 SoC.
    "socionext,uniphier-pxs2-peri-reset" - for ProXstream2/PH1-LD6b SoC.
    "socionext,uniphier-ld11-peri-reset" - for PH1-LD11 SoC.
    "socionext,uniphier-ld20-peri-reset" - for PH1-LD20 SoC.
    "socionext,uniphier-ld4-peri-reset" - for LD4 SoC.
    "socionext,uniphier-pro4-peri-reset" - for Pro4 SoC.
    "socionext,uniphier-sld8-peri-reset" - for sLD8 SoC.
    "socionext,uniphier-pro5-peri-reset" - for Pro5 SoC.
    "socionext,uniphier-pxs2-peri-reset" - for PXs2/LD6b SoC.
    "socionext,uniphier-ld11-peri-reset" - for LD11 SoC.
    "socionext,uniphier-ld20-peri-reset" - for LD20 SoC.
- #reset-cells: should be 1.

Example:

        perictrl@59820000 {
                compatible = "socionext,uniphier-ld20-perictrl",
                compatible = "socionext,uniphier-ld11-perictrl",
                             "simple-mfd", "syscon";
                reg = <0x59820000 0x200>;

                reset {
                        compatible = "socionext,uniphier-ld20-peri-reset";
                        compatible = "socionext,uniphier-ld11-peri-reset";
                        #reset-cells = <1>;
                };

@@ -1,7 +1,9 @@
Binding for Cadence UART Controller

Required properties:
- compatible : should be "cdns,uart-r1p8", or "xlnx,xuartps"
- compatible :
  Use "xlnx,xuartps","cdns,uart-r1p8" for Zynq-7xxx SoC.
  Use "xlnx,zynqmp-uart","cdns,uart-r1p12" for Zynq Ultrascale+ MPSoC.
- reg: Should contain UART controller registers location and length.
- interrupts: Should contain UART controller interrupts.
- clocks: Must contain phandles to the UART clocks

@@ -9,6 +9,14 @@ Required properties:
    - "renesas,scifb-r8a73a4" for R8A73A4 (R-Mobile APE6) SCIFB compatible UART.
    - "renesas,scifa-r8a7740" for R8A7740 (R-Mobile A1) SCIFA compatible UART.
    - "renesas,scifb-r8a7740" for R8A7740 (R-Mobile A1) SCIFB compatible UART.
    - "renesas,scif-r8a7743" for R8A7743 (RZ/G1M) SCIF compatible UART.
    - "renesas,scifa-r8a7743" for R8A7743 (RZ/G1M) SCIFA compatible UART.
    - "renesas,scifb-r8a7743" for R8A7743 (RZ/G1M) SCIFB compatible UART.
    - "renesas,hscif-r8a7743" for R8A7743 (RZ/G1M) HSCIF compatible UART.
    - "renesas,scif-r8a7745" for R8A7745 (RZ/G1E) SCIF compatible UART.
    - "renesas,scifa-r8a7745" for R8A7745 (RZ/G1E) SCIFA compatible UART.
    - "renesas,scifb-r8a7745" for R8A7745 (RZ/G1E) SCIFB compatible UART.
    - "renesas,hscif-r8a7745" for R8A7745 (RZ/G1E) HSCIF compatible UART.
    - "renesas,scif-r8a7778" for R8A7778 (R-Car M1) SCIF compatible UART.
    - "renesas,scif-r8a7779" for R8A7779 (R-Car H1) SCIF compatible UART.
    - "renesas,scif-r8a7790" for R8A7790 (R-Car H2) SCIF compatible UART.

@@ -28,10 +28,7 @@ Refer to phy/phy-bindings.txt for generic phy consumer properties
- g-use-dma: enable dma usage in gadget driver.
- g-rx-fifo-size: size of rx fifo size in gadget mode.
- g-np-tx-fifo-size: size of non-periodic tx fifo size in gadget mode.

Deprecated properties:
- g-tx-fifo-size: size of periodic tx fifo per endpoint (except ep0)
  in gadget mode.
- g-tx-fifo-size: size of periodic tx fifo per endpoint (except ep0) in gadget mode.

Example:

@@ -6,7 +6,7 @@ Note that it only applies to the new descriptor-based interface. For a
description of the deprecated integer-based GPIO interface please refer to
gpio-legacy.txt (actually, there is no real mapping possible with the old
interface; you just fetch an integer from somewhere and request the
corresponding GPIO.
corresponding GPIO).

All platforms can enable the GPIO library, but if the platform strictly
requires GPIO functionality to be present, it needs to select GPIOLIB from its

@@ -162,6 +162,9 @@ The driver controlling "foo.0" will then be able to obtain its GPIOs as follows:

Since the "led" GPIOs are mapped as active-high, this example will switch their
signals to 1, i.e. enabling the LEDs. And for the "power" GPIO, which is mapped
as active-low, its actual signal will be 0 after this code. Contrary to the legacy
integer GPIO interface, the active-low property is handled during mapping and is
thus transparent to GPIO consumers.
as active-low, its actual signal will be 0 after this code. Contrary to the
legacy integer GPIO interface, the active-low property is handled during
mapping and is thus transparent to GPIO consumers.

A set of functions such as gpiod_set_value() is available to work with
the new descriptor-oriented interface.

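A minimal consumer sketch (not part of this patch) of the descriptor-based interface discussed above, assuming a device that carries "led" and "power" GPIO mappings as in the surrounding example; the function name foo_probe_gpios() is illustrative only:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int foo_probe_gpios(struct device *dev)
{
        struct gpio_desc *led, *power;

        /* Request descriptors through the "led" and "power" mappings. */
        led = gpiod_get(dev, "led", GPIOD_OUT_LOW);
        if (IS_ERR(led))
                return PTR_ERR(led);

        power = gpiod_get(dev, "power", GPIOD_OUT_LOW);
        if (IS_ERR(power)) {
                gpiod_put(led);
                return PTR_ERR(power);
        }

        /* Logical values: active-low vs. active-high is handled by the mapping. */
        gpiod_set_value(led, 1);        /* LED on */
        gpiod_set_value(power, 1);      /* power asserted; line goes low if mapped active-low */

        return 0;
}
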
@@ -1442,6 +1442,7 @@ F: drivers/cpufreq/mvebu-cpufreq.c
F: arch/arm/configs/mvebu_*_defconfig

ARM/Marvell Berlin SoC support
M: Jisheng Zhang <jszhang@marvell.com>
M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained

@@ -5287,6 +5288,12 @@ M: Joe Perches <joe@perches.com>
S: Maintained
F: scripts/get_maintainer.pl

GENWQE (IBM Generic Workqueue Card)
M: Frank Haverkamp <haver@linux.vnet.ibm.com>
M: Gabriel Krisman Bertazi <krisman@linux.vnet.ibm.com>
S: Supported
F: drivers/misc/genwqe/

GFS2 FILE SYSTEM
M: Steven Whitehouse <swhiteho@redhat.com>
M: Bob Peterson <rpeterso@redhat.com>

@@ -8100,6 +8107,7 @@ S: Maintained
F: drivers/media/dvb-frontends/mn88473*

MODULE SUPPORT
M: Jessica Yu <jeyu@redhat.com>
M: Rusty Russell <rusty@rustcorp.com.au>
S: Maintained
F: include/linux/module.h

Makefile | 2

@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 9
SUBLEVEL = 0
EXTRAVERSION = -rc2
EXTRAVERSION = -rc3
NAME = Psychotic Stoned Sheep

# *DOCUMENTATION*

@@ -41,6 +41,8 @@ config ARC
        select PERF_USE_VMALLOC
        select HAVE_DEBUG_STACKOVERFLOW
        select HAVE_GENERIC_DMA_COHERENT
        select HAVE_KERNEL_GZIP
        select HAVE_KERNEL_LZMA

config MIGHT_HAVE_PCI
        bool

@@ -186,14 +188,6 @@ if SMP
config ARC_HAS_COH_CACHES
        def_bool n

config ARC_MCIP
        bool "ARConnect Multicore IP (MCIP) Support "
        depends on ISA_ARCV2
        help
          This IP block enables SMP in ARC-HS38 cores.
          It provides for cross-core interrupts, multi-core debug
          hardware semaphores, shared memory,....

config NR_CPUS
        int "Maximum number of CPUs (2-4096)"
        range 2 4096

@@ -211,6 +205,15 @@ config ARC_SMP_HALT_ON_RESET

endif #SMP

config ARC_MCIP
        bool "ARConnect Multicore IP (MCIP) Support "
        depends on ISA_ARCV2
        default y if SMP
        help
          This IP block enables SMP in ARC-HS38 cores.
          It provides for cross-core interrupts, multi-core debug
          hardware semaphores, shared memory,....

menuconfig ARC_CACHE
        bool "Enable Cache Support"
        default y

@@ -537,14 +540,6 @@ config ARC_DBG_TLB_PARANOIA
        bool "Paranoia Checks in Low Level TLB Handlers"
        default n

config ARC_DBG_TLB_MISS_COUNT
        bool "Profile TLB Misses"
        default n
        select DEBUG_FS
        help
          Counts number of I and D TLB Misses and exports them via Debugfs
          The counters can be cleared via Debugfs as well

endif

config ARC_UBOOT_SUPPORT

@ -50,9 +50,6 @@ atleast_gcc44 := $(call cc-ifversion, -ge, 0404, y)
|
|||
|
||||
cflags-$(atleast_gcc44) += -fsection-anchors
|
||||
|
||||
cflags-$(CONFIG_ARC_HAS_LLSC) += -mlock
|
||||
cflags-$(CONFIG_ARC_HAS_SWAPE) += -mswape
|
||||
|
||||
ifdef CONFIG_ISA_ARCV2
|
||||
|
||||
ifndef CONFIG_ARC_HAS_LL64
|
||||
|
|
|
@ -14,9 +14,15 @@ UIMAGE_ENTRYADDR = $(LINUX_START_TEXT)
|
|||
|
||||
suffix-y := bin
|
||||
suffix-$(CONFIG_KERNEL_GZIP) := gz
|
||||
suffix-$(CONFIG_KERNEL_LZMA) := lzma
|
||||
|
||||
targets += uImage uImage.bin uImage.gz
|
||||
extra-y += vmlinux.bin vmlinux.bin.gz
|
||||
targets += uImage
|
||||
targets += uImage.bin
|
||||
targets += uImage.gz
|
||||
targets += uImage.lzma
|
||||
extra-y += vmlinux.bin
|
||||
extra-y += vmlinux.bin.gz
|
||||
extra-y += vmlinux.bin.lzma
|
||||
|
||||
$(obj)/vmlinux.bin: vmlinux FORCE
|
||||
$(call if_changed,objcopy)
|
||||
|
@ -24,12 +30,18 @@ $(obj)/vmlinux.bin: vmlinux FORCE
|
|||
$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
|
||||
$(call if_changed,gzip)
|
||||
|
||||
$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
|
||||
$(call if_changed,lzma)
|
||||
|
||||
$(obj)/uImage.bin: $(obj)/vmlinux.bin FORCE
|
||||
$(call if_changed,uimage,none)
|
||||
|
||||
$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE
|
||||
$(call if_changed,uimage,gzip)
|
||||
|
||||
$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma FORCE
|
||||
$(call if_changed,uimage,lzma)
|
||||
|
||||
$(obj)/uImage: $(obj)/uImage.$(suffix-y)
|
||||
@ln -sf $(notdir $<) $@
|
||||
@echo ' Image $@ is ready'
|
||||
|
|
|
@ -349,10 +349,11 @@ struct cpuinfo_arc {
|
|||
struct cpuinfo_arc_bpu bpu;
|
||||
struct bcr_identity core;
|
||||
struct bcr_isa isa;
|
||||
const char *details, *name;
|
||||
unsigned int vec_base;
|
||||
struct cpuinfo_arc_ccm iccm, dccm;
|
||||
struct {
|
||||
unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, pad1:3,
|
||||
unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, swape:1, pad1:2,
|
||||
fpu_sp:1, fpu_dp:1, pad2:6,
|
||||
debug:1, ap:1, smart:1, rtt:1, pad3:4,
|
||||
timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4;
|
||||
|
|
|
@ -53,7 +53,7 @@ extern void arc_cache_init(void);
|
|||
extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
|
||||
extern void read_decode_cache_bcr(void);
|
||||
|
||||
extern int ioc_exists;
|
||||
extern int ioc_enable;
|
||||
extern unsigned long perip_base, perip_end;
|
||||
|
||||
#endif /* !__ASSEMBLY__ */
|
||||
|
|
|
@ -54,7 +54,7 @@ extern int elf_check_arch(const struct elf32_hdr *);
|
|||
* the loader. We need to make sure that it is out of the way of the program
|
||||
* that it will "exec", and that there is sufficient room for the brk.
|
||||
*/
|
||||
#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
|
||||
#define ELF_ET_DYN_BASE (2UL * TASK_SIZE / 3)
|
||||
|
||||
/*
|
||||
* When the program starts, a1 contains a pointer to a function to be
|
||||
|
|
|
@ -55,6 +55,22 @@ struct mcip_cmd {
|
|||
#define IDU_M_DISTRI_DEST 0x2
|
||||
};
|
||||
|
||||
struct mcip_bcr {
|
||||
#ifdef CONFIG_CPU_BIG_ENDIAN
|
||||
unsigned int pad3:8,
|
||||
idu:1, llm:1, num_cores:6,
|
||||
iocoh:1, gfrc:1, dbg:1, pad2:1,
|
||||
msg:1, sem:1, ipi:1, pad:1,
|
||||
ver:8;
|
||||
#else
|
||||
unsigned int ver:8,
|
||||
pad:1, ipi:1, sem:1, msg:1,
|
||||
pad2:1, dbg:1, gfrc:1, iocoh:1,
|
||||
num_cores:6, llm:1, idu:1,
|
||||
pad3:8;
|
||||
#endif
|
||||
};
|
||||
|
||||
/*
|
||||
* MCIP programming model
|
||||
*
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
struct mod_arch_specific {
|
||||
void *unw_info;
|
||||
int unw_sec_idx;
|
||||
const char *secstr;
|
||||
};
|
||||
#endif
|
||||
|
||||
|
|
|
@ -27,11 +27,6 @@ struct id_to_str {
|
|||
const char *str;
|
||||
};
|
||||
|
||||
struct cpuinfo_data {
|
||||
struct id_to_str info;
|
||||
int up_range;
|
||||
};
|
||||
|
||||
extern int root_mountflags, end_mem;
|
||||
|
||||
void setup_processor(void);
|
||||
|
@ -43,5 +38,6 @@ void __init setup_arch_memory(void);
|
|||
#define IS_USED_RUN(v) ((v) ? "" : "(not used) ")
|
||||
#define IS_USED_CFG(cfg) IS_USED_RUN(IS_ENABLED(cfg))
|
||||
#define IS_AVAIL2(v, s, cfg) IS_AVAIL1(v, s), IS_AVAIL1(v, IS_USED_CFG(cfg))
|
||||
#define IS_AVAIL3(v, v2, s) IS_AVAIL1(v, s), IS_AVAIL1(v, IS_DISABLED_RUN(v2))
|
||||
|
||||
#endif /* __ASMARC_SETUP_H */
|
||||
|
|
|
@ -17,6 +17,7 @@ int sys_clone_wrapper(int, int, int, int, int);
|
|||
int sys_cacheflush(uint32_t, uint32_t, uint32_t);
|
||||
int sys_arc_settls(void *);
|
||||
int sys_arc_gettls(void);
|
||||
int sys_arc_usr_cmpxchg(int *, int, int);
|
||||
|
||||
#include <asm-generic/syscalls.h>
|
||||
|
||||
|
|
|
@ -27,18 +27,19 @@
|
|||
|
||||
#define NR_syscalls __NR_syscalls
|
||||
|
||||
/* Generic syscall (fs/filesystems.c - lost in asm-generic/unistd.h */
|
||||
#define __NR_sysfs (__NR_arch_specific_syscall + 3)
|
||||
|
||||
/* ARC specific syscall */
|
||||
#define __NR_cacheflush (__NR_arch_specific_syscall + 0)
|
||||
#define __NR_arc_settls (__NR_arch_specific_syscall + 1)
|
||||
#define __NR_arc_gettls (__NR_arch_specific_syscall + 2)
|
||||
#define __NR_arc_usr_cmpxchg (__NR_arch_specific_syscall + 4)
|
||||
|
||||
__SYSCALL(__NR_cacheflush, sys_cacheflush)
|
||||
__SYSCALL(__NR_arc_settls, sys_arc_settls)
|
||||
__SYSCALL(__NR_arc_gettls, sys_arc_gettls)
|
||||
|
||||
|
||||
/* Generic syscall (fs/filesystems.c - lost in asm-generic/unistd.h */
|
||||
#define __NR_sysfs (__NR_arch_specific_syscall + 3)
|
||||
__SYSCALL(__NR_arc_usr_cmpxchg, sys_arc_usr_cmpxchg)
|
||||
__SYSCALL(__NR_sysfs, sys_sysfs)
|
||||
|
||||
#undef __SYSCALL
|
||||
|
|
|
@ -15,11 +15,12 @@
|
|||
#include <asm/mcip.h>
|
||||
#include <asm/setup.h>
|
||||
|
||||
static char smp_cpuinfo_buf[128];
|
||||
static int idu_detected;
|
||||
|
||||
static DEFINE_RAW_SPINLOCK(mcip_lock);
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
|
||||
static char smp_cpuinfo_buf[128];
|
||||
|
||||
static void mcip_setup_per_cpu(int cpu)
|
||||
{
|
||||
smp_ipi_irq_setup(cpu, IPI_IRQ);
|
||||
|
@ -86,21 +87,7 @@ static void mcip_ipi_clear(int irq)
|
|||
|
||||
static void mcip_probe_n_setup(void)
|
||||
{
|
||||
struct mcip_bcr {
|
||||
#ifdef CONFIG_CPU_BIG_ENDIAN
|
||||
unsigned int pad3:8,
|
||||
idu:1, llm:1, num_cores:6,
|
||||
iocoh:1, gfrc:1, dbg:1, pad2:1,
|
||||
msg:1, sem:1, ipi:1, pad:1,
|
||||
ver:8;
|
||||
#else
|
||||
unsigned int ver:8,
|
||||
pad:1, ipi:1, sem:1, msg:1,
|
||||
pad2:1, dbg:1, gfrc:1, iocoh:1,
|
||||
num_cores:6, llm:1, idu:1,
|
||||
pad3:8;
|
||||
#endif
|
||||
} mp;
|
||||
struct mcip_bcr mp;
|
||||
|
||||
READ_BCR(ARC_REG_MCIP_BCR, mp);
|
||||
|
||||
|
@ -114,7 +101,6 @@ static void mcip_probe_n_setup(void)
|
|||
IS_AVAIL1(mp.gfrc, "GFRC"));
|
||||
|
||||
cpuinfo_arc700[0].extn.gfrc = mp.gfrc;
|
||||
idu_detected = mp.idu;
|
||||
|
||||
if (mp.dbg) {
|
||||
__mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, 0xf);
|
||||
|
@ -130,6 +116,8 @@ struct plat_smp_ops plat_smp_ops = {
|
|||
.ipi_clear = mcip_ipi_clear,
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
/***************************************************************************
|
||||
* ARCv2 Interrupt Distribution Unit (IDU)
|
||||
*
|
||||
|
@ -295,8 +283,11 @@ idu_of_init(struct device_node *intc, struct device_node *parent)
|
|||
/* Read IDU BCR to confirm nr_irqs */
|
||||
int nr_irqs = of_irq_count(intc);
|
||||
int i, irq;
|
||||
struct mcip_bcr mp;
|
||||
|
||||
if (!idu_detected)
|
||||
READ_BCR(ARC_REG_MCIP_BCR, mp);
|
||||
|
||||
if (!mp.idu)
|
||||
panic("IDU not detected, but DeviceTree using it");
|
||||
|
||||
pr_info("MCIP: IDU referenced from Devicetree %d irqs\n", nr_irqs);
|
||||
|
|
|
@ -30,17 +30,9 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
|
|||
char *secstr, struct module *mod)
|
||||
{
|
||||
#ifdef CONFIG_ARC_DW2_UNWIND
|
||||
int i;
|
||||
|
||||
mod->arch.unw_sec_idx = 0;
|
||||
mod->arch.unw_info = NULL;
|
||||
|
||||
for (i = 1; i < hdr->e_shnum; i++) {
|
||||
if (strcmp(secstr+sechdrs[i].sh_name, ".eh_frame") == 0) {
|
||||
mod->arch.unw_sec_idx = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
mod->arch.secstr = secstr;
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
|
@ -59,29 +51,33 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
|
|||
unsigned int relsec, /* sec index for relo sec */
|
||||
struct module *module)
|
||||
{
|
||||
int i, n;
|
||||
int i, n, relo_type;
|
||||
Elf32_Rela *rel_entry = (void *)sechdrs[relsec].sh_addr;
|
||||
Elf32_Sym *sym_entry, *sym_sec;
|
||||
Elf32_Addr relocation;
|
||||
Elf32_Addr location;
|
||||
Elf32_Addr sec_to_patch;
|
||||
int relo_type;
|
||||
Elf32_Addr relocation, location, tgt_addr;
|
||||
unsigned int tgtsec;
|
||||
|
||||
sec_to_patch = sechdrs[sechdrs[relsec].sh_info].sh_addr;
|
||||
/*
|
||||
* @relsec has relocations e.g. .rela.init.text
|
||||
* @tgtsec is section to patch e.g. .init.text
|
||||
*/
|
||||
tgtsec = sechdrs[relsec].sh_info;
|
||||
tgt_addr = sechdrs[tgtsec].sh_addr;
|
||||
sym_sec = (Elf32_Sym *) sechdrs[symindex].sh_addr;
|
||||
n = sechdrs[relsec].sh_size / sizeof(*rel_entry);
|
||||
|
||||
pr_debug("\n========== Module Sym reloc ===========================\n");
|
||||
pr_debug("Section to fixup %x\n", sec_to_patch);
|
||||
pr_debug("\nSection to fixup %s @%x\n",
|
||||
module->arch.secstr + sechdrs[tgtsec].sh_name, tgt_addr);
|
||||
pr_debug("=========================================================\n");
|
||||
pr_debug("rela->r_off | rela->addend | sym->st_value | ADDR | VALUE\n");
|
||||
pr_debug("r_off\tr_add\tst_value ADDRESS VALUE\n");
|
||||
pr_debug("=========================================================\n");
|
||||
|
||||
/* Loop thru entries in relocation section */
|
||||
for (i = 0; i < n; i++) {
|
||||
const char *s;
|
||||
|
||||
/* This is where to make the change */
|
||||
location = sec_to_patch + rel_entry[i].r_offset;
|
||||
location = tgt_addr + rel_entry[i].r_offset;
|
||||
|
||||
/* This is the symbol it is referring to. Note that all
|
||||
undefined symbols have been resolved. */
|
||||
|
@ -89,10 +85,15 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
|
|||
|
||||
relocation = sym_entry->st_value + rel_entry[i].r_addend;
|
||||
|
||||
pr_debug("\t%x\t\t%x\t\t%x %x %x [%s]\n",
|
||||
rel_entry[i].r_offset, rel_entry[i].r_addend,
|
||||
sym_entry->st_value, location, relocation,
|
||||
strtab + sym_entry->st_name);
|
||||
if (sym_entry->st_name == 0 && ELF_ST_TYPE (sym_entry->st_info) == STT_SECTION) {
|
||||
s = module->arch.secstr + sechdrs[sym_entry->st_shndx].sh_name;
|
||||
} else {
|
||||
s = strtab + sym_entry->st_name;
|
||||
}
|
||||
|
||||
pr_debug(" %x\t%x\t%x %x %x [%s]\n",
|
||||
rel_entry[i].r_offset, rel_entry[i].r_addend,
|
||||
sym_entry->st_value, location, relocation, s);
|
||||
|
||||
/* This assumes modules are built with -mlong-calls
|
||||
* so any branches/jumps are absolute 32 bit jmps
|
||||
|
@ -111,6 +112,10 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
|
|||
goto relo_err;
|
||||
|
||||
}
|
||||
|
||||
if (strcmp(module->arch.secstr+sechdrs[tgtsec].sh_name, ".eh_frame") == 0)
|
||||
module->arch.unw_sec_idx = tgtsec;
|
||||
|
||||
return 0;
|
||||
|
||||
relo_err:
|
||||
|
|
|
@ -41,6 +41,39 @@ SYSCALL_DEFINE0(arc_gettls)
|
|||
return task_thread_info(current)->thr_ptr;
|
||||
}
|
||||
|
||||
SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
|
||||
{
|
||||
int uval;
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* This is only for old cores lacking LLOCK/SCOND, which by definition
|
||||
* can't possibly be SMP. Thus doesn't need to be SMP safe.
|
||||
* And this also helps reduce the overhead for serializing in
|
||||
* the UP case
|
||||
*/
|
||||
WARN_ON_ONCE(IS_ENABLED(CONFIG_SMP));
|
||||
|
||||
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
|
||||
return -EFAULT;
|
||||
|
||||
preempt_disable();
|
||||
|
||||
ret = __get_user(uval, uaddr);
|
||||
if (ret)
|
||||
goto done;
|
||||
|
||||
if (uval != expected)
|
||||
ret = -EAGAIN;
|
||||
else
|
||||
ret = __put_user(new, uaddr);
|
||||
|
||||
done:
|
||||
preempt_enable();
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void arch_cpu_idle(void)
|
||||
{
|
||||
/* sleep, but enable all interrupts before committing */
|
||||
|
|
|
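For reference, a hypothetical user-space sketch (not part of this patch) of how the arc_usr_cmpxchg syscall added above could be invoked. The syscall number is an assumption derived from the unistd.h hunk (__NR_arch_specific_syscall + 4, i.e. 244 + 4 in asm-generic) and should be taken from the real target headers:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_arc_usr_cmpxchg
#define __NR_arc_usr_cmpxchg 248        /* assumption: __NR_arch_specific_syscall (244) + 4 */
#endif

int main(void)
{
        int val = 1;
        long ret;

        /* Atomically replace the value 1 with 2; a mismatch reports EAGAIN. */
        ret = syscall(__NR_arc_usr_cmpxchg, &val, 1, 2);
        if (ret)
                perror("arc_usr_cmpxchg");

        printf("val = %d\n", val);
        return 0;
}
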
@ -40,6 +40,29 @@ struct task_struct *_current_task[NR_CPUS]; /* For stack switching */
|
|||
|
||||
struct cpuinfo_arc cpuinfo_arc700[NR_CPUS];
|
||||
|
||||
static const struct id_to_str arc_cpu_rel[] = {
|
||||
#ifdef CONFIG_ISA_ARCOMPACT
|
||||
{ 0x34, "R4.10"},
|
||||
{ 0x35, "R4.11"},
|
||||
#else
|
||||
{ 0x51, "R2.0" },
|
||||
{ 0x52, "R2.1" },
|
||||
{ 0x53, "R3.0" },
|
||||
#endif
|
||||
{ 0x00, NULL }
|
||||
};
|
||||
|
||||
static const struct id_to_str arc_cpu_nm[] = {
|
||||
#ifdef CONFIG_ISA_ARCOMPACT
|
||||
{ 0x20, "ARC 600" },
|
||||
{ 0x30, "ARC 770" }, /* 750 identified separately */
|
||||
#else
|
||||
{ 0x40, "ARC EM" },
|
||||
{ 0x50, "ARC HS38" },
|
||||
#endif
|
||||
{ 0x00, "Unknown" }
|
||||
};
|
||||
|
||||
static void read_decode_ccm_bcr(struct cpuinfo_arc *cpu)
|
||||
{
|
||||
if (is_isa_arcompact()) {
|
||||
|
@ -92,11 +115,26 @@ static void read_arc_build_cfg_regs(void)
|
|||
struct bcr_timer timer;
|
||||
struct bcr_generic bcr;
|
||||
struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
|
||||
const struct id_to_str *tbl;
|
||||
|
||||
FIX_PTR(cpu);
|
||||
|
||||
READ_BCR(AUX_IDENTITY, cpu->core);
|
||||
READ_BCR(ARC_REG_ISA_CFG_BCR, cpu->isa);
|
||||
|
||||
for (tbl = &arc_cpu_rel[0]; tbl->id != 0; tbl++) {
|
||||
if (cpu->core.family == tbl->id) {
|
||||
cpu->details = tbl->str;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
for (tbl = &arc_cpu_nm[0]; tbl->id != 0; tbl++) {
|
||||
if ((cpu->core.family & 0xF0) == tbl->id)
|
||||
break;
|
||||
}
|
||||
cpu->name = tbl->str;
|
||||
|
||||
READ_BCR(ARC_REG_TIMERS_BCR, timer);
|
||||
cpu->extn.timer0 = timer.t0;
|
||||
cpu->extn.timer1 = timer.t1;
|
||||
|
@ -111,6 +149,9 @@ static void read_arc_build_cfg_regs(void)
|
|||
cpu->extn.swap = read_aux_reg(ARC_REG_SWAP_BCR) ? 1 : 0; /* 1,3 */
|
||||
cpu->extn.crc = read_aux_reg(ARC_REG_CRC_BCR) ? 1 : 0;
|
||||
cpu->extn.minmax = read_aux_reg(ARC_REG_MIXMAX_BCR) > 1 ? 1 : 0; /* 2 */
|
||||
cpu->extn.swape = (cpu->core.family >= 0x34) ? 1 :
|
||||
IS_ENABLED(CONFIG_ARC_HAS_SWAPE);
|
||||
|
||||
READ_BCR(ARC_REG_XY_MEM_BCR, cpu->extn_xymem);
|
||||
|
||||
/* Read CCM BCRs for boot reporting even if not enabled in Kconfig */
|
||||
|
@ -160,64 +201,38 @@ static void read_arc_build_cfg_regs(void)
|
|||
cpu->extn.rtt = bcr.ver ? 1 : 0;
|
||||
|
||||
cpu->extn.debug = cpu->extn.ap | cpu->extn.smart | cpu->extn.rtt;
|
||||
|
||||
/* some hacks for lack of feature BCR info in old ARC700 cores */
|
||||
if (is_isa_arcompact()) {
|
||||
if (!cpu->isa.ver) /* ISA BCR absent, use Kconfig info */
|
||||
cpu->isa.atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC);
|
||||
else
|
||||
cpu->isa.atomic = cpu->isa.atomic1;
|
||||
|
||||
cpu->isa.be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
|
||||
|
||||
/* there's no direct way to distinguish 750 vs. 770 */
|
||||
if (unlikely(cpu->core.family < 0x34 || cpu->mmu.ver < 3))
|
||||
cpu->name = "ARC750";
|
||||
}
|
||||
}
|
||||
|
||||
static const struct cpuinfo_data arc_cpu_tbl[] = {
|
||||
#ifdef CONFIG_ISA_ARCOMPACT
|
||||
{ {0x20, "ARC 600" }, 0x2F},
|
||||
{ {0x30, "ARC 700" }, 0x33},
|
||||
{ {0x34, "ARC 700 R4.10"}, 0x34},
|
||||
{ {0x35, "ARC 700 R4.11"}, 0x35},
|
||||
#else
|
||||
{ {0x50, "ARC HS38 R2.0"}, 0x51},
|
||||
{ {0x52, "ARC HS38 R2.1"}, 0x52},
|
||||
{ {0x53, "ARC HS38 R3.0"}, 0x53},
|
||||
#endif
|
||||
{ {0x00, NULL } }
|
||||
};
|
||||
|
||||
|
||||
static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
|
||||
{
|
||||
struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
|
||||
struct bcr_identity *core = &cpu->core;
|
||||
const struct cpuinfo_data *tbl;
|
||||
char *isa_nm;
|
||||
int i, be, atomic;
|
||||
int n = 0;
|
||||
int i, n = 0;
|
||||
|
||||
FIX_PTR(cpu);
|
||||
|
||||
if (is_isa_arcompact()) {
|
||||
isa_nm = "ARCompact";
|
||||
be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
|
||||
|
||||
atomic = cpu->isa.atomic1;
|
||||
if (!cpu->isa.ver) /* ISA BCR absent, use Kconfig info */
|
||||
atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC);
|
||||
} else {
|
||||
isa_nm = "ARCv2";
|
||||
be = cpu->isa.be;
|
||||
atomic = cpu->isa.atomic;
|
||||
}
|
||||
|
||||
n += scnprintf(buf + n, len - n,
|
||||
"\nIDENTITY\t: ARCVER [%#02x] ARCNUM [%#02x] CHIPID [%#4x]\n",
|
||||
core->family, core->cpu_id, core->chip_id);
|
||||
|
||||
for (tbl = &arc_cpu_tbl[0]; tbl->info.id != 0; tbl++) {
|
||||
if ((core->family >= tbl->info.id) &&
|
||||
(core->family <= tbl->up_range)) {
|
||||
n += scnprintf(buf + n, len - n,
|
||||
"processor [%d]\t: %s (%s ISA) %s\n",
|
||||
cpu_id, tbl->info.str, isa_nm,
|
||||
IS_AVAIL1(be, "[Big-Endian]"));
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (tbl->info.id == 0)
|
||||
n += scnprintf(buf + n, len - n, "UNKNOWN ARC Processor\n");
|
||||
n += scnprintf(buf + n, len - n, "processor [%d]\t: %s %s (%s ISA) %s\n",
|
||||
cpu_id, cpu->name, cpu->details,
|
||||
is_isa_arcompact() ? "ARCompact" : "ARCv2",
|
||||
IS_AVAIL1(cpu->isa.be, "[Big-Endian]"));
|
||||
|
||||
n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s\nISA Extn\t: ",
|
||||
IS_AVAIL1(cpu->extn.timer0, "Timer0 "),
|
||||
|
@ -226,7 +241,7 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
|
|||
CONFIG_ARC_HAS_RTC));
|
||||
|
||||
n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s",
|
||||
IS_AVAIL2(atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
|
||||
IS_AVAIL2(cpu->isa.atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
|
||||
IS_AVAIL2(cpu->isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64),
|
||||
IS_AVAIL1(cpu->isa.unalign, "unalign (not used)"));
|
||||
|
||||
|
@ -253,7 +268,7 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
|
|||
IS_AVAIL1(cpu->extn.swap, "swap "),
|
||||
IS_AVAIL1(cpu->extn.minmax, "minmax "),
|
||||
IS_AVAIL1(cpu->extn.crc, "crc "),
|
||||
IS_AVAIL2(1, "swape", CONFIG_ARC_HAS_SWAPE));
|
||||
IS_AVAIL2(cpu->extn.swape, "swape", CONFIG_ARC_HAS_SWAPE));
|
||||
|
||||
if (cpu->bpu.ver)
|
||||
n += scnprintf(buf + n, len - n,
|
||||
|
@ -272,9 +287,7 @@ static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
|
|||
|
||||
FIX_PTR(cpu);
|
||||
|
||||
n += scnprintf(buf + n, len - n,
|
||||
"Vector Table\t: %#x\nPeripherals\t: %#lx:%#lx\n",
|
||||
cpu->vec_base, perip_base, perip_end);
|
||||
n += scnprintf(buf + n, len - n, "Vector Table\t: %#x\n", cpu->vec_base);
|
||||
|
||||
if (cpu->extn.fpu_sp || cpu->extn.fpu_dp)
|
||||
n += scnprintf(buf + n, len - n, "FPU\t\t: %s%s\n",
|
||||
|
@ -507,7 +520,7 @@ static void *c_start(struct seq_file *m, loff_t *pos)
|
|||
* way to pass it w/o having to kmalloc/free a 2 byte string.
|
||||
* Encode cpu-id as 0xFFcccc, which is decoded by show routine.
|
||||
*/
|
||||
return *pos < num_possible_cpus() ? cpu_to_ptr(*pos) : NULL;
|
||||
return *pos < nr_cpu_ids ? cpu_to_ptr(*pos) : NULL;
|
||||
}
|
||||
|
||||
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
|
||||
|
|
|
@ -237,113 +237,3 @@ void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
|
|||
if (!user_mode(regs))
|
||||
show_stacktrace(current, regs);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/mount.h>
|
||||
#include <linux/pagemap.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/namei.h>
|
||||
#include <linux/debugfs.h>
|
||||
|
||||
static struct dentry *test_dentry;
|
||||
static struct dentry *test_dir;
|
||||
static struct dentry *test_u32_dentry;
|
||||
|
||||
static u32 clr_on_read = 1;
|
||||
|
||||
#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
|
||||
u32 numitlb, numdtlb, num_pte_not_present;
|
||||
|
||||
static int fill_display_data(char *kbuf)
|
||||
{
|
||||
size_t num = 0;
|
||||
num += sprintf(kbuf + num, "I-TLB Miss %x\n", numitlb);
|
||||
num += sprintf(kbuf + num, "D-TLB Miss %x\n", numdtlb);
|
||||
num += sprintf(kbuf + num, "PTE not present %x\n", num_pte_not_present);
|
||||
|
||||
if (clr_on_read)
|
||||
numitlb = numdtlb = num_pte_not_present = 0;
|
||||
|
||||
return num;
|
||||
}
|
||||
|
||||
static int tlb_stats_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
file->private_data = (void *)__get_free_page(GFP_KERNEL);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* called on user read(): display the counters */
|
||||
static ssize_t tlb_stats_output(struct file *file, /* file descriptor */
|
||||
char __user *user_buf, /* user buffer */
|
||||
size_t len, /* length of buffer */
|
||||
loff_t *offset) /* offset in the file */
|
||||
{
|
||||
size_t num;
|
||||
char *kbuf = (char *)file->private_data;
|
||||
|
||||
/* All of the data can be shoved in one iteration */
|
||||
if (*offset != 0)
|
||||
return 0;
|
||||
|
||||
num = fill_display_data(kbuf);
|
||||
|
||||
/* simple_read_from_buffer() is helper for copy to user space
|
||||
It copies up to @2 (num) bytes from kernel buffer @4 (kbuf) at offset
|
||||
@3 (offset) into the user space address starting at @1 (user_buf).
|
||||
@5 (len) is max size of user buffer
|
||||
*/
|
||||
return simple_read_from_buffer(user_buf, num, offset, kbuf, len);
|
||||
}
|
||||
|
||||
/* called on user write : clears the counters */
|
||||
static ssize_t tlb_stats_clear(struct file *file, const char __user *user_buf,
|
||||
size_t length, loff_t *offset)
|
||||
{
|
||||
numitlb = numdtlb = num_pte_not_present = 0;
|
||||
return length;
|
||||
}
|
||||
|
||||
static int tlb_stats_close(struct inode *inode, struct file *file)
|
||||
{
|
||||
free_page((unsigned long)(file->private_data));
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct file_operations tlb_stats_file_ops = {
|
||||
.read = tlb_stats_output,
|
||||
.write = tlb_stats_clear,
|
||||
.open = tlb_stats_open,
|
||||
.release = tlb_stats_close
|
||||
};
|
||||
#endif
|
||||
|
||||
static int __init arc_debugfs_init(void)
|
||||
{
|
||||
test_dir = debugfs_create_dir("arc", NULL);
|
||||
|
||||
#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
|
||||
test_dentry = debugfs_create_file("tlb_stats", 0444, test_dir, NULL,
|
||||
&tlb_stats_file_ops);
|
||||
#endif
|
||||
|
||||
test_u32_dentry =
|
||||
debugfs_create_u32("clr_on_read", 0444, test_dir, &clr_on_read);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
module_init(arc_debugfs_init);
|
||||
|
||||
static void __exit arc_debugfs_exit(void)
|
||||
{
|
||||
debugfs_remove(test_u32_dentry);
|
||||
debugfs_remove(test_dentry);
|
||||
debugfs_remove(test_dir);
|
||||
}
|
||||
module_exit(arc_debugfs_exit);
|
||||
|
||||
#endif
|
||||
|
|
|
@ -22,8 +22,8 @@
|
|||
#include <asm/setup.h>
|
||||
|
||||
static int l2_line_sz;
|
||||
int ioc_exists;
|
||||
volatile int slc_enable = 1, ioc_enable = 1;
|
||||
static int ioc_exists;
|
||||
int slc_enable = 1, ioc_enable = 1;
|
||||
unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
|
||||
unsigned long perip_end = 0xFFFFFFFF; /* legacy value */
|
||||
|
||||
|
@ -53,18 +53,15 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len)
|
|||
PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
|
||||
PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");
|
||||
|
||||
if (!is_isa_arcv2())
|
||||
return buf;
|
||||
|
||||
p = &cpuinfo_arc700[c].slc;
|
||||
if (p->ver)
|
||||
n += scnprintf(buf + n, len - n,
|
||||
"SLC\t\t: %uK, %uB Line%s\n",
|
||||
p->sz_k, p->line_len, IS_USED_RUN(slc_enable));
|
||||
|
||||
if (ioc_exists)
|
||||
n += scnprintf(buf + n, len - n, "IOC\t\t:%s\n",
|
||||
IS_DISABLED_RUN(ioc_enable));
|
||||
n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
|
||||
perip_base,
|
||||
IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency "));
|
||||
|
||||
return buf;
|
||||
}
|
||||
|
@ -113,8 +110,10 @@ static void read_decode_cache_bcr_arcv2(int cpu)
|
|||
}
|
||||
|
||||
READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
|
||||
if (cbcr.c && ioc_enable)
|
||||
if (cbcr.c)
|
||||
ioc_exists = 1;
|
||||
else
|
||||
ioc_enable = 0;
|
||||
|
||||
/* HS 2.0 didn't have AUX_VOL */
|
||||
if (cpuinfo_arc700[cpu].core.family > 0x51) {
|
||||
|
@ -1002,7 +1001,7 @@ void arc_cache_init(void)
|
|||
read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_DISABLE);
|
||||
}
|
||||
|
||||
if (is_isa_arcv2() && ioc_exists) {
|
||||
if (is_isa_arcv2() && ioc_enable) {
|
||||
/* IO coherency base - 0x8z */
|
||||
write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);
|
||||
/* IO coherency aperture size - 512Mb: 0x8z-0xAz */
|
||||
|
|
|
@ -45,7 +45,7 @@ static void *arc_dma_alloc(struct device *dev, size_t size,
|
|||
* -For coherent data, Read/Write to buffers terminate early in cache
|
||||
* (vs. always going to memory - thus are faster)
|
||||
*/
|
||||
if ((is_isa_arcv2() && ioc_exists) ||
|
||||
if ((is_isa_arcv2() && ioc_enable) ||
|
||||
(attrs & DMA_ATTR_NON_CONSISTENT))
|
||||
need_coh = 0;
|
||||
|
||||
|
@ -97,7 +97,7 @@ static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
|
|||
int is_non_coh = 1;
|
||||
|
||||
is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT) ||
|
||||
(is_isa_arcv2() && ioc_exists);
|
||||
(is_isa_arcv2() && ioc_enable);
|
||||
|
||||
if (PageHighMem(page) || !is_non_coh)
|
||||
iounmap((void __force __iomem *)vaddr);
|
||||
|
|
|
@ -793,16 +793,16 @@ char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
|
|||
char super_pg[64] = "";
|
||||
|
||||
if (p_mmu->s_pg_sz_m)
|
||||
scnprintf(super_pg, 64, "%dM Super Page%s, ",
|
||||
scnprintf(super_pg, 64, "%dM Super Page %s",
|
||||
p_mmu->s_pg_sz_m,
|
||||
IS_USED_CFG(CONFIG_TRANSPARENT_HUGEPAGE));
|
||||
|
||||
n += scnprintf(buf + n, len - n,
|
||||
"MMU [v%x]\t: %dk PAGE, %sJTLB %d (%dx%d), uDTLB %d, uITLB %d %s%s\n",
|
||||
"MMU [v%x]\t: %dk PAGE, %sJTLB %d (%dx%d), uDTLB %d, uITLB %d%s%s\n",
|
||||
p_mmu->ver, p_mmu->pg_sz_k, super_pg,
|
||||
p_mmu->sets * p_mmu->ways, p_mmu->sets, p_mmu->ways,
|
||||
p_mmu->u_dtlb, p_mmu->u_itlb,
|
||||
IS_AVAIL2(p_mmu->pae, "PAE40 ", CONFIG_ARC_HAS_PAE40));
|
||||
IS_AVAIL2(p_mmu->pae, ", PAE40 ", CONFIG_ARC_HAS_PAE40));
|
||||
|
||||
return buf;
|
||||
}
|
||||
|
|
|
@ -237,15 +237,6 @@ ex_saved_reg1:
|
|||
|
||||
2:
|
||||
|
||||
#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
|
||||
and.f 0, r0, _PAGE_PRESENT
|
||||
bz 1f
|
||||
ld r3, [num_pte_not_present]
|
||||
add r3, r3, 1
|
||||
st r3, [num_pte_not_present]
|
||||
1:
|
||||
#endif
|
||||
|
||||
.endm
|
||||
|
||||
;-----------------------------------------------------------------
|
||||
|
@ -309,12 +300,6 @@ ENTRY(EV_TLBMissI)
|
|||
|
||||
TLBMISS_FREEUP_REGS
|
||||
|
||||
#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
|
||||
ld r0, [@numitlb]
|
||||
add r0, r0, 1
|
||||
st r0, [@numitlb]
|
||||
#endif
|
||||
|
||||
;----------------------------------------------------------------
|
||||
; Get the PTE corresponding to V-addr accessed, r2 is setup with EFA
|
||||
LOAD_FAULT_PTE
|
||||
|
@ -349,12 +334,6 @@ ENTRY(EV_TLBMissD)
|
|||
|
||||
TLBMISS_FREEUP_REGS
|
||||
|
||||
#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
|
||||
ld r0, [@numdtlb]
|
||||
add r0, r0, 1
|
||||
st r0, [@numdtlb]
|
||||
#endif
|
||||
|
||||
;----------------------------------------------------------------
|
||||
; Get the PTE corresponding to V-addr accessed
|
||||
; If PTE exists, it will setup, r0 = PTE, r1 = Ptr to PTE, r2 = EFA
|
||||
|
|
|
@ -239,14 +239,25 @@
|
|||
arm,primecell-periphid = <0x10480180>;
|
||||
max-frequency = <100000000>;
|
||||
bus-width = <4>;
|
||||
cap-sd-highspeed;
|
||||
cap-mmc-highspeed;
|
||||
sd-uhs-sdr12;
|
||||
sd-uhs-sdr25;
|
||||
/* All direction control is used */
|
||||
st,sig-dir-cmd;
|
||||
st,sig-dir-dat0;
|
||||
st,sig-dir-dat2;
|
||||
st,sig-dir-dat31;
|
||||
st,sig-pin-fbclk;
|
||||
full-pwr-cycle;
|
||||
vmmc-supply = <&ab8500_ldo_aux3_reg>;
|
||||
vqmmc-supply = <&vmmci>;
|
||||
pinctrl-names = "default", "sleep";
|
||||
pinctrl-0 = <&sdi0_default_mode>;
|
||||
pinctrl-1 = <&sdi0_sleep_mode>;
|
||||
|
||||
cd-gpios = <&gpio6 26 GPIO_ACTIVE_LOW>; // 218
|
||||
/* GPIO218 MMC_CD */
|
||||
cd-gpios = <&gpio6 26 GPIO_ACTIVE_LOW>;
|
||||
|
||||
status = "okay";
|
||||
};
|
||||
|
@ -549,7 +560,7 @@
|
|||
/* VMMCI level-shifter enable */
|
||||
snowball_cfg3 {
|
||||
pins = "GPIO217_AH12";
|
||||
ste,config = <&gpio_out_lo>;
|
||||
ste,config = <&gpio_out_hi>;
|
||||
};
|
||||
/* VMMCI level-shifter voltage select */
|
||||
snowball_cfg4 {
|
||||
|
|
|
@ -184,11 +184,11 @@
|
|||
};
|
||||
|
||||
&mio_clk {
|
||||
compatible = "socionext,uniphier-pro5-mio-clock";
|
||||
compatible = "socionext,uniphier-pro5-sd-clock";
|
||||
};
|
||||
|
||||
&mio_rst {
|
||||
compatible = "socionext,uniphier-pro5-mio-reset";
|
||||
compatible = "socionext,uniphier-pro5-sd-reset";
|
||||
};
|
||||
|
||||
&peri_clk {
|
||||
|
|
|
@ -197,11 +197,11 @@
|
|||
};
|
||||
|
||||
&mio_clk {
|
||||
compatible = "socionext,uniphier-pxs2-mio-clock";
|
||||
compatible = "socionext,uniphier-pxs2-sd-clock";
|
||||
};
|
||||
|
||||
&mio_rst {
|
||||
compatible = "socionext,uniphier-pxs2-mio-reset";
|
||||
compatible = "socionext,uniphier-pxs2-sd-reset";
|
||||
};
|
||||
|
||||
&peri_clk {
|
||||
|
|
|
@ -70,7 +70,7 @@
|
|||
global_timer: timer@40002200 {
|
||||
compatible = "arm,cortex-a9-global-timer";
|
||||
reg = <0x40002200 0x20>;
|
||||
interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
|
||||
interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
|
||||
interrupt-parent = <&intc>;
|
||||
clocks = <&clks VF610_CLK_PLATFORM_BUS>;
|
||||
};
|
||||
|
|
|
@ -850,6 +850,7 @@ CONFIG_PWM_SUN4I=y
|
|||
CONFIG_PWM_TEGRA=y
|
||||
CONFIG_PWM_VT8500=y
|
||||
CONFIG_PHY_HIX5HD2_SATA=y
|
||||
CONFIG_E1000E=y
|
||||
CONFIG_PWM_STI=y
|
||||
CONFIG_PWM_BCM2835=y
|
||||
CONFIG_PWM_BRCMSTB=m
|
||||
|
|
|
@ -408,7 +408,7 @@ static struct genpd_onecell_data imx_gpc_onecell_data = {
|
|||
static int imx_gpc_genpd_init(struct device *dev, struct regulator *pu_reg)
|
||||
{
|
||||
struct clk *clk;
|
||||
int i;
|
||||
int i, ret;
|
||||
|
||||
imx6q_pu_domain.reg = pu_reg;
|
||||
|
||||
|
@ -430,13 +430,22 @@ static int imx_gpc_genpd_init(struct device *dev, struct regulator *pu_reg)
|
|||
if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS))
|
||||
return 0;
|
||||
|
||||
pm_genpd_init(&imx6q_pu_domain.base, NULL, false);
|
||||
return of_genpd_add_provider_onecell(dev->of_node,
|
||||
&imx_gpc_onecell_data);
|
||||
for (i = 0; i < ARRAY_SIZE(imx_gpc_domains); i++)
|
||||
pm_genpd_init(imx_gpc_domains[i], NULL, false);
|
||||
|
||||
ret = of_genpd_add_provider_onecell(dev->of_node,
|
||||
&imx_gpc_onecell_data);
|
||||
if (ret)
|
||||
goto power_off;
|
||||
|
||||
return 0;
|
||||
|
||||
power_off:
|
||||
imx6q_pm_pu_power_off(&imx6q_pu_domain.base);
|
||||
clk_err:
|
||||
while (i--)
|
||||
clk_put(imx6q_pu_domain.clk[i]);
|
||||
imx6q_pu_domain.reg = NULL;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
|
|
@ -173,7 +173,7 @@ static void __init imx6q_enet_phy_init(void)
|
|||
ksz9021rn_phy_fixup);
|
||||
phy_register_fixup_for_uid(PHY_ID_KSZ9031, MICREL_PHY_ID_MASK,
|
||||
ksz9031rn_phy_fixup);
|
||||
phy_register_fixup_for_uid(PHY_ID_AR8031, 0xffffffff,
|
||||
phy_register_fixup_for_uid(PHY_ID_AR8031, 0xffffffef,
|
||||
ar8031_phy_fixup);
|
||||
phy_register_fixup_for_uid(PHY_ID_AR8035, 0xffffffef,
|
||||
ar8035_phy_fixup);
|
||||
|
|
|
@ -23,6 +23,7 @@ config MACH_MVEBU_V7
|
|||
select CACHE_L2X0
|
||||
select ARM_CPU_SUSPEND
|
||||
select MACH_MVEBU_ANY
|
||||
select MVEBU_CLK_COREDIV
|
||||
|
||||
config MACH_ARMADA_370
|
||||
bool "Marvell Armada 370 boards"
|
||||
|
@ -32,7 +33,6 @@ config MACH_ARMADA_370
|
|||
select CPU_PJ4B
|
||||
select MACH_MVEBU_V7
|
||||
select PINCTRL_ARMADA_370
|
||||
select MVEBU_CLK_COREDIV
|
||||
help
|
||||
Say 'Y' here if you want your kernel to support boards based
|
||||
on the Marvell Armada 370 SoC with device tree.
|
||||
|
@ -50,7 +50,6 @@ config MACH_ARMADA_375
|
|||
select HAVE_SMP
|
||||
select MACH_MVEBU_V7
|
||||
select PINCTRL_ARMADA_375
|
||||
select MVEBU_CLK_COREDIV
|
||||
help
|
||||
Say 'Y' here if you want your kernel to support boards based
|
||||
on the Marvell Armada 375 SoC with device tree.
|
||||
|
@ -68,7 +67,6 @@ config MACH_ARMADA_38X
|
|||
select HAVE_SMP
|
||||
select MACH_MVEBU_V7
|
||||
select PINCTRL_ARMADA_38X
|
||||
select MVEBU_CLK_COREDIV
|
||||
help
|
||||
Say 'Y' here if you want your kernel to support boards based
|
||||
on the Marvell Armada 380/385 SoC with device tree.
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
config ARCH_UNIPHIER
|
||||
bool "Socionext UniPhier SoCs"
|
||||
depends on ARCH_MULTI_V7
|
||||
select ARCH_HAS_RESET_CONTROLLER
|
||||
select ARM_AMBA
|
||||
select ARM_GLOBAL_TIMER
|
||||
select ARM_GIC
|
||||
|
|
|
@ -190,6 +190,7 @@ config ARCH_THUNDER
|
|||
|
||||
config ARCH_UNIPHIER
|
||||
bool "Socionext UniPhier SoC Family"
|
||||
select ARCH_HAS_RESET_CONTROLLER
|
||||
select PINCTRL
|
||||
help
|
||||
This enables support for Socionext UniPhier SoC family.
|
||||
|
|
|
@ -164,6 +164,8 @@
|
|||
nand-ecc-mode = "hw";
|
||||
nand-ecc-strength = <8>;
|
||||
nand-ecc-step-size = <512>;
|
||||
nand-bus-width = <16>;
|
||||
brcm,nand-oob-sector-size = <16>;
|
||||
#address-cells = <1>;
|
||||
#size-cells = <1>;
|
||||
};
|
||||
|
|
|
@ -123,6 +123,7 @@
|
|||
<1 14 0xf08>, /* Physical Non-Secure PPI */
|
||||
<1 11 0xf08>, /* Virtual PPI */
|
||||
<1 10 0xf08>; /* Hypervisor PPI */
|
||||
fsl,erratum-a008585;
|
||||
};
|
||||
|
||||
pmu {
|
||||
|
|
|
@ -195,6 +195,7 @@
|
|||
<1 14 4>, /* Physical Non-Secure PPI, active-low */
|
||||
<1 11 4>, /* Virtual PPI, active-low */
|
||||
<1 10 4>; /* Hypervisor PPI, active-low */
|
||||
fsl,erratum-a008585;
|
||||
};
|
||||
|
||||
pmu {
|
||||
|
|
|
@ -131,7 +131,7 @@
|
|||
#address-cells = <0x1>;
|
||||
#size-cells = <0x0>;
|
||||
cell-index = <1>;
|
||||
clocks = <&cpm_syscon0 0 3>;
|
||||
clocks = <&cpm_syscon0 1 21>;
|
||||
status = "disabled";
|
||||
};
|
||||
|
||||
|
|
|
@ -116,7 +116,6 @@
|
|||
cap-mmc-highspeed;
|
||||
clock-frequency = <150000000>;
|
||||
disable-wp;
|
||||
keep-power-in-suspend;
|
||||
non-removable;
|
||||
num-slots = <1>;
|
||||
vmmc-supply = <&vcc_io>;
|
||||
|
@ -258,8 +257,6 @@
|
|||
};
|
||||
|
||||
vcc_sd: SWITCH_REG1 {
|
||||
regulator-always-on;
|
||||
regulator-boot-on;
|
||||
regulator-name = "vcc_sd";
|
||||
};
|
||||
|
||||
|
|
|
@ -152,8 +152,6 @@
|
|||
gpio = <&gpio3 11 GPIO_ACTIVE_LOW>;
|
||||
regulator-min-microvolt = <1800000>;
|
||||
regulator-max-microvolt = <3300000>;
|
||||
regulator-always-on;
|
||||
regulator-boot-on;
|
||||
vin-supply = <&vcc_io>;
|
||||
};
|
||||
|
||||
|
@ -201,7 +199,6 @@
|
|||
bus-width = <8>;
|
||||
cap-mmc-highspeed;
|
||||
disable-wp;
|
||||
keep-power-in-suspend;
|
||||
mmc-pwrseq = <&emmc_pwrseq>;
|
||||
mmc-hs200-1_2v;
|
||||
mmc-hs200-1_8v;
|
||||
|
@ -350,7 +347,6 @@
|
|||
clock-freq-min-max = <400000 50000000>;
|
||||
cap-sd-highspeed;
|
||||
card-detect-delay = <200>;
|
||||
keep-power-in-suspend;
|
||||
num-slots = <1>;
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>;
|
||||
|
|
|
@ -257,18 +257,18 @@
|
|||
reg = <0x59801000 0x400>;
|
||||
};
|
||||
|
||||
mioctrl@59810000 {
|
||||
compatible = "socionext,uniphier-mioctrl",
|
||||
sdctrl@59810000 {
|
||||
compatible = "socionext,uniphier-ld20-sdctrl",
|
||||
"simple-mfd", "syscon";
|
||||
reg = <0x59810000 0x800>;
|
||||
|
||||
mio_clk: clock {
|
||||
compatible = "socionext,uniphier-ld20-mio-clock";
|
||||
sd_clk: clock {
|
||||
compatible = "socionext,uniphier-ld20-sd-clock";
|
||||
#clock-cells = <1>;
|
||||
};
|
||||
|
||||
mio_rst: reset {
|
||||
compatible = "socionext,uniphier-ld20-mio-reset";
|
||||
sd_rst: reset {
|
||||
compatible = "socionext,uniphier-ld20-sd-reset";
|
||||
#reset-cells = <1>;
|
||||
};
|
||||
};
|
||||
|
|
|
@ -217,7 +217,7 @@ static inline void *phys_to_virt(phys_addr_t x)
|
|||
#define _virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
|
||||
#else
|
||||
#define __virt_to_pgoff(kaddr) (((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
|
||||
#define __page_to_voff(kaddr) (((u64)(page) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
|
||||
#define __page_to_voff(page) (((u64)(page) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
|
||||
|
||||
#define page_to_virt(page) ((void *)((__page_to_voff(page)) | PAGE_OFFSET))
|
||||
#define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))
|
||||
|
|
|
@ -147,7 +147,7 @@ static int __init early_cpu_to_node(int cpu)
|
|||
|
||||
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
|
||||
{
|
||||
return node_distance(from, to);
|
||||
return node_distance(early_cpu_to_node(from), early_cpu_to_node(to));
|
||||
}
|
||||
|
||||
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size,
|
||||
|
@ -223,8 +223,11 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
|
|||
void *nd;
|
||||
int tnid;
|
||||
|
||||
pr_info("Initmem setup node %d [mem %#010Lx-%#010Lx]\n",
|
||||
nid, start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1);
|
||||
if (start_pfn < end_pfn)
|
||||
pr_info("Initmem setup node %d [mem %#010Lx-%#010Lx]\n", nid,
|
||||
start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1);
|
||||
else
|
||||
pr_info("Initmem setup node %d [<memory-less node>]\n", nid);
|
||||
|
||||
nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
|
||||
nd = __va(nd_pa);
|
||||
|
|
|
@ -3149,7 +3149,7 @@ static void print_dma_descriptors(struct cryptocop_int_operation *iop)
|
|||
printk("print_dma_descriptors start\n");
|
||||
|
||||
printk("iop:\n");
|
||||
printk("\tsid: 0x%lld\n", iop->sid);
|
||||
printk("\tsid: 0x%llx\n", iop->sid);
|
||||
|
||||
printk("\tcdesc_out: 0x%p\n", iop->cdesc_out);
|
||||
printk("\tcdesc_in: 0x%p\n", iop->cdesc_in);
|
||||
|
|
|
@ -31,7 +31,6 @@ struct thread_info {
|
|||
int cpu; /* cpu we're on */
|
||||
int preempt_count; /* 0 => preemptable, <0 => BUG */
|
||||
mm_segment_t addr_limit;
|
||||
struct restart_block restart_block;
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -44,9 +43,6 @@ struct thread_info {
|
|||
.cpu = 0, \
|
||||
.preempt_count = INIT_PREEMPT_COUNT, \
|
||||
.addr_limit = KERNEL_DS, \
|
||||
.restart_block = { \
|
||||
.fn = do_no_restart_syscall, \
|
||||
}, \
|
||||
}
|
||||
|
||||
#define init_thread_info (init_thread_union.thread_info)
|
||||
|
|
|
@ -79,7 +79,7 @@ restore_sigcontext(struct sigcontext *usc, int *pd0)
|
|||
unsigned int er0;
|
||||
|
||||
/* Always make any pending restarted system calls return -EINTR */
|
||||
current_thread_info()->restart_block.fn = do_no_restart_syscall;
|
||||
current->restart_block.fn = do_no_restart_syscall;
|
||||
|
||||
/* restore passed registers */
|
||||
#define COPY(r) do { err |= get_user(regs->r, &usc->sc_##r); } while (0)
|
||||
|
|
|
@ -26,7 +26,7 @@ extern u64 pnv_first_deep_stop_state;
|
|||
std r0,0(r1); \
|
||||
ptesync; \
|
||||
ld r0,0(r1); \
|
||||
1: cmp cr0,r0,r0; \
|
||||
1: cmpd cr0,r0,r0; \
|
||||
bne 1b; \
|
||||
IDLE_INST; \
|
||||
b .
|
||||
|
|
|
@ -93,6 +93,10 @@
|
|||
ld reg,PACAKBASE(r13); /* get high part of &label */ \
|
||||
ori reg,reg,(FIXED_SYMBOL_ABS_ADDR(label))@l;
|
||||
|
||||
#define __LOAD_HANDLER(reg, label) \
|
||||
ld reg,PACAKBASE(r13); \
|
||||
ori reg,reg,(ABS_ADDR(label))@l;
|
||||
|
||||
/* Exception register prefixes */
|
||||
#define EXC_HV H
|
||||
#define EXC_STD
|
||||
|
@ -208,6 +212,18 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
|
|||
#define kvmppc_interrupt kvmppc_interrupt_pr
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_RELOCATABLE
|
||||
#define BRANCH_TO_COMMON(reg, label) \
|
||||
__LOAD_HANDLER(reg, label); \
|
||||
mtctr reg; \
|
||||
bctr
|
||||
|
||||
#else
|
||||
#define BRANCH_TO_COMMON(reg, label) \
|
||||
b label
|
||||
|
||||
#endif
|
||||
|
||||
#define __KVM_HANDLER_PROLOG(area, n) \
|
||||
BEGIN_FTR_SECTION_NESTED(947) \
|
||||
ld r10,area+EX_CFAR(r13); \
|
||||
|
|
|
@ -52,11 +52,23 @@ static inline int mm_is_core_local(struct mm_struct *mm)
|
|||
return cpumask_subset(mm_cpumask(mm),
|
||||
topology_sibling_cpumask(smp_processor_id()));
|
||||
}
|
||||
|
||||
static inline int mm_is_thread_local(struct mm_struct *mm)
|
||||
{
|
||||
return cpumask_equal(mm_cpumask(mm),
|
||||
cpumask_of(smp_processor_id()));
|
||||
}
|
||||
|
||||
#else
|
||||
static inline int mm_is_core_local(struct mm_struct *mm)
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
|
||||
static inline int mm_is_thread_local(struct mm_struct *mm)
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
|
|
|
@ -95,19 +95,35 @@ __start_interrupts:
|
|||
/* No virt vectors corresponding with 0x0..0x100 */
|
||||
EXC_VIRT_NONE(0x4000, 0x4100)
|
||||
|
||||
|
||||
#ifdef CONFIG_PPC_P7_NAP
|
||||
/*
|
||||
* If running native on arch 2.06 or later, check if we are waking up
|
||||
* from nap/sleep/winkle, and branch to idle handler.
|
||||
*/
|
||||
#define IDLETEST(n) \
|
||||
BEGIN_FTR_SECTION ; \
|
||||
mfspr r10,SPRN_SRR1 ; \
|
||||
rlwinm. r10,r10,47-31,30,31 ; \
|
||||
beq- 1f ; \
|
||||
cmpwi cr3,r10,2 ; \
|
||||
BRANCH_TO_COMMON(r10, system_reset_idle_common) ; \
|
||||
1: \
|
||||
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
|
||||
#else
|
||||
#define IDLETEST NOTEST
|
||||
#endif
|
||||
|
||||
EXC_REAL_BEGIN(system_reset, 0x100, 0x200)
|
||||
SET_SCRATCH0(r13)
|
||||
#ifdef CONFIG_PPC_P7_NAP
|
||||
BEGIN_FTR_SECTION
|
||||
/* Running native on arch 2.06 or later, check if we are
|
||||
* waking up from nap/sleep/winkle.
|
||||
*/
|
||||
mfspr r13,SPRN_SRR1
|
||||
rlwinm. r13,r13,47-31,30,31
|
||||
beq 9f
|
||||
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
|
||||
IDLETEST, 0x100)
|
||||
|
||||
cmpwi cr3,r13,2
|
||||
GET_PACA(r13)
|
||||
EXC_REAL_END(system_reset, 0x100, 0x200)
|
||||
EXC_VIRT_NONE(0x4100, 0x4200)
|
||||
|
||||
#ifdef CONFIG_PPC_P7_NAP
|
||||
EXC_COMMON_BEGIN(system_reset_idle_common)
|
||||
bl pnv_restore_hyp_resource
|
||||
|
||||
li r0,PNV_THREAD_RUNNING
|
||||
|
@ -130,14 +146,8 @@ BEGIN_FTR_SECTION
|
|||
blt cr3,2f
|
||||
b pnv_wakeup_loss
|
||||
2: b pnv_wakeup_noloss
|
||||
#endif
|
||||
|
||||
9:
|
||||
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
|
||||
#endif /* CONFIG_PPC_P7_NAP */
|
||||
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
|
||||
NOTEST, 0x100)
|
||||
EXC_REAL_END(system_reset, 0x100, 0x200)
|
||||
EXC_VIRT_NONE(0x4100, 0x4200)
|
||||
EXC_COMMON(system_reset_common, 0x100, system_reset_exception)
|
||||
|
||||
#ifdef CONFIG_PPC_PSERIES
|
||||
|
@ -817,10 +827,8 @@ EXC_VIRT(trap_0b, 0x4b00, 0x4c00, 0xb00)
|
|||
TRAMP_KVM(PACA_EXGEN, 0xb00)
|
||||
EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
|
||||
|
||||
|
||||
#define LOAD_SYSCALL_HANDLER(reg) \
|
||||
ld reg,PACAKBASE(r13); \
|
||||
ori reg,reg,(ABS_ADDR(system_call_common))@l;
|
||||
#define LOAD_SYSCALL_HANDLER(reg) \
|
||||
__LOAD_HANDLER(reg, system_call_common)
|
||||
|
||||
/* Syscall routine is used twice, in reloc-off and reloc-on paths */
|
||||
#define SYSCALL_PSERIES_1 \
|
||||
|
|
|
@ -275,7 +275,7 @@ int hw_breakpoint_handler(struct die_args *args)
|
|||
if (!stepped) {
|
||||
WARN(1, "Unable to handle hardware breakpoint. Breakpoint at "
|
||||
"0x%lx will be disabled.", info->address);
|
||||
perf_event_disable(bp);
|
||||
perf_event_disable_inatomic(bp);
|
||||
goto out;
|
||||
}
|
||||
/*
|
||||
|
|
|
@ -90,6 +90,7 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
|
|||
* Threads will spin in HMT_LOW until the lock bit is cleared.
|
||||
* r14 - pointer to core_idle_state
|
||||
* r15 - used to load contents of core_idle_state
|
||||
* r9 - used as a temporary variable
|
||||
*/
|
||||
|
||||
core_idle_lock_held:
|
||||
|
@ -99,6 +100,8 @@ core_idle_lock_held:
|
|||
bne 3b
|
||||
HMT_MEDIUM
|
||||
lwarx r15,0,r14
|
||||
andi. r9,r15,PNV_CORE_IDLE_LOCK_BIT
|
||||
bne core_idle_lock_held
|
||||
blr
|
||||
|
||||
/*
|
||||
|
@ -163,12 +166,6 @@ _GLOBAL(pnv_powersave_common)
|
|||
std r9,_MSR(r1)
|
||||
std r1,PACAR1(r13)
|
||||
|
||||
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
|
||||
/* Tell KVM we're entering idle */
|
||||
li r4,KVM_HWTHREAD_IN_IDLE
|
||||
stb r4,HSTATE_HWTHREAD_STATE(r13)
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Go to real mode to do the nap, as required by the architecture.
|
||||
* Also, we need to be in real mode before setting hwthread_state,
|
||||
|
@ -185,6 +182,26 @@ _GLOBAL(pnv_powersave_common)
|
|||
|
||||
.globl pnv_enter_arch207_idle_mode
|
||||
pnv_enter_arch207_idle_mode:
|
||||
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
|
||||
/* Tell KVM we're entering idle */
|
||||
li r4,KVM_HWTHREAD_IN_IDLE
|
||||
/******************************************************/
|
||||
/* N O T E W E L L ! ! ! N O T E W E L L */
|
||||
/* The following store to HSTATE_HWTHREAD_STATE(r13) */
|
||||
/* MUST occur in real mode, i.e. with the MMU off, */
|
||||
/* and the MMU must stay off until we clear this flag */
|
||||
/* and test HSTATE_HWTHREAD_REQ(r13) in the system */
|
||||
/* reset interrupt vector in exceptions-64s.S. */
|
||||
/* The reason is that another thread can switch the */
|
||||
/* MMU to a guest context whenever this flag is set */
|
||||
/* to KVM_HWTHREAD_IN_IDLE, and if the MMU was on, */
|
||||
/* that would potentially cause this thread to start */
|
||||
/* executing instructions from guest memory in */
|
||||
/* hypervisor mode, leading to a host crash or data */
|
||||
/* corruption, or worse. */
|
||||
/******************************************************/
|
||||
stb r4,HSTATE_HWTHREAD_STATE(r13)
|
||||
#endif
|
||||
stb r3,PACA_THREAD_IDLE_STATE(r13)
|
||||
cmpwi cr3,r3,PNV_THREAD_SLEEP
|
||||
bge cr3,2f
|
||||
|
@ -250,6 +267,12 @@ enter_winkle:
|
|||
* r3 - requested stop state
|
||||
*/
|
||||
power_enter_stop:
|
||||
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
|
||||
/* Tell KVM we're entering idle */
|
||||
li r4,KVM_HWTHREAD_IN_IDLE
|
||||
/* DO THIS IN REAL MODE! See comment above. */
|
||||
stb r4,HSTATE_HWTHREAD_STATE(r13)
|
||||
#endif
|
||||
/*
|
||||
* Check if the requested state is a deep idle state.
|
||||
*/
|
||||
|
|
|
@@ -1012,7 +1012,7 @@ void restore_tm_state(struct pt_regs *regs)
/* Ensure that restore_math() will restore */
if (msr_diff & MSR_FP)
current->thread.load_fp = 1;
#ifdef CONFIG_ALIVEC
#ifdef CONFIG_ALTIVEC
if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC)
current->thread.load_vec = 1;
#endif

@@ -23,6 +23,7 @@
#include <asm/ppc-opcode.h>
#include <asm/pnv-pci.h>
#include <asm/opal.h>
#include <asm/smp.h>

#include "book3s_xics.h"

|
|
@ -175,7 +175,7 @@ void radix__flush_tlb_mm(struct mm_struct *mm)
|
|||
if (unlikely(pid == MMU_NO_CONTEXT))
|
||||
goto no_context;
|
||||
|
||||
if (!mm_is_core_local(mm)) {
|
||||
if (!mm_is_thread_local(mm)) {
|
||||
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
|
||||
|
||||
if (lock_tlbie)
|
||||
|
@ -201,7 +201,7 @@ void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
|
|||
if (unlikely(pid == MMU_NO_CONTEXT))
|
||||
goto no_context;
|
||||
|
||||
if (!mm_is_core_local(mm)) {
|
||||
if (!mm_is_thread_local(mm)) {
|
||||
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
|
||||
|
||||
if (lock_tlbie)
|
||||
|
@ -226,7 +226,7 @@ void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
|
|||
pid = mm ? mm->context.id : 0;
|
||||
if (unlikely(pid == MMU_NO_CONTEXT))
|
||||
goto bail;
|
||||
if (!mm_is_core_local(mm)) {
|
||||
if (!mm_is_thread_local(mm)) {
|
||||
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
|
||||
|
||||
if (lock_tlbie)
|
||||
|
@ -321,7 +321,7 @@ void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
|
|||
{
|
||||
unsigned long pid;
|
||||
unsigned long addr;
|
||||
int local = mm_is_core_local(mm);
|
||||
int local = mm_is_thread_local(mm);
|
||||
unsigned long ap = mmu_get_ap(psize);
|
||||
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
|
||||
unsigned long page_size = 1UL << mmu_psize_defs[psize].shift;
|
||||
|
|
|
@ -12,9 +12,7 @@
|
|||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
unsigned long return_address(int depth);
|
||||
|
||||
#define ftrace_return_address(n) return_address(n)
|
||||
#define ftrace_return_address(n) __builtin_return_address(n)
|
||||
|
||||
void _mcount(void);
|
||||
void ftrace_caller(void);
|
||||
|
|
|
@@ -192,7 +192,7 @@ struct task_struct;
struct mm_struct;
struct seq_file;

typedef int (*dump_trace_func_t)(void *data, unsigned long address);
typedef int (*dump_trace_func_t)(void *data, unsigned long address, int reliable);
void dump_trace(dump_trace_func_t func, void *data,
struct task_struct *task, unsigned long sp);

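This typedef change threads a reliability flag through every s390 stack-trace callback. A minimal sketch of a callback written against the new prototype and handed to dump_trace() (the function name and output format are illustrative, not part of the patch):

/* Hypothetical consumer of the new three-argument callback. */
static int print_frame(void *data, unsigned long address, int reliable)
{
	/* By convention, possibly stale addresses are shown in parentheses. */
	if (reliable)
		printk(" [<%016lx>] %pSR\n", address, (void *)address);
	else
		printk("([<%016lx>] %pSR)\n", address, (void *)address);
	return 0;	/* 0 means: keep walking the stack */
}

/* Usage: dump_trace(print_frame, NULL, NULL, current_stack_pointer()); */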
@@ -9,6 +9,9 @@
#include <uapi/asm/unistd.h>

#define __IGNORE_time
#define __IGNORE_pkey_mprotect
#define __IGNORE_pkey_alloc
#define __IGNORE_pkey_free

#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_SYS_ALARM

@@ -2014,12 +2014,12 @@ void show_code(struct pt_regs *regs)
*ptr++ = '\t';
ptr += print_insn(ptr, code + start, addr);
start += opsize;
printk("%s", buffer);
pr_cont("%s", buffer);
ptr = buffer;
ptr += sprintf(ptr, "\n ");
hops++;
}
printk("\n");
pr_cont("\n");
}

void print_fn_code(unsigned char *code, unsigned long len)
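The printk() to pr_cont() conversions in this hunk follow the rule that a console line is opened once with a log level and then extended with pr_cont(). A small, self-contained sketch of that pattern (not taken from the patch):

#include <linux/printk.h>

static void dump_bytes(const unsigned char *buf, int len)
{
	int i;

	printk(KERN_DEFAULT "bytes:");		/* start the line with a log level */
	for (i = 0; i < len; i++)
		pr_cont(" %02x", buf[i]);	/* continue on the same line */
	pr_cont("\n");				/* terminate the line */
}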
|
|
|
@ -38,10 +38,10 @@ __dump_trace(dump_trace_func_t func, void *data, unsigned long sp,
|
|||
if (sp < low || sp > high - sizeof(*sf))
|
||||
return sp;
|
||||
sf = (struct stack_frame *) sp;
|
||||
if (func(data, sf->gprs[8], 0))
|
||||
return sp;
|
||||
/* Follow the backchain. */
|
||||
while (1) {
|
||||
if (func(data, sf->gprs[8]))
|
||||
return sp;
|
||||
low = sp;
|
||||
sp = sf->back_chain;
|
||||
if (!sp)
|
||||
|
@ -49,6 +49,8 @@ __dump_trace(dump_trace_func_t func, void *data, unsigned long sp,
|
|||
if (sp <= low || sp > high - sizeof(*sf))
|
||||
return sp;
|
||||
sf = (struct stack_frame *) sp;
|
||||
if (func(data, sf->gprs[8], 1))
|
||||
return sp;
|
||||
}
|
||||
/* Zero backchain detected, check for interrupt frame. */
|
||||
sp = (unsigned long) (sf + 1);
|
||||
|
@ -56,7 +58,7 @@ __dump_trace(dump_trace_func_t func, void *data, unsigned long sp,
|
|||
return sp;
|
||||
regs = (struct pt_regs *) sp;
|
||||
if (!user_mode(regs)) {
|
||||
if (func(data, regs->psw.addr))
|
||||
if (func(data, regs->psw.addr, 1))
|
||||
return sp;
|
||||
}
|
||||
low = sp;
|
||||
|
@ -85,33 +87,12 @@ void dump_trace(dump_trace_func_t func, void *data, struct task_struct *task,
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(dump_trace);
|
||||
|
||||
struct return_address_data {
|
||||
unsigned long address;
|
||||
int depth;
|
||||
};
|
||||
|
||||
static int __return_address(void *data, unsigned long address)
|
||||
static int show_address(void *data, unsigned long address, int reliable)
|
||||
{
|
||||
struct return_address_data *rd = data;
|
||||
|
||||
if (rd->depth--)
|
||||
return 0;
|
||||
rd->address = address;
|
||||
return 1;
|
||||
}
|
||||
|
||||
unsigned long return_address(int depth)
|
||||
{
|
||||
struct return_address_data rd = { .depth = depth + 2 };
|
||||
|
||||
dump_trace(__return_address, &rd, NULL, current_stack_pointer());
|
||||
return rd.address;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(return_address);
|
||||
|
||||
static int show_address(void *data, unsigned long address)
|
||||
{
|
||||
printk("([<%016lx>] %pSR)\n", address, (void *)address);
|
||||
if (reliable)
|
||||
printk(" [<%016lx>] %pSR \n", address, (void *)address);
|
||||
else
|
||||
printk("([<%016lx>] %pSR)\n", address, (void *)address);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -138,14 +119,14 @@ void show_stack(struct task_struct *task, unsigned long *sp)
|
|||
else
|
||||
stack = (unsigned long *)task->thread.ksp;
|
||||
}
|
||||
printk(KERN_DEFAULT "Stack:\n");
|
||||
for (i = 0; i < 20; i++) {
|
||||
if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
|
||||
break;
|
||||
if ((i * sizeof(long) % 32) == 0)
|
||||
printk("%s ", i == 0 ? "" : "\n");
|
||||
printk("%016lx ", *stack++);
|
||||
if (i % 4 == 0)
|
||||
printk(KERN_DEFAULT " ");
|
||||
pr_cont("%016lx%c", *stack++, i % 4 == 3 ? '\n' : ' ');
|
||||
}
|
||||
printk("\n");
|
||||
show_trace(task, (unsigned long)sp);
|
||||
}
|
||||
|
||||
|
@ -163,13 +144,13 @@ void show_registers(struct pt_regs *regs)
|
|||
mode = user_mode(regs) ? "User" : "Krnl";
|
||||
printk("%s PSW : %p %p", mode, (void *)regs->psw.mask, (void *)regs->psw.addr);
|
||||
if (!user_mode(regs))
|
||||
printk(" (%pSR)", (void *)regs->psw.addr);
|
||||
printk("\n");
|
||||
pr_cont(" (%pSR)", (void *)regs->psw.addr);
|
||||
pr_cont("\n");
|
||||
printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
|
||||
"P:%x AS:%x CC:%x PM:%x", psw->r, psw->t, psw->i, psw->e,
|
||||
psw->key, psw->m, psw->w, psw->p, psw->as, psw->cc, psw->pm);
|
||||
printk(" RI:%x EA:%x", psw->ri, psw->eaba);
|
||||
printk("\n%s GPRS: %016lx %016lx %016lx %016lx\n", mode,
|
||||
pr_cont(" RI:%x EA:%x\n", psw->ri, psw->eaba);
|
||||
printk("%s GPRS: %016lx %016lx %016lx %016lx\n", mode,
|
||||
regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
|
||||
printk(" %016lx %016lx %016lx %016lx\n",
|
||||
regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
|
||||
|
@ -205,14 +186,14 @@ void die(struct pt_regs *regs, const char *str)
|
|||
printk("%s: %04x ilc:%d [#%d] ", str, regs->int_code & 0xffff,
|
||||
regs->int_code >> 17, ++die_counter);
|
||||
#ifdef CONFIG_PREEMPT
|
||||
printk("PREEMPT ");
|
||||
pr_cont("PREEMPT ");
|
||||
#endif
|
||||
#ifdef CONFIG_SMP
|
||||
printk("SMP ");
|
||||
pr_cont("SMP ");
|
||||
#endif
|
||||
if (debug_pagealloc_enabled())
|
||||
printk("DEBUG_PAGEALLOC");
|
||||
printk("\n");
|
||||
pr_cont("DEBUG_PAGEALLOC");
|
||||
pr_cont("\n");
|
||||
notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
|
||||
print_modules();
|
||||
show_regs(regs);
|
||||
|
|
|
@ -222,7 +222,7 @@ static int __init service_level_perf_register(void)
|
|||
}
|
||||
arch_initcall(service_level_perf_register);
|
||||
|
||||
static int __perf_callchain_kernel(void *data, unsigned long address)
|
||||
static int __perf_callchain_kernel(void *data, unsigned long address, int reliable)
|
||||
{
|
||||
struct perf_callchain_entry_ctx *entry = data;
|
||||
|
||||
|
|
|
@ -27,12 +27,12 @@ static int __save_address(void *data, unsigned long address, int nosched)
|
|||
return 1;
|
||||
}
|
||||
|
||||
static int save_address(void *data, unsigned long address)
|
||||
static int save_address(void *data, unsigned long address, int reliable)
|
||||
{
|
||||
return __save_address(data, address, 0);
|
||||
}
|
||||
|
||||
static int save_address_nosched(void *data, unsigned long address)
|
||||
static int save_address_nosched(void *data, unsigned long address, int reliable)
|
||||
{
|
||||
return __save_address(data, address, 1);
|
||||
}
|
||||
|
|
|
@ -217,6 +217,7 @@ static __init int setup_hugepagesz(char *opt)
|
|||
} else if (MACHINE_HAS_EDAT2 && size == PUD_SIZE) {
|
||||
hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
|
||||
} else {
|
||||
hugetlb_bad_size();
|
||||
pr_err("hugepagesz= specifies an unsupported page size %s\n",
|
||||
string);
|
||||
return 0;
|
||||
|
|
|
@ -151,36 +151,40 @@ void __init free_initrd_mem(unsigned long start, unsigned long end)
|
|||
#ifdef CONFIG_MEMORY_HOTPLUG
|
||||
int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
|
||||
{
|
||||
unsigned long normal_end_pfn = PFN_DOWN(memblock_end_of_DRAM());
|
||||
unsigned long dma_end_pfn = PFN_DOWN(MAX_DMA_ADDRESS);
|
||||
unsigned long zone_start_pfn, zone_end_pfn, nr_pages;
|
||||
unsigned long start_pfn = PFN_DOWN(start);
|
||||
unsigned long size_pages = PFN_DOWN(size);
|
||||
unsigned long nr_pages;
|
||||
int rc, zone_enum;
|
||||
pg_data_t *pgdat = NODE_DATA(nid);
|
||||
struct zone *zone;
|
||||
int rc, i;
|
||||
|
||||
rc = vmem_add_mapping(start, size);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
while (size_pages > 0) {
|
||||
if (start_pfn < dma_end_pfn) {
|
||||
nr_pages = (start_pfn + size_pages > dma_end_pfn) ?
|
||||
dma_end_pfn - start_pfn : size_pages;
|
||||
zone_enum = ZONE_DMA;
|
||||
} else if (start_pfn < normal_end_pfn) {
|
||||
nr_pages = (start_pfn + size_pages > normal_end_pfn) ?
|
||||
normal_end_pfn - start_pfn : size_pages;
|
||||
zone_enum = ZONE_NORMAL;
|
||||
for (i = 0; i < MAX_NR_ZONES; i++) {
|
||||
zone = pgdat->node_zones + i;
|
||||
if (zone_idx(zone) != ZONE_MOVABLE) {
|
||||
/* Add range within existing zone limits, if possible */
|
||||
zone_start_pfn = zone->zone_start_pfn;
|
||||
zone_end_pfn = zone->zone_start_pfn +
|
||||
zone->spanned_pages;
|
||||
} else {
|
||||
nr_pages = size_pages;
|
||||
zone_enum = ZONE_MOVABLE;
|
||||
/* Add remaining range to ZONE_MOVABLE */
|
||||
zone_start_pfn = start_pfn;
|
||||
zone_end_pfn = start_pfn + size_pages;
|
||||
}
|
||||
rc = __add_pages(nid, NODE_DATA(nid)->node_zones + zone_enum,
|
||||
start_pfn, size_pages);
|
||||
if (start_pfn < zone_start_pfn || start_pfn >= zone_end_pfn)
|
||||
continue;
|
||||
nr_pages = (start_pfn + size_pages > zone_end_pfn) ?
|
||||
zone_end_pfn - start_pfn : size_pages;
|
||||
rc = __add_pages(nid, zone, start_pfn, nr_pages);
|
||||
if (rc)
|
||||
break;
|
||||
start_pfn += nr_pages;
|
||||
size_pages -= nr_pages;
|
||||
if (!size_pages)
|
||||
break;
|
||||
}
|
||||
if (rc)
|
||||
vmem_remove_mapping(start, size);
|
||||
|
|
|
@ -13,7 +13,7 @@
|
|||
#include <linux/init.h>
|
||||
#include <asm/processor.h>
|
||||
|
||||
static int __s390_backtrace(void *data, unsigned long address)
|
||||
static int __s390_backtrace(void *data, unsigned long address, int reliable)
|
||||
{
|
||||
unsigned int *depth = data;
|
||||
|
||||
|
|
|
@@ -5,8 +5,8 @@
OBJECT_FILES_NON_STANDARD_entry_$(BITS).o := y
OBJECT_FILES_NON_STANDARD_entry_64_compat.o := y

CFLAGS_syscall_64.o += -Wno-override-init
CFLAGS_syscall_32.o += -Wno-override-init
CFLAGS_syscall_64.o += $(call cc-option,-Wno-override-init,)
CFLAGS_syscall_32.o += $(call cc-option,-Wno-override-init,)
obj-y := entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o
obj-y += common.o

|
|
|
@ -3607,10 +3607,14 @@ __init int intel_pmu_init(void)
|
|||
|
||||
/*
|
||||
* Quirk: v2 perfmon does not report fixed-purpose events, so
|
||||
* assume at least 3 events:
|
||||
* assume at least 3 events, when not running in a hypervisor:
|
||||
*/
|
||||
if (version > 1)
|
||||
x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
|
||||
if (version > 1) {
|
||||
int assume = 3 * !boot_cpu_has(X86_FEATURE_HYPERVISOR);
|
||||
|
||||
x86_pmu.num_counters_fixed =
|
||||
max((int)edx.split.num_counters_fixed, assume);
|
||||
}
|
||||
|
||||
if (boot_cpu_has(X86_FEATURE_PDCM)) {
|
||||
u64 capabilities;
|
||||
|
|
|
@ -48,7 +48,8 @@
|
|||
* Scope: Core
|
||||
* MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter
|
||||
* perf code: 0x02
|
||||
* Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,SKL
|
||||
* Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW
|
||||
* SKL,KNL
|
||||
* Scope: Core
|
||||
* MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
|
||||
* perf code: 0x03
|
||||
|
@ -56,15 +57,16 @@
|
|||
* Scope: Core
|
||||
* MSR_PKG_C2_RESIDENCY: Package C2 Residency Counter.
|
||||
* perf code: 0x00
|
||||
* Available model: SNB,IVB,HSW,BDW,SKL
|
||||
* Available model: SNB,IVB,HSW,BDW,SKL,KNL
|
||||
* Scope: Package (physical package)
|
||||
* MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter.
|
||||
* perf code: 0x01
|
||||
* Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL
|
||||
* Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL
|
||||
* Scope: Package (physical package)
|
||||
* MSR_PKG_C6_RESIDENCY: Package C6 Residency Counter.
|
||||
* perf code: 0x02
|
||||
* Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,SKL
|
||||
* Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW
|
||||
* SKL,KNL
|
||||
* Scope: Package (physical package)
|
||||
* MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter.
|
||||
* perf code: 0x03
|
||||
|
@ -118,6 +120,7 @@ struct cstate_model {
|
|||
|
||||
/* Quirk flags */
|
||||
#define SLM_PKG_C6_USE_C7_MSR (1UL << 0)
|
||||
#define KNL_CORE_C6_MSR (1UL << 1)
|
||||
|
||||
struct perf_cstate_msr {
|
||||
u64 msr;
|
||||
|
@ -488,6 +491,18 @@ static const struct cstate_model slm_cstates __initconst = {
|
|||
.quirks = SLM_PKG_C6_USE_C7_MSR,
|
||||
};
|
||||
|
||||
|
||||
static const struct cstate_model knl_cstates __initconst = {
|
||||
.core_events = BIT(PERF_CSTATE_CORE_C6_RES),
|
||||
|
||||
.pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) |
|
||||
BIT(PERF_CSTATE_PKG_C3_RES) |
|
||||
BIT(PERF_CSTATE_PKG_C6_RES),
|
||||
.quirks = KNL_CORE_C6_MSR,
|
||||
};
|
||||
|
||||
|
||||
|
||||
#define X86_CSTATES_MODEL(model, states) \
|
||||
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long) &(states) }
|
||||
|
||||
|
@ -523,6 +538,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
|
|||
|
||||
X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_MOBILE, snb_cstates),
|
||||
X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates),
|
||||
|
||||
X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNL, knl_cstates),
|
||||
{ },
|
||||
};
|
||||
MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
|
||||
|
@ -558,6 +575,11 @@ static int __init cstate_probe(const struct cstate_model *cm)
|
|||
if (cm->quirks & SLM_PKG_C6_USE_C7_MSR)
|
||||
pkg_msr[PERF_CSTATE_PKG_C6_RES].msr = MSR_PKG_C7_RESIDENCY;
|
||||
|
||||
/* KNL has different MSR for CORE C6 */
|
||||
if (cm->quirks & KNL_CORE_C6_MSR)
|
||||
pkg_msr[PERF_CSTATE_CORE_C6_RES].msr = MSR_KNL_CORE_C6_RESIDENCY;
|
||||
|
||||
|
||||
has_cstate_core = cstate_probe_msr(cm->core_events,
|
||||
PERF_CSTATE_CORE_EVENT_MAX,
|
||||
core_msr, core_events_attrs);
|
||||
|
|
|
@@ -351,4 +351,10 @@ extern void arch_phys_wc_del(int handle);
#define arch_phys_wc_add arch_phys_wc_add
#endif

#ifdef CONFIG_X86_PAT
extern int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size);
extern void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size);
#define arch_io_reserve_memtype_wc arch_io_reserve_memtype_wc
#endif

#endif /* _ASM_X86_IO_H */
|
|
|
@ -454,6 +454,7 @@ static void __init acpi_sci_ioapic_setup(u8 bus_irq, u16 polarity, u16 trigger,
|
|||
polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK;
|
||||
|
||||
mp_override_legacy_irq(bus_irq, polarity, trigger, gsi);
|
||||
acpi_penalize_sci_irq(bus_irq, trigger, polarity);
|
||||
|
||||
/*
|
||||
* stash over-ride to indicate we've been here
|
||||
|
|
|
@ -429,7 +429,7 @@ int __init save_microcode_in_initrd_amd(void)
|
|||
* We need the physical address of the container for both bitness since
|
||||
* boot_params.hdr.ramdisk_image is a physical address.
|
||||
*/
|
||||
cont = __pa(container);
|
||||
cont = __pa_nodebug(container);
|
||||
cont_va = container;
|
||||
#endif
|
||||
|
||||
|
|
|
@ -18,8 +18,10 @@
|
|||
|
||||
#ifdef CC_USING_FENTRY
|
||||
# define function_hook __fentry__
|
||||
EXPORT_SYMBOL(__fentry__)
|
||||
#else
|
||||
# define function_hook mcount
|
||||
EXPORT_SYMBOL(mcount)
|
||||
#endif
|
||||
|
||||
/* All cases save the original rbp (8 bytes) */
|
||||
|
@ -295,7 +297,6 @@ trace:
|
|||
jmp fgraph_trace
|
||||
END(function_hook)
|
||||
#endif /* CONFIG_DYNAMIC_FTRACE */
|
||||
EXPORT_SYMBOL(function_hook)
|
||||
#endif /* CONFIG_FUNCTION_TRACER */
|
||||
|
||||
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
|
||||
|
|
|
@ -625,8 +625,6 @@ static void amd_disable_seq_and_redirect_scrub(struct pci_dev *dev)
|
|||
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3,
|
||||
amd_disable_seq_and_redirect_scrub);
|
||||
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_X86_64) && defined(CONFIG_X86_MCE)
|
||||
#include <linux/jump_label.h>
|
||||
#include <asm/string_64.h>
|
||||
|
@ -657,3 +655,4 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, quirk_intel_brickland_xeon_
|
|||
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, quirk_intel_brickland_xeon_ras_cap);
|
||||
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2083, quirk_intel_purley_xeon_ras_cap);
|
||||
#endif
|
||||
#endif
|
||||
|
|
|
@ -1221,11 +1221,16 @@ void __init setup_arch(char **cmdline_p)
|
|||
*/
|
||||
get_smp_config();
|
||||
|
||||
/*
|
||||
* Systems w/o ACPI and mptables might not have it mapped the local
|
||||
* APIC yet, but prefill_possible_map() might need to access it.
|
||||
*/
|
||||
init_apic_mappings();
|
||||
|
||||
prefill_possible_map();
|
||||
|
||||
init_cpu_to_node();
|
||||
|
||||
init_apic_mappings();
|
||||
io_apic_init_mappings();
|
||||
|
||||
kvm_guest_init();
|
||||
|
|
|
@@ -47,7 +47,14 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
get_stack_info(first_frame, state->task, &state->stack_info,
&state->stack_mask);

if (!__kernel_text_address(*first_frame))
/*
* The caller can provide the address of the first frame directly
* (first_frame) or indirectly (regs->sp) to indicate which stack frame
* to start unwinding at. Skip ahead until we reach it.
*/
if (!unwind_done(state) &&
(!on_stack(&state->stack_info, first_frame, sizeof(long)) ||
!__kernel_text_address(*first_frame)))
unwind_next_frame(state);
}
EXPORT_SYMBOL_GPL(__unwind_start);
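The new comment documents the contract of the frame-pointer unwinder: the caller names the first frame of interest and __unwind_start() skips ahead to it. Callers then iterate with the usual loop, roughly as in this sketch (simplified; assumes the x86 unwind API of this kernel series):

#include <linux/printk.h>
#include <asm/unwind.h>

static void walk_stack(struct task_struct *task, struct pt_regs *regs,
		       unsigned long *first_frame)
{
	struct unwind_state state;

	for (unwind_start(&state, task, regs, first_frame);
	     !unwind_done(&state);
	     unwind_next_frame(&state)) {
		unsigned long addr = unwind_get_return_address(&state);

		if (addr)
			printk("  [<%p>] %pS\n", (void *)addr, (void *)addr);
	}
}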
|
|
|
@@ -104,10 +104,10 @@ void __init kernel_randomize_memory(void)
* consistent with the vaddr_start/vaddr_end variables.
*/
BUILD_BUG_ON(vaddr_start >= vaddr_end);
BUILD_BUG_ON(config_enabled(CONFIG_X86_ESPFIX64) &&
BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_ESPFIX64) &&
vaddr_end >= EFI_VA_START);
BUILD_BUG_ON((config_enabled(CONFIG_X86_ESPFIX64) ||
config_enabled(CONFIG_EFI)) &&
BUILD_BUG_ON((IS_ENABLED(CONFIG_X86_ESPFIX64) ||
IS_ENABLED(CONFIG_EFI)) &&
vaddr_end >= __START_KERNEL_map);
BUILD_BUG_ON(vaddr_end > __START_KERNEL_map);

|
|
|
@@ -730,6 +730,20 @@ void io_free_memtype(resource_size_t start, resource_size_t end)
free_memtype(start, end);
}

int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size)
{
enum page_cache_mode type = _PAGE_CACHE_MODE_WC;

return io_reserve_memtype(start, start + size, &type);
}
EXPORT_SYMBOL(arch_io_reserve_memtype_wc);

void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size)
{
io_free_memtype(start, start + size);
}
EXPORT_SYMBOL(arch_io_free_memtype_wc);

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot)
{
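These two exports let a driver reserve a write-combining memtype for a physical range before remapping it, instead of poking PAT internals directly. A rough usage sketch for a hypothetical driver mapping a frame-buffer style BAR (names and error handling are illustrative, not from this diff):

#include <linux/io.h>

/* Hypothetical driver setup: reserve WC for a region, then map it. */
static void __iomem *map_fb_wc(resource_size_t base, resource_size_t size)
{
	void __iomem *fb;

	if (arch_io_reserve_memtype_wc(base, size))
		return NULL;		/* a conflicting memtype is already set */

	fb = ioremap_wc(base, size);
	if (!fb)
		arch_io_free_memtype_wc(base, size);

	return fb;
}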
|
|
|
@ -1837,6 +1837,7 @@ static void __init init_hvm_pv_info(void)
|
|||
|
||||
xen_domain_type = XEN_HVM_DOMAIN;
|
||||
}
|
||||
#endif
|
||||
|
||||
static int xen_cpu_up_prepare(unsigned int cpu)
|
||||
{
|
||||
|
@ -1887,6 +1888,7 @@ static int xen_cpu_up_online(unsigned int cpu)
|
|||
return 0;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_XEN_PVHVM
|
||||
#ifdef CONFIG_KEXEC_CORE
|
||||
static void xen_hvm_shutdown(void)
|
||||
{
|
||||
|
|
|
@@ -133,6 +133,26 @@ retry:
}
EXPORT_SYMBOL_GPL(badblocks_check);

static void badblocks_update_acked(struct badblocks *bb)
{
u64 *p = bb->page;
int i;
bool unacked = false;

if (!bb->unacked_exist)
return;

for (i = 0; i < bb->count ; i++) {
if (!BB_ACK(p[i])) {
unacked = true;
break;
}
}

if (!unacked)
bb->unacked_exist = 0;
}

/**
* badblocks_set() - Add a range of bad blocks to the table.
* @bb: the badblocks structure that holds all badblock information

@@ -294,6 +314,8 @@ int badblocks_set(struct badblocks *bb, sector_t s, int sectors,
bb->changed = 1;
if (!acknowledged)
bb->unacked_exist = 1;
else
badblocks_update_acked(bb);
write_sequnlock_irqrestore(&bb->lock, flags);

return rv;

@@ -401,6 +423,7 @@ int badblocks_clear(struct badblocks *bb, sector_t s, int sectors)
}
}

badblocks_update_acked(bb);
bb->changed = 1;
out:
write_sequnlock_irq(&bb->lock);
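The new badblocks_update_acked() helper keeps bb->unacked_exist accurate when entries are acknowledged or cleared. A hedged sketch of the surrounding badblocks API, with made-up sector numbers, to show where the recomputation now kicks in:

#include <linux/badblocks.h>

/* Illustrative only: record an unacknowledged bad range, then clear it. */
static void badblocks_demo(struct badblocks *bb)
{
	/* Mark sectors 1024..1031 bad, not yet acknowledged. */
	badblocks_set(bb, 1024, 8, 0);

	/* ... later, once the range has been rewritten successfully ... */
	badblocks_clear(bb, 1024, 8);
	/* badblocks_clear() now calls badblocks_update_acked(), so
	 * bb->unacked_exist drops back to 0 when nothing unacked remains. */
}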
|
|
|
@ -342,6 +342,34 @@ static void flush_data_end_io(struct request *rq, int error)
|
|||
struct request_queue *q = rq->q;
|
||||
struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
|
||||
|
||||
/*
|
||||
* Updating q->in_flight[] here for making this tag usable
|
||||
* early. Because in blk_queue_start_tag(),
|
||||
* q->in_flight[BLK_RW_ASYNC] is used to limit async I/O and
|
||||
* reserve tags for sync I/O.
|
||||
*
|
||||
* More importantly this way can avoid the following I/O
|
||||
* deadlock:
|
||||
*
|
||||
* - suppose there are 40 fua requests comming to flush queue
|
||||
* and queue depth is 31
|
||||
* - 30 rqs are scheduled then blk_queue_start_tag() can't alloc
|
||||
* tag for async I/O any more
|
||||
* - all the 30 rqs are completed before FLUSH_PENDING_TIMEOUT
|
||||
* and flush_data_end_io() is called
|
||||
* - the other rqs still can't go ahead if not updating
|
||||
* q->in_flight[BLK_RW_ASYNC] here, meantime these rqs
|
||||
* are held in flush data queue and make no progress of
|
||||
* handling post flush rq
|
||||
* - only after the post flush rq is handled, all these rqs
|
||||
* can be completed
|
||||
*/
|
||||
|
||||
elv_completed_request(q, rq);
|
||||
|
||||
/* for avoiding double accounting */
|
||||
rq->cmd_flags &= ~REQ_STARTED;
|
||||
|
||||
/*
|
||||
* After populating an empty queue, kick it to avoid stall. Read
|
||||
* the comment in flush_end_io().
|
||||
|
|
|
@ -1217,9 +1217,9 @@ static struct request *blk_mq_map_request(struct request_queue *q,
|
|||
blk_mq_set_alloc_data(&alloc_data, q, 0, ctx, hctx);
|
||||
rq = __blk_mq_alloc_request(&alloc_data, op, op_flags);
|
||||
|
||||
hctx->queued++;
|
||||
data->hctx = hctx;
|
||||
data->ctx = ctx;
|
||||
data->hctx = alloc_data.hctx;
|
||||
data->ctx = alloc_data.ctx;
|
||||
data->hctx->queued++;
|
||||
return rq;
|
||||
}
|
||||
|
||||
|
|
|
@ -46,6 +46,7 @@
|
|||
#include "acdispat.h"
|
||||
#include "acnamesp.h"
|
||||
#include "actables.h"
|
||||
#include "acinterp.h"
|
||||
|
||||
#define _COMPONENT ACPI_DISPATCHER
|
||||
ACPI_MODULE_NAME("dsinit")
|
||||
|
@ -214,23 +215,17 @@ acpi_ds_initialize_objects(u32 table_index,
|
|||
|
||||
/* Walk entire namespace from the supplied root */
|
||||
|
||||
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
/*
|
||||
* We don't use acpi_walk_namespace since we do not want to acquire
|
||||
* the namespace reader lock.
|
||||
*/
|
||||
status =
|
||||
acpi_ns_walk_namespace(ACPI_TYPE_ANY, start_node, ACPI_UINT32_MAX,
|
||||
ACPI_NS_WALK_UNLOCK, acpi_ds_init_one_object,
|
||||
NULL, &info, NULL);
|
||||
0, acpi_ds_init_one_object, NULL, &info,
|
||||
NULL);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
ACPI_EXCEPTION((AE_INFO, status, "During WalkNamespace"));
|
||||
}
|
||||
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
|
||||
|
||||
status = acpi_get_table_by_index(table_index, &table);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
|
|
|
@ -99,14 +99,11 @@ acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
|
|||
"Method auto-serialization parse [%4.4s] %p\n",
|
||||
acpi_ut_get_node_name(node), node));
|
||||
|
||||
acpi_ex_enter_interpreter();
|
||||
|
||||
/* Create/Init a root op for the method parse tree */
|
||||
|
||||
op = acpi_ps_alloc_op(AML_METHOD_OP, obj_desc->method.aml_start);
|
||||
if (!op) {
|
||||
status = AE_NO_MEMORY;
|
||||
goto unlock;
|
||||
return_ACPI_STATUS(AE_NO_MEMORY);
|
||||
}
|
||||
|
||||
acpi_ps_set_name(op, node->name.integer);
|
||||
|
@ -118,8 +115,7 @@ acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
|
|||
acpi_ds_create_walk_state(node->owner_id, NULL, NULL, NULL);
|
||||
if (!walk_state) {
|
||||
acpi_ps_free_op(op);
|
||||
status = AE_NO_MEMORY;
|
||||
goto unlock;
|
||||
return_ACPI_STATUS(AE_NO_MEMORY);
|
||||
}
|
||||
|
||||
status = acpi_ds_init_aml_walk(walk_state, op, node,
|
||||
|
@ -138,8 +134,6 @@ acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
|
|||
status = acpi_ps_parse_aml(walk_state);
|
||||
|
||||
acpi_ps_delete_parse_tree(op);
|
||||
unlock:
|
||||
acpi_ex_exit_interpreter();
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
|
@ -730,26 +724,6 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
|
|||
|
||||
acpi_ds_method_data_delete_all(walk_state);
|
||||
|
||||
/*
|
||||
* If method is serialized, release the mutex and restore the
|
||||
* current sync level for this thread
|
||||
*/
|
||||
if (method_desc->method.mutex) {
|
||||
|
||||
/* Acquisition Depth handles recursive calls */
|
||||
|
||||
method_desc->method.mutex->mutex.acquisition_depth--;
|
||||
if (!method_desc->method.mutex->mutex.acquisition_depth) {
|
||||
walk_state->thread->current_sync_level =
|
||||
method_desc->method.mutex->mutex.
|
||||
original_sync_level;
|
||||
|
||||
acpi_os_release_mutex(method_desc->method.
|
||||
mutex->mutex.os_mutex);
|
||||
method_desc->method.mutex->mutex.thread_id = 0;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Delete any namespace objects created anywhere within the
|
||||
* namespace by the execution of this method. Unless:
|
||||
|
@ -786,6 +760,26 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
|
|||
~ACPI_METHOD_MODIFIED_NAMESPACE;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* If method is serialized, release the mutex and restore the
|
||||
* current sync level for this thread
|
||||
*/
|
||||
if (method_desc->method.mutex) {
|
||||
|
||||
/* Acquisition Depth handles recursive calls */
|
||||
|
||||
method_desc->method.mutex->mutex.acquisition_depth--;
|
||||
if (!method_desc->method.mutex->mutex.acquisition_depth) {
|
||||
walk_state->thread->current_sync_level =
|
||||
method_desc->method.mutex->mutex.
|
||||
original_sync_level;
|
||||
|
||||
acpi_os_release_mutex(method_desc->method.
|
||||
mutex->mutex.os_mutex);
|
||||
method_desc->method.mutex->mutex.thread_id = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Decrement the thread count on the method */
|
||||
|
|
|
@ -607,11 +607,9 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
|
|||
}
|
||||
}
|
||||
|
||||
acpi_ex_exit_interpreter();
|
||||
status =
|
||||
acpi_ev_initialize_region
|
||||
(acpi_ns_get_attached_object(node), FALSE);
|
||||
acpi_ex_enter_interpreter();
|
||||
|
||||
if (ACPI_FAILURE(status)) {
|
||||
/*
|
||||
|
|
|
@ -45,6 +45,7 @@
|
|||
#include "accommon.h"
|
||||
#include "acevents.h"
|
||||
#include "acnamesp.h"
|
||||
#include "acinterp.h"
|
||||
|
||||
#define _COMPONENT ACPI_EVENTS
|
||||
ACPI_MODULE_NAME("evrgnini")
|
||||
|
@ -597,9 +598,11 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
|
|||
}
|
||||
}
|
||||
|
||||
acpi_ex_exit_interpreter();
|
||||
status =
|
||||
acpi_ev_execute_reg_method(region_obj,
|
||||
ACPI_REG_CONNECT);
|
||||
acpi_ex_enter_interpreter();
|
||||
|
||||
if (acpi_ns_locked) {
|
||||
status =
|
||||
|
|
|
@ -137,7 +137,9 @@ unlock:
|
|||
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
|
||||
"**** Begin Table Object Initialization\n"));
|
||||
|
||||
acpi_ex_enter_interpreter();
|
||||
status = acpi_ds_initialize_objects(table_index, node);
|
||||
acpi_ex_exit_interpreter();
|
||||
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
|
||||
"**** Completed Table Object Initialization\n"));
|
||||
|
|
|
@ -662,7 +662,7 @@ static int ghes_proc(struct ghes *ghes)
|
|||
ghes_do_proc(ghes, ghes->estatus);
|
||||
out:
|
||||
ghes_clear_estatus(ghes);
|
||||
return 0;
|
||||
return rc;
|
||||
}
|
||||
|
||||
static void ghes_add_timer(struct ghes *ghes)
|
||||
|
|
|
@ -87,6 +87,7 @@ struct acpi_pci_link {
|
|||
|
||||
static LIST_HEAD(acpi_link_list);
|
||||
static DEFINE_MUTEX(acpi_link_lock);
|
||||
static int sci_irq = -1, sci_penalty;
|
||||
|
||||
/* --------------------------------------------------------------------------
|
||||
PCI Link Device Management
|
||||
|
@ -496,25 +497,13 @@ static int acpi_irq_get_penalty(int irq)
|
|||
{
|
||||
int penalty = 0;
|
||||
|
||||
/*
|
||||
* Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict
|
||||
* with PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be
|
||||
* use for PCI IRQs.
|
||||
*/
|
||||
if (irq == acpi_gbl_FADT.sci_interrupt) {
|
||||
u32 type = irq_get_trigger_type(irq) & IRQ_TYPE_SENSE_MASK;
|
||||
|
||||
if (type != IRQ_TYPE_LEVEL_LOW)
|
||||
penalty += PIRQ_PENALTY_ISA_ALWAYS;
|
||||
else
|
||||
penalty += PIRQ_PENALTY_PCI_USING;
|
||||
}
|
||||
if (irq == sci_irq)
|
||||
penalty += sci_penalty;
|
||||
|
||||
if (irq < ACPI_MAX_ISA_IRQS)
|
||||
return penalty + acpi_isa_irq_penalty[irq];
|
||||
|
||||
penalty += acpi_irq_pci_sharing_penalty(irq);
|
||||
return penalty;
|
||||
return penalty + acpi_irq_pci_sharing_penalty(irq);
|
||||
}
|
||||
|
||||
int __init acpi_irq_penalty_init(void)
|
||||
|
@ -619,6 +608,10 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
|
|||
acpi_device_bid(link->device));
|
||||
return -ENODEV;
|
||||
} else {
|
||||
if (link->irq.active < ACPI_MAX_ISA_IRQS)
|
||||
acpi_isa_irq_penalty[link->irq.active] +=
|
||||
PIRQ_PENALTY_PCI_USING;
|
||||
|
||||
printk(KERN_WARNING PREFIX "%s [%s] enabled at IRQ %d\n",
|
||||
acpi_device_name(link->device),
|
||||
acpi_device_bid(link->device), link->irq.active);
|
||||
|
@ -849,7 +842,7 @@ static int __init acpi_irq_penalty_update(char *str, int used)
|
|||
continue;
|
||||
|
||||
if (used)
|
||||
new_penalty = acpi_irq_get_penalty(irq) +
|
||||
new_penalty = acpi_isa_irq_penalty[irq] +
|
||||
PIRQ_PENALTY_ISA_USED;
|
||||
else
|
||||
new_penalty = 0;
|
||||
|
@ -871,7 +864,7 @@ static int __init acpi_irq_penalty_update(char *str, int used)
|
|||
void acpi_penalize_isa_irq(int irq, int active)
|
||||
{
|
||||
if ((irq >= 0) && (irq < ARRAY_SIZE(acpi_isa_irq_penalty)))
|
||||
acpi_isa_irq_penalty[irq] = acpi_irq_get_penalty(irq) +
|
||||
acpi_isa_irq_penalty[irq] +=
|
||||
(active ? PIRQ_PENALTY_ISA_USED : PIRQ_PENALTY_PCI_USING);
|
||||
}
|
||||
|
||||
|
@ -881,6 +874,17 @@ bool acpi_isa_irq_available(int irq)
|
|||
acpi_irq_get_penalty(irq) < PIRQ_PENALTY_ISA_ALWAYS);
|
||||
}
|
||||
|
||||
void acpi_penalize_sci_irq(int irq, int trigger, int polarity)
|
||||
{
|
||||
sci_irq = irq;
|
||||
|
||||
if (trigger == ACPI_MADT_TRIGGER_LEVEL &&
|
||||
polarity == ACPI_MADT_POLARITY_ACTIVE_LOW)
|
||||
sci_penalty = PIRQ_PENALTY_PCI_USING;
|
||||
else
|
||||
sci_penalty = PIRQ_PENALTY_ISA_ALWAYS;
|
||||
}
|
||||
|
||||
/*
|
||||
* Over-ride default table to reserve additional IRQs for use by ISA
|
||||
* e.g. acpi_irq_isa=5
|
||||
|
|
|
@@ -1002,7 +1002,7 @@ static int binder_dec_node(struct binder_node *node, int strong, int internal)

static struct binder_ref *binder_get_ref(struct binder_proc *proc,
uint32_t desc)
u32 desc, bool need_strong_ref)
{
struct rb_node *n = proc->refs_by_desc.rb_node;
struct binder_ref *ref;

@@ -1010,12 +1010,16 @@ static struct binder_ref *binder_get_ref(struct binder_proc *proc,
while (n) {
ref = rb_entry(n, struct binder_ref, rb_node_desc);

if (desc < ref->desc)
if (desc < ref->desc) {
n = n->rb_left;
else if (desc > ref->desc)
} else if (desc > ref->desc) {
n = n->rb_right;
else
} else if (need_strong_ref && !ref->strong) {
binder_user_error("tried to use weak ref as strong ref\n");
return NULL;
} else {
return ref;
}
}
return NULL;
}
|
@ -1285,7 +1289,10 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
|
|||
} break;
|
||||
case BINDER_TYPE_HANDLE:
|
||||
case BINDER_TYPE_WEAK_HANDLE: {
|
||||
struct binder_ref *ref = binder_get_ref(proc, fp->handle);
|
||||
struct binder_ref *ref;
|
||||
|
||||
ref = binder_get_ref(proc, fp->handle,
|
||||
fp->type == BINDER_TYPE_HANDLE);
|
||||
|
||||
if (ref == NULL) {
|
||||
pr_err("transaction release %d bad handle %d\n",
|
||||
|
@ -1380,7 +1387,7 @@ static void binder_transaction(struct binder_proc *proc,
|
|||
if (tr->target.handle) {
|
||||
struct binder_ref *ref;
|
||||
|
||||
ref = binder_get_ref(proc, tr->target.handle);
|
||||
ref = binder_get_ref(proc, tr->target.handle, true);
|
||||
if (ref == NULL) {
|
||||
binder_user_error("%d:%d got transaction to invalid handle\n",
|
||||
proc->pid, thread->pid);
|
||||
|
@ -1577,7 +1584,9 @@ static void binder_transaction(struct binder_proc *proc,
|
|||
fp->type = BINDER_TYPE_HANDLE;
|
||||
else
|
||||
fp->type = BINDER_TYPE_WEAK_HANDLE;
|
||||
fp->binder = 0;
|
||||
fp->handle = ref->desc;
|
||||
fp->cookie = 0;
|
||||
binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
|
||||
&thread->todo);
|
||||
|
||||
|
@ -1589,7 +1598,10 @@ static void binder_transaction(struct binder_proc *proc,
|
|||
} break;
|
||||
case BINDER_TYPE_HANDLE:
|
||||
case BINDER_TYPE_WEAK_HANDLE: {
|
||||
struct binder_ref *ref = binder_get_ref(proc, fp->handle);
|
||||
struct binder_ref *ref;
|
||||
|
||||
ref = binder_get_ref(proc, fp->handle,
|
||||
fp->type == BINDER_TYPE_HANDLE);
|
||||
|
||||
if (ref == NULL) {
|
||||
binder_user_error("%d:%d got transaction with invalid handle, %d\n",
|
||||
|
@ -1624,7 +1636,9 @@ static void binder_transaction(struct binder_proc *proc,
|
|||
return_error = BR_FAILED_REPLY;
|
||||
goto err_binder_get_ref_for_node_failed;
|
||||
}
|
||||
fp->binder = 0;
|
||||
fp->handle = new_ref->desc;
|
||||
fp->cookie = 0;
|
||||
binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
|
||||
trace_binder_transaction_ref_to_ref(t, ref,
|
||||
new_ref);
|
||||
|
@ -1678,6 +1692,7 @@ static void binder_transaction(struct binder_proc *proc,
|
|||
binder_debug(BINDER_DEBUG_TRANSACTION,
|
||||
" fd %d -> %d\n", fp->handle, target_fd);
|
||||
/* TODO: fput? */
|
||||
fp->binder = 0;
|
||||
fp->handle = target_fd;
|
||||
} break;
|
||||
|
||||
|
@ -1800,7 +1815,9 @@ static int binder_thread_write(struct binder_proc *proc,
|
|||
ref->desc);
|
||||
}
|
||||
} else
|
||||
ref = binder_get_ref(proc, target);
|
||||
ref = binder_get_ref(proc, target,
|
||||
cmd == BC_ACQUIRE ||
|
||||
cmd == BC_RELEASE);
|
||||
if (ref == NULL) {
|
||||
binder_user_error("%d:%d refcount change on invalid ref %d\n",
|
||||
proc->pid, thread->pid, target);
|
||||
|
@ -1996,7 +2013,7 @@ static int binder_thread_write(struct binder_proc *proc,
|
|||
if (get_user(cookie, (binder_uintptr_t __user *)ptr))
|
||||
return -EFAULT;
|
||||
ptr += sizeof(binder_uintptr_t);
|
||||
ref = binder_get_ref(proc, target);
|
||||
ref = binder_get_ref(proc, target, false);
|
||||
if (ref == NULL) {
|
||||
binder_user_error("%d:%d %s invalid ref %d\n",
|
||||
proc->pid, thread->pid,
|
||||
|
|
|
@ -1418,30 +1418,33 @@ static int ahci_init_msi(struct pci_dev *pdev, unsigned int n_ports,
|
|||
* Message mode could be enforced. In this case assume that advantage
|
||||
* of multipe MSIs is negated and use single MSI mode instead.
|
||||
*/
|
||||
nvec = pci_alloc_irq_vectors(pdev, n_ports, INT_MAX,
|
||||
PCI_IRQ_MSIX | PCI_IRQ_MSI);
|
||||
if (nvec > 0) {
|
||||
if (!(readl(hpriv->mmio + HOST_CTL) & HOST_MRSM)) {
|
||||
hpriv->get_irq_vector = ahci_get_irq_vector;
|
||||
hpriv->flags |= AHCI_HFLAG_MULTI_MSI;
|
||||
return nvec;
|
||||
if (n_ports > 1) {
|
||||
nvec = pci_alloc_irq_vectors(pdev, n_ports, INT_MAX,
|
||||
PCI_IRQ_MSIX | PCI_IRQ_MSI);
|
||||
if (nvec > 0) {
|
||||
if (!(readl(hpriv->mmio + HOST_CTL) & HOST_MRSM)) {
|
||||
hpriv->get_irq_vector = ahci_get_irq_vector;
|
||||
hpriv->flags |= AHCI_HFLAG_MULTI_MSI;
|
||||
return nvec;
|
||||
}
|
||||
|
||||
/*
|
||||
* Fallback to single MSI mode if the controller
|
||||
* enforced MRSM mode.
|
||||
*/
|
||||
printk(KERN_INFO
|
||||
"ahci: MRSM is on, fallback to single MSI\n");
|
||||
pci_free_irq_vectors(pdev);
|
||||
}
|
||||
|
||||
/*
|
||||
* Fallback to single MSI mode if the controller enforced MRSM
|
||||
* mode.
|
||||
* -ENOSPC indicated we don't have enough vectors. Don't bother
|
||||
* trying a single vectors for any other error:
|
||||
*/
|
||||
printk(KERN_INFO "ahci: MRSM is on, fallback to single MSI\n");
|
||||
pci_free_irq_vectors(pdev);
|
||||
if (nvec < 0 && nvec != -ENOSPC)
|
||||
return nvec;
|
||||
}
|
||||
|
||||
/*
|
||||
* -ENOSPC indicated we don't have enough vectors. Don't bother trying
|
||||
* a single vectors for any other error:
|
||||
*/
|
||||
if (nvec < 0 && nvec != -ENOSPC)
|
||||
return nvec;
|
||||
|
||||
/*
|
||||
* If the host is not capable of supporting per-port vectors, fall
|
||||
* back to single MSI before finally attempting single MSI-X.
|
||||
|
@ -1617,7 +1620,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
/* legacy intx interrupts */
|
||||
pci_intx(pdev, 1);
|
||||
}
|
||||
hpriv->irq = pdev->irq;
|
||||
hpriv->irq = pci_irq_vector(pdev, 0);
|
||||
|
||||
if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
|
||||
host->flags |= ATA_HOST_PARALLEL_SCAN;
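The reworked MSI setup above only requests per-port vectors when the controller actually has more than one port, then falls back in stages. The underlying pci_alloc_irq_vectors() pattern it relies on looks roughly like this generic sketch (a hypothetical driver fragment, not the ahci code itself):

#include <linux/pci.h>

/* Prefer per-queue MSI-X/MSI vectors, fall back to a single vector. */
static int request_vectors(struct pci_dev *pdev, unsigned int want)
{
	int nvec;

	nvec = pci_alloc_irq_vectors(pdev, want, want,
				     PCI_IRQ_MSIX | PCI_IRQ_MSI);
	if (nvec < 0)
		nvec = pci_alloc_irq_vectors(pdev, 1, 1,
					     PCI_IRQ_MSI | PCI_IRQ_LEGACY);
	if (nvec < 0)
		return nvec;

	/* Map vector 0 to its Linux IRQ number, as the ahci change does. */
	return pci_irq_vector(pdev, 0);
}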
|
||||
|
|
|
@@ -213,14 +213,16 @@ config DEBUG_DEVRES
If you are unsure about this, Say N here.

config DEBUG_TEST_DRIVER_REMOVE
bool "Test driver remove calls during probe"
bool "Test driver remove calls during probe (UNSTABLE)"
depends on DEBUG_KERNEL
help
Say Y here if you want the Driver core to test driver remove functions
by calling probe, remove, probe. This tests the remove path without
having to unbind the driver or unload the driver module.

If you are unsure about this, say N here.
This option is expected to find errors and may render your system
unusable. You should say N here unless you are explicitly looking to
test this functionality.

config SYS_HYPERVISOR
bool

|
|
@ -2954,7 +2954,7 @@ DAC960_DetectController(struct pci_dev *PCI_Device,
|
|||
case DAC960_PD_Controller:
|
||||
if (!request_region(Controller->IO_Address, 0x80,
|
||||
Controller->FullModelName)) {
|
||||
DAC960_Error("IO port 0x%d busy for Controller at\n",
|
||||
DAC960_Error("IO port 0x%lx busy for Controller at\n",
|
||||
Controller, Controller->IO_Address);
|
||||
goto Failure;
|
||||
}
|
||||
|
@ -2990,7 +2990,7 @@ DAC960_DetectController(struct pci_dev *PCI_Device,
|
|||
case DAC960_P_Controller:
|
||||
if (!request_region(Controller->IO_Address, 0x80,
|
||||
Controller->FullModelName)){
|
||||
DAC960_Error("IO port 0x%d busy for Controller at\n",
|
||||
DAC960_Error("IO port 0x%lx busy for Controller at\n",
|
||||
Controller, Controller->IO_Address);
|
||||
goto Failure;
|
||||
}
|
||||
|
|
|
@@ -164,7 +164,7 @@ static void sock_shutdown(struct nbd_device *nbd)
spin_lock(&nbd->sock_lock);

if (!nbd->sock) {
spin_unlock_irq(&nbd->sock_lock);
spin_unlock(&nbd->sock_lock);
return;
}

|
|
@@ -111,6 +111,7 @@ config OMAP_OCP2SCP

config QCOM_EBI2
bool "Qualcomm External Bus Interface 2 (EBI2)"
depends on HAS_IOMEM
depends on ARCH_QCOM || COMPILE_TEST
help
Say y here to enable support for the Qualcomm External Bus
Interface 2, which can be used to connect things like NAND Flash,

Some files were not shown because too many files have changed in this diff.