Merge branch 'linus' into locking/core, to pick up fixes before merging new changes

Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 245050c287
Author: Ingo Molnar <mingo@kernel.org>
Date: 2016-06-14 11:17:42 +02:00
474 changed files with 4059 additions and 2958 deletions


@ -56,6 +56,7 @@ stable kernels.
| ARM | MMU-500 | #841119,#826419 | N/A |
| | | | |
| Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 |
| Cavium | ThunderX ITS | #23144 | CAVIUM_ERRATUM_23144 |
| Cavium | ThunderX GICv3 | #23154 | CAVIUM_ERRATUM_23154 |
| Cavium | ThunderX Core | #27456 | CAVIUM_ERRATUM_27456 |
| Cavium | ThunderX SMMUv2 | #27704 | N/A |


@ -62,6 +62,7 @@ Required properties:
display-timings are used instead.
Optional properties (required if display-timings are used):
- ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
- display-timings : A node that describes the display timings as defined in
Documentation/devicetree/bindings/display/display-timing.txt.
- fsl,data-mapping : should be "spwg" or "jeida"


@ -7,6 +7,7 @@ Required properties:
- "ti,ina220" for ina220
- "ti,ina226" for ina226
- "ti,ina230" for ina230
- "ti,ina231" for ina231
- reg: I2C address
Optional properties:


@ -44,8 +44,8 @@ Required properties:
- our-claim-gpio: The GPIO that we use to claim the bus.
- their-claim-gpios: The GPIOs that the other sides use to claim the bus.
Note that some implementations may only support a single other master.
- Standard I2C mux properties. See mux.txt in this directory.
- Single I2C child bus node at reg 0. See mux.txt in this directory.
- Standard I2C mux properties. See i2c-mux.txt in this directory.
- Single I2C child bus node at reg 0. See i2c-mux.txt in this directory.
Optional properties:
- slew-delay-us: microseconds to wait for a GPIO to go high. Default is 10 us.
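A hedged C sketch of the claim handshake these GPIOs implement, assuming the kernel's gpiod consumer API; the retry policy and helper name are illustrative, not the binding's mandated algorithm:

#include <linux/delay.h>
#include <linux/gpio/consumer.h>

/* Sketch: raise our claim line, wait for it to settle, then make sure no
 * other master still holds its claim before declaring the bus ours. */
static int claim_bus_sketch(struct gpio_desc *our_claim,
			    struct gpio_desc **their_claims, int nmasters,
			    unsigned int slew_delay_us)
{
	int i;

	for (;;) {
		gpiod_set_value(our_claim, 1);
		udelay(slew_delay_us);

		for (i = 0; i < nmasters; i++)
			if (gpiod_get_value(their_claims[i]))
				break;
		if (i == nmasters)
			return 0;		/* uncontested: bus claimed */

		gpiod_set_value(our_claim, 0);	/* contested: back off, retry */
		udelay(slew_delay_us);
	}
}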


@ -27,7 +27,8 @@ Required properties:
- i2c-bus-name: The name of this bus. Also needed as pinctrl-name for the I2C
parents.
Furthermore, I2C mux properties and child nodes. See mux.txt in this directory.
Furthermore, I2C mux properties and child nodes. See i2c-mux.txt in this
directory.
Example:


@ -22,8 +22,8 @@ Required properties:
- i2c-parent: The phandle of the I2C bus that this multiplexer's master-side
port is connected to.
- mux-gpios: list of gpios used to control the muxer
* Standard I2C mux properties. See mux.txt in this directory.
* I2C child bus nodes. See mux.txt in this directory.
* Standard I2C mux properties. See i2c-mux.txt in this directory.
* I2C child bus nodes. See i2c-mux.txt in this directory.
Optional properties:
- idle-state: value to set the muxer to when idle. When no value is
@ -33,7 +33,7 @@ For each i2c child node, an I2C child bus will be created. They will
be numbered based on their order in the device tree.
Whenever an access is made to a device on a child bus, the value set
in the revelant node's reg property will be output using the list of
in the relevant node's reg property will be output using the list of
GPIOs, the first in the list holding the least-significant value.
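As a sketch of that mapping, assuming the kernel's gpiod consumer API (the function name is illustrative):

#include <linux/gpio/consumer.h>

/* Drive the mux select lines from the child node's reg value; the first
 * GPIO in the list carries the least-significant bit. */
static void mux_gpio_set_sketch(unsigned int val,
				struct gpio_desc **gpios, int ngpios)
{
	int i;

	for (i = 0; i < ngpios; i++)
		gpiod_set_value(gpios[i], (val >> i) & 1);
}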
If an idle state is defined, using the idle-state (optional) property,


@ -28,9 +28,9 @@ Also required are:
* Standard pinctrl properties that specify the pin mux state for each child
bus. See ../pinctrl/pinctrl-bindings.txt.
* Standard I2C mux properties. See mux.txt in this directory.
* Standard I2C mux properties. See i2c-mux.txt in this directory.
* I2C child bus nodes. See mux.txt in this directory.
* I2C child bus nodes. See i2c-mux.txt in this directory.
For each named state defined in the pinctrl-names property, an I2C child bus
will be created. I2C child bus numbers are assigned based on the index into


@ -7,8 +7,8 @@ Required properties:
- compatible: i2c-mux-reg
- i2c-parent: The phandle of the I2C bus that this multiplexer's master-side
port is connected to.
* Standard I2C mux properties. See mux.txt in this directory.
* I2C child bus nodes. See mux.txt in this directory.
* Standard I2C mux properties. See i2c-mux.txt in this directory.
* I2C child bus nodes. See i2c-mux.txt in this directory.
Optional properties:
- reg: this pair of <offset size> specifies the register to control the mux.
@ -24,7 +24,7 @@ Optional properties:
given, it defaults to the last value used.
Whenever an access is made to a device on a child bus, the value set
in the revelant node's reg property will be output to the register.
in the relevant node's reg property will be output to the register.
If an idle state is defined, using the idle-state (optional) property,
whenever an access is not being made to a device on a child bus, the


@ -13,10 +13,10 @@ Optional properties:
initialization. This is an array of 28 values(u8).
- marvell,wakeup-pin: It represents wakeup pin number of the bluetooth chip.
firmware will use the pin to wakeup host system.
firmware will use the pin to wakeup host system (u16).
- marvell,wakeup-gap-ms: wakeup gap represents wakeup latency of the host
platform. The value will be configured to firmware. This
is needed to work chip's sleep feature as expected.
is needed to work chip's sleep feature as expected (u16).
- interrupt-parent: phandle of the parent interrupt controller
- interrupts : interrupt pin number to the cpu. Driver will request an irq based
on this interrupt number. During system suspend, the irq will be
@ -50,7 +50,7 @@ calibration data is also available in below example.
0x37 0x01 0x1c 0x00 0xff 0xff 0xff 0xff 0x01 0x7f 0x04 0x02
0x00 0x00 0xba 0xce 0xc0 0xc6 0x2d 0x00 0x00 0x00 0x00 0x00
0x00 0x00 0xf0 0x00>;
marvell,wakeup-pin = <0x0d>;
marvell,wakeup-gap-ms = <0x64>;
marvell,wakeup-pin = /bits/ 16 <0x0d>;
marvell,wakeup-gap-ms = /bits/ 16 <0x64>;
};
};


@ -255,6 +255,7 @@ synology Synology, Inc.
SUNW Sun Microsystems, Inc
tbs TBS Technologies
tcl Toby Churchill Ltd.
technexion TechNexion
technologic Technologic Systems
thine THine Electronics, Inc.
ti Texas Instruments
@ -269,6 +270,7 @@ tronsmart Tronsmart
truly Truly Semiconductors Limited
tyan Tyan Computer Corporation
upisemi uPI Semiconductor Corp.
uniwest United Western Technologies Corp (UniWest)
urt United Radiant Technology Corporation
usi Universal Scientific Industrial Co., Ltd.
v3 V3 Semiconductor


@ -1,141 +1,26 @@
Each mount of the devpts filesystem is now distinct such that ptys
and their indices allocated in one mount are independent from ptys
and their indices in all other mounts.
To support containers, we now allow multiple instances of devpts filesystem,
such that indices of ptys allocated in one instance are independent of indices
allocated in other instances of devpts.
All mounts of the devpts filesystem now create a /dev/pts/ptmx node
with permissions 0000.
To preserve backward compatibility, this support for multiple instances is
enabled only if:
To retain backwards compatibility, a ptmx device node (aka any node
created with "mknod name c 5 2") when opened will look for an instance
of devpts under the name "pts" in the same directory as the ptmx device
node.
- CONFIG_DEVPTS_MULTIPLE_INSTANCES=y, and
- '-o newinstance' mount option is specified while mounting devpts
IOW, devpts now supports both single-instance and multi-instance semantics.
If CONFIG_DEVPTS_MULTIPLE_INSTANCES=n, there is no change in behavior and
this referred to as the "legacy" mode. In this mode, the new mount options
(-o newinstance and -o ptmxmode) will be ignored with a 'bogus option' message
on console.
If CONFIG_DEVPTS_MULTIPLE_INSTANCES=y and devpts is mounted without the
'newinstance' option (as in current start-up scripts) the new mount binds
to the initial kernel mount of devpts. This mode is referred to as the
'single-instance' mode and the current, single-instance semantics are
preserved, i.e PTYs are common across the system.
The only difference between this single-instance mode and the legacy mode
is the presence of new, '/dev/pts/ptmx' node with permissions 0000, which
can safely be ignored.
If CONFIG_DEVPTS_MULTIPLE_INSTANCES=y and 'newinstance' option is specified,
the mount is considered to be in the multi-instance mode and a new instance
of the devpts fs is created. Any ptys created in this instance are independent
of ptys in other instances of devpts. Like in the single-instance mode, the
/dev/pts/ptmx node is present. To effectively use the multi-instance mode,
open of /dev/ptmx must be a redirected to '/dev/pts/ptmx' using a symlink or
bind-mount.
Eg: A container startup script could do the following:
$ chmod 0666 /dev/pts/ptmx
$ rm /dev/ptmx
$ ln -s pts/ptmx /dev/ptmx
$ ns_exec -cm /bin/bash
# We are now in new container
$ umount /dev/pts
$ mount -t devpts -o newinstance lxcpts /dev/pts
$ sshd -p 1234
where 'ns_exec -cm /bin/bash' calls clone() with CLONE_NEWNS flag and execs
/bin/bash in the child process. A pty created by the sshd is not visible in
the original mount of /dev/pts.
As an option instead of placing a /dev/ptmx device node at /dev/ptmx
it is possible to place a symlink to /dev/pts/ptmx at /dev/ptmx or
to bind mount /dev/pts/ptmx to /dev/ptmx. If you opt for using
the devpts filesystem in this manner devpts should be mounted with
ptmxmode=0666, or chmod 0666 /dev/pts/ptmx should be called.
Total count of pty pairs in all instances is limited by sysctls:
kernel.pty.max = 4096 - global limit
kernel.pty.reserve = 1024 - reserve for initial instance
kernel.pty.reserve = 1024 - reserved for filesystems mounted from the initial mount namespace
kernel.pty.nr - current count of ptys
Per-instance limit could be set by adding mount option "max=<count>".
This feature was added in kernel 3.4 together with sysctl kernel.pty.reserve.
In kernels older than 3.4 sysctl kernel.pty.max works as per-instance limit.
User-space changes
------------------
In multi-instance mode (i.e '-o newinstance' mount option is specified at least
once), following user-space issues should be noted.
1. If -o newinstance mount option is never used, /dev/pts/ptmx can be ignored
and no change is needed to system-startup scripts.
2. To effectively use multi-instance mode (i.e -o newinstance is specified)
administrators or startup scripts should "redirect" open of /dev/ptmx to
/dev/pts/ptmx using either a bind mount or symlink.
$ mount -t devpts -o newinstance devpts /dev/pts
followed by either
$ rm /dev/ptmx
$ ln -s pts/ptmx /dev/ptmx
$ chmod 666 /dev/pts/ptmx
or
$ mount -o bind /dev/pts/ptmx /dev/ptmx
3. The '/dev/ptmx -> pts/ptmx' symlink is the preferred method since it
enables better error-reporting and treats both single-instance and
multi-instance mounts similarly.
But this method requires that system-startup scripts set the mode of
/dev/pts/ptmx correctly (default mode is 0000). The scripts can set the
mode by, either
- adding ptmxmode mount option to devpts entry in /etc/fstab, or
- using 'chmod 0666 /dev/pts/ptmx'
4. If multi-instance mode mount is needed for containers, but the system
startup scripts have not yet been updated, container-startup scripts
should bind mount /dev/ptmx to /dev/pts/ptmx to avoid breaking single-
instance mounts.
Or, in general, container-startup scripts should use:
mount -t devpts -o newinstance -o ptmxmode=0666 devpts /dev/pts
if [ ! -L /dev/ptmx ]; then
mount -o bind /dev/pts/ptmx /dev/ptmx
fi
When all devpts mounts are multi-instance, /dev/ptmx can permanently be
a symlink to pts/ptmx and the bind mount can be ignored.
5. A multi-instance mount that is not accompanied by the /dev/ptmx to
/dev/pts/ptmx redirection would result in an unusable/unreachable pty.
mount -t devpts -o newinstance lxcpts /dev/pts
immediately followed by:
open("/dev/ptmx")
would create a pty, say /dev/pts/7, in the initial kernel mount.
But /dev/pts/7 would be invisible in the new mount.
6. The permissions for /dev/pts/ptmx node should be specified when mounting
/dev/pts, using the '-o ptmxmode=%o' mount option (default is 0000).
mount -t devpts -o newinstance -o ptmxmode=0644 devpts /dev/pts
The permissions can be later be changed as usual with 'chmod'.
chmod 666 /dev/pts/ptmx
7. A mount of devpts without the 'newinstance' option results in binding to
initial kernel mount. This behavior while preserving legacy semantics,
does not provide strict isolation in a container environment. i.e by
mounting devpts without the 'newinstance' option, a container could
get visibility into the 'host' or root container's devpts.
To workaround this and have strict isolation, all mounts of devpts,
including the mount in the root container, should use the newinstance
option.
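To make the ptmx redirection concrete, here is a minimal user-space sketch that allocates a pty through a mount's own /dev/pts/ptmx node, using only standard POSIX pty calls:

#define _XOPEN_SOURCE 600
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Opening this mount's own ptmx keeps the slave in the same instance. */
	int master = open("/dev/pts/ptmx", O_RDWR | O_NOCTTY);

	if (master < 0) {
		perror("open /dev/pts/ptmx");
		return 1;
	}
	if (grantpt(master) < 0 || unlockpt(master) < 0) {
		perror("grantpt/unlockpt");
		return 1;
	}
	/* The slave, e.g. /dev/pts/7, is visible only in this mount. */
	printf("slave pty: %s\n", ptsname(master));
	return 0;
}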


@ -170,21 +170,92 @@ document trapinfo
address the kernel panicked.
end
define dump_log_idx
set $idx = $arg0
if ($argc > 1)
set $prev_flags = $arg1
else
set $prev_flags = 0
end
set $msg = ((struct printk_log *) (log_buf + $idx))
set $prefix = 1
set $newline = 1
set $log = log_buf + $idx + sizeof(*$msg)
# prev & LOG_CONT && !(msg->flags & LOG_PREFIX)
if (($prev_flags & 8) && !($msg->flags & 4))
set $prefix = 0
end
# msg->flags & LOG_CONT
if ($msg->flags & 8)
# (prev & LOG_CONT && !(prev & LOG_NEWLINE))
if (($prev_flags & 8) && !($prev_flags & 2))
set $prefix = 0
end
# (!(msg->flags & LOG_NEWLINE))
if (!($msg->flags & 2))
set $newline = 0
end
end
if ($prefix)
printf "[%5lu.%06lu] ", $msg->ts_nsec / 1000000000, $msg->ts_nsec % 1000000000
end
if ($msg->text_len != 0)
eval "printf \"%%%d.%ds\", $log", $msg->text_len, $msg->text_len
end
if ($newline)
printf "\n"
end
if ($msg->dict_len > 0)
set $dict = $log + $msg->text_len
set $idx = 0
set $line = 1
while ($idx < $msg->dict_len)
if ($line)
printf " "
set $line = 0
end
set $c = $dict[$idx]
if ($c == '\0')
printf "\n"
set $line = 1
else
if ($c < ' ' || $c >= 127 || $c == '\\')
printf "\\x%02x", $c
else
printf "%c", $c
end
end
set $idx = $idx + 1
end
printf "\n"
end
end
document dump_log_idx
Dump a single log given its index in the log buffer. The first
parameter is the index into log_buf, the second is optional and
specifies the previous log entry's flags, used for properly
formatting continued lines.
end
define dmesg
set $i = 0
set $end_idx = (log_end - 1) & (log_buf_len - 1)
set $i = log_first_idx
set $end_idx = log_first_idx
set $prev_flags = 0
while ($i < logged_chars)
set $idx = (log_end - 1 - logged_chars + $i) & (log_buf_len - 1)
if ($idx + 100 <= $end_idx) || \
($end_idx <= $idx && $idx + 100 < log_buf_len)
printf "%.100s", &log_buf[$idx]
set $i = $i + 100
while (1)
set $msg = ((struct printk_log *) (log_buf + $i))
if ($msg->len == 0)
set $i = 0
else
printf "%c", log_buf[$idx]
set $i = $i + 1
dump_log_idx $i $prev_flags
set $i = $i + $msg->len
set $prev_flags = $msg->flags
end
if ($i == $end_idx)
loop_break
end
end
end
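The new macro's record walk, restated as a hedged C sketch; the struct layout is abbreviated from the v4.x printk_log header, and emit_record() is a placeholder for the per-record printing that dump_log_idx performs:

#include <stdint.h>

struct printk_log {		/* abbreviated v4.x record header */
	uint64_t ts_nsec;	/* timestamp in nanoseconds */
	uint16_t len;		/* record length, including this header */
	uint16_t text_len;	/* length of the text payload */
	uint16_t dict_len;	/* length of the dictionary payload */
	uint8_t  facility;
	uint8_t  flags_level;	/* flags:5, level:3 packed together */
};

void emit_record(const struct printk_log *msg);	/* placeholder printer */

void walk_log(const char *log_buf, uint32_t first_idx)
{
	uint32_t i = first_idx;

	for (;;) {
		const struct printk_log *msg =
			(const struct printk_log *)(log_buf + i);

		if (msg->len == 0) {
			i = 0;		/* a zero-length record marks the wrap */
		} else {
			emit_record(msg);
			i += msg->len;
		}
		if (i == first_idx)
			break;		/* walked the whole ring */
	}
}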


@ -826,7 +826,8 @@ The keyctl syscall functions are:
(*) Compute a Diffie-Hellman shared secret or public key
long keyctl(KEYCTL_DH_COMPUTE, struct keyctl_dh_params *params,
char *buffer, size_t buflen);
char *buffer, size_t buflen,
void *reserved);
The params struct contains serial numbers for three keys:
@ -843,6 +844,8 @@ The keyctl syscall functions are:
public key. If the base is the remote public key, the result is
the shared secret.
The reserved argument must be set to NULL.
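A hedged user-space sketch of the extended call; the struct layout and the KEYCTL_DH_COMPUTE value follow the uapi headers, and the key serial numbers would come from earlier add_key()/keyctl() calls:

#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef KEYCTL_DH_COMPUTE
#define KEYCTL_DH_COMPUTE 23	/* from <linux/keyctl.h> */
#endif

struct keyctl_dh_params {
	int32_t private;	/* serial of our private key */
	int32_t prime;		/* serial of the prime "p" key */
	int32_t base;		/* serial of the base/generator key */
};

/* The new fifth argument is reserved and must be NULL. */
static long dh_compute(struct keyctl_dh_params *params,
		       char *buffer, size_t buflen)
{
	return syscall(SYS_keyctl, KEYCTL_DH_COMPUTE, params, buffer, buflen,
		       (void *)NULL);
}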
The buffer length must be at least the length of the prime, or zero.
If the buffer length is nonzero, the length of the result is


@ -3086,6 +3086,7 @@ M: Stephen Boyd <sboyd@codeaurora.org>
L: linux-clk@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/clk/linux.git
S: Maintained
F: Documentation/devicetree/bindings/clock/
F: drivers/clk/
X: drivers/clk/clkdev.c
F: include/linux/clk-pr*
@ -8016,6 +8017,7 @@ Q: http://patchwork.kernel.org/project/linux-wireless/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next.git
S: Maintained
F: Documentation/devicetree/bindings/net/wireless/
F: drivers/net/wireless/
NETXEN (1/10) GbE SUPPORT
@ -8413,10 +8415,9 @@ F: drivers/i2c/busses/i2c-ocores.c
OPEN FIRMWARE AND FLATTENED DEVICE TREE
M: Rob Herring <robh+dt@kernel.org>
M: Frank Rowand <frowand.list@gmail.com>
M: Grant Likely <grant.likely@linaro.org>
L: devicetree@vger.kernel.org
W: http://www.devicetree.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/glikely/linux.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git
S: Maintained
F: drivers/of/
F: include/linux/of*.h
@ -8424,12 +8425,10 @@ F: scripts/dtc/
OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS
M: Rob Herring <robh+dt@kernel.org>
M: Pawel Moll <pawel.moll@arm.com>
M: Mark Rutland <mark.rutland@arm.com>
M: Ian Campbell <ijc+devicetree@hellion.org.uk>
M: Kumar Gala <galak@codeaurora.org>
L: devicetree@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git
Q: http://patchwork.ozlabs.org/project/devicetree-bindings/list/
S: Maintained
F: Documentation/devicetree/
F: arch/*/boot/dts/


@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 7
SUBLEVEL = 0
EXTRAVERSION = -rc1
EXTRAVERSION = -rc3
NAME = Psychotic Stoned Sheep
# *DOCUMENTATION*


@ -61,7 +61,7 @@ config RWSEM_GENERIC_SPINLOCK
def_bool y
config ARCH_DISCONTIGMEM_ENABLE
def_bool y
def_bool n
config ARCH_FLATMEM_ENABLE
def_bool y
@ -186,9 +186,6 @@ if SMP
config ARC_HAS_COH_CACHES
def_bool n
config ARC_HAS_REENTRANT_IRQ_LV2
def_bool n
config ARC_MCIP
bool "ARConnect Multicore IP (MCIP) Support "
depends on ISA_ARCV2
@ -366,25 +363,10 @@ config NODES_SHIFT
if ISA_ARCOMPACT
config ARC_COMPACT_IRQ_LEVELS
bool "ARCompact IRQ Priorities: High(2)/Low(1)"
bool "Setup Timer IRQ as high Priority"
default n
# Timer HAS to be high priority, for any other high priority config
select ARC_IRQ3_LV2
# if SMP, LV2 enabled ONLY if ARC implementation has LV2 re-entrancy
depends on !SMP || ARC_HAS_REENTRANT_IRQ_LV2
if ARC_COMPACT_IRQ_LEVELS
config ARC_IRQ3_LV2
bool
config ARC_IRQ5_LV2
bool
config ARC_IRQ6_LV2
bool
endif #ARC_COMPACT_IRQ_LEVELS
depends on !SMP
config ARC_FPU_SAVE_RESTORE
bool "Enable FPU state persistence across context switch"
@ -407,11 +389,6 @@ config ARC_HAS_LLSC
default y
depends on !ARC_CANT_LLSC
config ARC_STAR_9000923308
bool "Workaround for llock/scond livelock"
default n
depends on ISA_ARCV2 && SMP && ARC_HAS_LLSC
config ARC_HAS_SWAPE
bool "Insn: SWAPE (endian-swap)"
default y
@ -471,7 +448,7 @@ config LINUX_LINK_BASE
config HIGHMEM
bool "High Memory Support"
select DISCONTIGMEM
select ARCH_DISCONTIGMEM_ENABLE
help
With ARC 2G:2G address split, only upper 2G is directly addressable by
kernel. Enable this to potentially allow access to rest of 2G and PAE


@ -127,7 +127,7 @@ libs-y += arch/arc/lib/ $(LIBGCC)
boot := arch/arc/boot
#default target for make without any arguements.
#default target for make without any arguments.
KBUILD_IMAGE := bootpImage
all: $(KBUILD_IMAGE)


@ -23,8 +23,6 @@
/ {
clock-frequency = <500000000>; /* 500 MHZ */
soc100 {
bus-frequency = <166666666>;


@ -23,8 +23,6 @@
/ {
clock-frequency = <500000000>; /* 500 MHZ */
soc100 {
bus-frequency = <166666666>;


@ -15,7 +15,6 @@
/ {
compatible = "snps,arc";
clock-frequency = <750000000>; /* 750 MHZ */
#address-cells = <1>;
#size-cells = <1>;


@ -14,7 +14,6 @@
/ {
compatible = "snps,arc";
clock-frequency = <90000000>;
#address-cells = <1>;
#size-cells = <1>;


@ -14,7 +14,6 @@
/ {
compatible = "snps,arc";
clock-frequency = <90000000>;
#address-cells = <1>;
#size-cells = <1>;


@ -18,7 +18,6 @@
/ {
compatible = "ezchip,arc-nps";
clock-frequency = <83333333>; /* 83.333333 MHZ */
#address-cells = <1>;
#size-cells = <1>;
interrupt-parent = <&intc>;


@ -11,7 +11,6 @@
/ {
compatible = "snps,nsim";
clock-frequency = <80000000>; /* 80 MHZ */
#address-cells = <1>;
#size-cells = <1>;
interrupt-parent = <&core_intc>;


@ -11,7 +11,6 @@
/ {
compatible = "snps,nsimosci";
clock-frequency = <20000000>; /* 20 MHZ */
#address-cells = <1>;
#size-cells = <1>;
interrupt-parent = <&core_intc>;


@ -11,7 +11,6 @@
/ {
compatible = "snps,nsimosci_hs";
clock-frequency = <20000000>; /* 20 MHZ */
#address-cells = <1>;
#size-cells = <1>;
interrupt-parent = <&core_intc>;


@ -11,7 +11,6 @@
/ {
compatible = "snps,nsimosci_hs";
clock-frequency = <5000000>; /* 5 MHZ */
#address-cells = <1>;
#size-cells = <1>;
interrupt-parent = <&core_intc>;


@ -13,7 +13,6 @@
/ {
compatible = "snps,arc";
clock-frequency = <80000000>; /* 80 MHZ */
#address-cells = <1>;
#size-cells = <1>;
chosen { };


@ -8,7 +8,6 @@
/ {
compatible = "snps,arc";
clock-frequency = <80000000>; /* 80 MHZ */
#address-cells = <1>;
#size-cells = <1>;
chosen { };


@ -8,7 +8,6 @@
/ {
compatible = "snps,arc";
clock-frequency = <80000000>; /* 80 MHZ */
#address-cells = <1>;
#size-cells = <1>;
chosen { };


@ -14,7 +14,6 @@
/ {
compatible = "snps,arc";
clock-frequency = <50000000>;
#address-cells = <1>;
#size-cells = <1>;


@ -15,7 +15,6 @@
/ {
compatible = "snps,arc";
clock-frequency = <50000000>;
#address-cells = <1>;
#size-cells = <1>;


@ -25,50 +25,17 @@
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
#ifdef CONFIG_ARC_STAR_9000923308
#define SCOND_FAIL_RETRY_VAR_DEF \
unsigned int delay = 1, tmp; \
#define SCOND_FAIL_RETRY_ASM \
" bz 4f \n" \
" ; --- scond fail delay --- \n" \
" mov %[tmp], %[delay] \n" /* tmp = delay */ \
"2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \
" sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \
" rol %[delay], %[delay] \n" /* delay *= 2 */ \
" b 1b \n" /* start over */ \
"4: ; --- success --- \n" \
#define SCOND_FAIL_RETRY_VARS \
,[delay] "+&r" (delay),[tmp] "=&r" (tmp) \
#else /* !CONFIG_ARC_STAR_9000923308 */
#define SCOND_FAIL_RETRY_VAR_DEF
#define SCOND_FAIL_RETRY_ASM \
" bnz 1b \n" \
#define SCOND_FAIL_RETRY_VARS
#endif
#define ATOMIC_OP(op, c_op, asm_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
unsigned int val; \
SCOND_FAIL_RETRY_VAR_DEF \
unsigned int val; \
\
__asm__ __volatile__( \
"1: llock %[val], [%[ctr]] \n" \
" " #asm_op " %[val], %[val], %[i] \n" \
" scond %[val], [%[ctr]] \n" \
" \n" \
SCOND_FAIL_RETRY_ASM \
\
" bnz 1b \n" \
: [val] "=&r" (val) /* Early clobber to prevent reg reuse */ \
SCOND_FAIL_RETRY_VARS \
: [ctr] "r" (&v->counter), /* Not "m": llock only supports reg direct addr mode */ \
[i] "ir" (i) \
: "cc"); \
@ -77,8 +44,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned int val; \
SCOND_FAIL_RETRY_VAR_DEF \
unsigned int val; \
\
/* \
* Explicit full memory barrier needed before/after as \
@ -90,11 +56,8 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
"1: llock %[val], [%[ctr]] \n" \
" " #asm_op " %[val], %[val], %[i] \n" \
" scond %[val], [%[ctr]] \n" \
" \n" \
SCOND_FAIL_RETRY_ASM \
\
" bnz 1b \n" \
: [val] "=&r" (val) \
SCOND_FAIL_RETRY_VARS \
: [ctr] "r" (&v->counter), \
[i] "ir" (i) \
: "cc"); \
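In C terms, the llock/scond loop that remains amounts to the following sketch; load_locked() and store_conditional() are hypothetical stand-ins for the two instructions:

/* Retry the read-modify-write until the store-conditional succeeds,
 * which is exactly what the "bnz 1b" after scond does above. */
static inline void atomic_add_sketch(int i, int *counter)
{
	int val;

	do {
		val = load_locked(counter);		/* llock  */
		val += i;				/* asm_op */
	} while (!store_conditional(counter, val));	/* scond + bnz 1b */
}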


@ -76,8 +76,8 @@
* We need to be a bit more cautious here. What if a kernel bug in
* L1 ISR, caused SP to go whaco (some small value which looks like
* USER stk) and then we take L2 ISR.
* Above brlo alone would treat it as a valid L1-L2 sceanrio
* instead of shouting alound
* Above brlo alone would treat it as a valid L1-L2 scenario
* instead of shouting around
* The only feasible way is to make sure this L2 happened in
* L1 prelogue ONLY i.e. ilink2 is less than a pre-set marker in
* L1 ISR before it switches stack


@ -83,7 +83,7 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
local_flush_tlb_all();
/*
* Above checke for rollover of 8 bit ASID in 32 bit container.
* Above check for rollover of 8 bit ASID in 32 bit container.
* If the container itself wrapped around, set it to a non zero
* "generation" to distinguish from no context
*/


@ -47,7 +47,7 @@
* Page Tables are purely for Linux VM's consumption and the bits below are
* suited to that (uniqueness). Hence some are not implemented in the TLB and
* some have different value in TLB.
* e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible becoz they live in
* e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live in
* seperate PD0 and PD1, which combined forms a translation entry)
* while for PTE perspective, they are 8 and 9 respectively
* with MMU v3: Most bits (except SHARED) represent the exact hardware pos


@ -78,7 +78,7 @@ struct task_struct;
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp)
/*
* Where abouts of Task's sp, fp, blink when it was last seen in kernel mode.
* Whereabouts of Task's sp, fp, blink when it was last seen in kernel mode.
* Look in process.c for details of kernel stack layout
*/
#define TSK_K_ESP(tsk) (tsk->thread.ksp)


@ -86,7 +86,7 @@ static inline const char *arc_platform_smp_cpuinfo(void)
* (1) These insn were introduced only in 4.10 release. So for older released
* support needed.
*
* (2) In a SMP setup, the LLOCK/SCOND atomiticity across CPUs needs to be
* (2) In a SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be
* gaurantted by the platform (not something which core handles).
* Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ
* disabling for atomicity.


@ -20,11 +20,6 @@
#ifdef CONFIG_ARC_HAS_LLSC
/*
* A normal LLOCK/SCOND based system, w/o need for livelock workaround
*/
#ifndef CONFIG_ARC_STAR_9000923308
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned int val;
@ -238,293 +233,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
smp_mb();
}
#else /* CONFIG_ARC_STAR_9000923308 */
/*
* HS38x4 could get into a LLOCK/SCOND livelock in case of multiple overlapping
* coherency transactions in the SCU. The exclusive line state keeps rotating
* among contenting cores leading to a never ending cycle. So break the cycle
* by deferring the retry of failed exclusive access (SCOND). The actual delay
* needed is function of number of contending cores as well as the unrelated
* coherency traffic from other cores. To keep the code simple, start off with
* small delay of 1 which would suffice most cases and in case of contention
* double the delay. Eventually the delay is sufficient such that the coherency
* pipeline is drained, thus a subsequent exclusive access would succeed.
*/
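/*
 * Editor's sketch (not part of the original file): the deferred-retry idea
 * described above, in plain C. load_locked()/store_conditional() stand in
 * for llock/scond, and LOCKED for __ARCH_SPIN_LOCK_LOCKED__.
 */
static inline void spin_lock_backoff_sketch(volatile unsigned int *slock)
{
	unsigned int delay = 1, t;

	for (;;) {
		if (load_locked(slock) == LOCKED) {	/* "breq ... 0b" */
			delay = 1;			/* reset on contention */
			continue;
		}
		if (store_conditional(slock, LOCKED))	/* scond */
			break;				/* "bz 4f": acquired */
		for (t = delay; t; t--)			/* busy-wait 'delay' */
			cpu_relax();
		delay *= 2;				/* exponential backoff */
	}
}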
#define SCOND_FAIL_RETRY_VAR_DEF \
unsigned int delay, tmp; \
#define SCOND_FAIL_RETRY_ASM \
" ; --- scond fail delay --- \n" \
" mov %[tmp], %[delay] \n" /* tmp = delay */ \
"2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \
" sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \
" rol %[delay], %[delay] \n" /* delay *= 2 */ \
" b 1b \n" /* start over */ \
" \n" \
"4: ; --- done --- \n" \
#define SCOND_FAIL_RETRY_VARS \
,[delay] "=&r" (delay), [tmp] "=&r" (tmp) \
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned int val;
SCOND_FAIL_RETRY_VAR_DEF;
smp_mb();
__asm__ __volatile__(
"0: mov %[delay], 1 \n"
"1: llock %[val], [%[slock]] \n"
" breq %[val], %[LOCKED], 0b \n" /* spin while LOCKED */
" scond %[LOCKED], [%[slock]] \n" /* acquire */
" bz 4f \n" /* done */
" \n"
SCOND_FAIL_RETRY_ASM
: [val] "=&r" (val)
SCOND_FAIL_RETRY_VARS
: [slock] "r" (&(lock->slock)),
[LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
: "memory", "cc");
smp_mb();
}
/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
unsigned int val, got_it = 0;
SCOND_FAIL_RETRY_VAR_DEF;
smp_mb();
__asm__ __volatile__(
"0: mov %[delay], 1 \n"
"1: llock %[val], [%[slock]] \n"
" breq %[val], %[LOCKED], 4f \n" /* already LOCKED, just bail */
" scond %[LOCKED], [%[slock]] \n" /* acquire */
" bz.d 4f \n"
" mov.z %[got_it], 1 \n" /* got it */
" \n"
SCOND_FAIL_RETRY_ASM
: [val] "=&r" (val),
[got_it] "+&r" (got_it)
SCOND_FAIL_RETRY_VARS
: [slock] "r" (&(lock->slock)),
[LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
: "memory", "cc");
smp_mb();
return got_it;
}
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
smp_mb();
lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
smp_mb();
}
/*
* Read-write spinlocks, allowing multiple readers but only one writer.
* Unfair locking as Writers could be starved indefinitely by Reader(s)
*/
static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned int val;
SCOND_FAIL_RETRY_VAR_DEF;
smp_mb();
/*
* zero means writer holds the lock exclusively, deny Reader.
* Otherwise grant lock to first/subseq reader
*
* if (rw->counter > 0) {
* rw->counter--;
* ret = 1;
* }
*/
__asm__ __volatile__(
"0: mov %[delay], 1 \n"
"1: llock %[val], [%[rwlock]] \n"
" brls %[val], %[WR_LOCKED], 0b\n" /* <= 0: spin while write locked */
" sub %[val], %[val], 1 \n" /* reader lock */
" scond %[val], [%[rwlock]] \n"
" bz 4f \n" /* done */
" \n"
SCOND_FAIL_RETRY_ASM
: [val] "=&r" (val)
SCOND_FAIL_RETRY_VARS
: [rwlock] "r" (&(rw->counter)),
[WR_LOCKED] "ir" (0)
: "memory", "cc");
smp_mb();
}
/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
unsigned int val, got_it = 0;
SCOND_FAIL_RETRY_VAR_DEF;
smp_mb();
__asm__ __volatile__(
"0: mov %[delay], 1 \n"
"1: llock %[val], [%[rwlock]] \n"
" brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */
" sub %[val], %[val], 1 \n" /* counter-- */
" scond %[val], [%[rwlock]] \n"
" bz.d 4f \n"
" mov.z %[got_it], 1 \n" /* got it */
" \n"
SCOND_FAIL_RETRY_ASM
: [val] "=&r" (val),
[got_it] "+&r" (got_it)
SCOND_FAIL_RETRY_VARS
: [rwlock] "r" (&(rw->counter)),
[WR_LOCKED] "ir" (0)
: "memory", "cc");
smp_mb();
return got_it;
}
static inline void arch_write_lock(arch_rwlock_t *rw)
{
unsigned int val;
SCOND_FAIL_RETRY_VAR_DEF;
smp_mb();
/*
* If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
* deny writer. Otherwise if unlocked grant to writer
* Hence the claim that Linux rwlocks are unfair to writers.
* (can be starved for an indefinite time by readers).
*
* if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
* rw->counter = 0;
* ret = 1;
* }
*/
__asm__ __volatile__(
"0: mov %[delay], 1 \n"
"1: llock %[val], [%[rwlock]] \n"
" brne %[val], %[UNLOCKED], 0b \n" /* while !UNLOCKED spin */
" mov %[val], %[WR_LOCKED] \n"
" scond %[val], [%[rwlock]] \n"
" bz 4f \n"
" \n"
SCOND_FAIL_RETRY_ASM
: [val] "=&r" (val)
SCOND_FAIL_RETRY_VARS
: [rwlock] "r" (&(rw->counter)),
[UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
[WR_LOCKED] "ir" (0)
: "memory", "cc");
smp_mb();
}
/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
unsigned int val, got_it = 0;
SCOND_FAIL_RETRY_VAR_DEF;
smp_mb();
__asm__ __volatile__(
"0: mov %[delay], 1 \n"
"1: llock %[val], [%[rwlock]] \n"
" brne %[val], %[UNLOCKED], 4f \n" /* !UNLOCKED, bail */
" mov %[val], %[WR_LOCKED] \n"
" scond %[val], [%[rwlock]] \n"
" bz.d 4f \n"
" mov.z %[got_it], 1 \n" /* got it */
" \n"
SCOND_FAIL_RETRY_ASM
: [val] "=&r" (val),
[got_it] "+&r" (got_it)
SCOND_FAIL_RETRY_VARS
: [rwlock] "r" (&(rw->counter)),
[UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
[WR_LOCKED] "ir" (0)
: "memory", "cc");
smp_mb();
return got_it;
}
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
unsigned int val;
smp_mb();
/*
* rw->counter++;
*/
__asm__ __volatile__(
"1: llock %[val], [%[rwlock]] \n"
" add %[val], %[val], 1 \n"
" scond %[val], [%[rwlock]] \n"
" bnz 1b \n"
" \n"
: [val] "=&r" (val)
: [rwlock] "r" (&(rw->counter))
: "memory", "cc");
smp_mb();
}
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
unsigned int val;
smp_mb();
/*
* rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
*/
__asm__ __volatile__(
"1: llock %[val], [%[rwlock]] \n"
" scond %[UNLOCKED], [%[rwlock]]\n"
" bnz 1b \n"
" \n"
: [val] "=&r" (val)
: [rwlock] "r" (&(rw->counter)),
[UNLOCKED] "r" (__ARCH_RW_LOCK_UNLOCKED__)
: "memory", "cc");
smp_mb();
}
#undef SCOND_FAIL_RETRY_VAR_DEF
#undef SCOND_FAIL_RETRY_ASM
#undef SCOND_FAIL_RETRY_VARS
#endif /* CONFIG_ARC_STAR_9000923308 */
#else /* !CONFIG_ARC_HAS_LLSC */
static inline void arch_spin_lock(arch_spinlock_t *lock)


@ -103,7 +103,7 @@ static inline __attribute_const__ struct thread_info *current_thread_info(void)
/*
* _TIF_ALLWORK_MASK includes SYSCALL_TRACE, but we don't need it.
* SYSCALL_TRACE is anways seperately/unconditionally tested right after a
* SYSCALL_TRACE is anyway separately/unconditionally tested right after a
* syscall, so all that reamins to be tested is _TIF_WORK_MASK
*/


@ -32,7 +32,7 @@
#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
/*
* Algorthmically, for __user_ok() we want do:
* Algorithmically, for __user_ok() we want do:
* (start < TASK_SIZE) && (start+len < TASK_SIZE)
* where TASK_SIZE could either be retrieved from thread_info->addr_limit or
* emitted directly in code.


@ -74,7 +74,7 @@
__tmp ^ __in; \
})
#elif (ARC_BSWAP_TYPE == 2) /* Custom single cycle bwap instruction */
#elif (ARC_BSWAP_TYPE == 2) /* Custom single cycle bswap instruction */
#define __arch_swab32(x) \
({ \


@ -91,27 +91,13 @@ VECTOR mem_service ; 0x8, Mem exception (0x1)
VECTOR instr_service ; 0x10, Instrn Error (0x2)
; ******************** Device ISRs **********************
#ifdef CONFIG_ARC_IRQ3_LV2
#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
VECTOR handle_interrupt_level2
#else
VECTOR handle_interrupt_level1
#endif
VECTOR handle_interrupt_level1
#ifdef CONFIG_ARC_IRQ5_LV2
VECTOR handle_interrupt_level2
#else
VECTOR handle_interrupt_level1
#endif
#ifdef CONFIG_ARC_IRQ6_LV2
VECTOR handle_interrupt_level2
#else
VECTOR handle_interrupt_level1
#endif
.rept 25
.rept 28
VECTOR handle_interrupt_level1 ; Other devices
.endr


@ -28,10 +28,8 @@ void arc_init_IRQ(void)
{
int level_mask = 0;
/* setup any high priority Interrupts (Level2 in ARCompact jargon) */
level_mask |= IS_ENABLED(CONFIG_ARC_IRQ3_LV2) << 3;
level_mask |= IS_ENABLED(CONFIG_ARC_IRQ5_LV2) << 5;
level_mask |= IS_ENABLED(CONFIG_ARC_IRQ6_LV2) << 6;
/* Is timer high priority Interrupt (Level2 in ARCompact jargon) */
level_mask |= IS_ENABLED(CONFIG_ARC_COMPACT_IRQ_LEVELS) << TIMER0_IRQ;
/*
* Write to register, even if no LV2 IRQs configured to reset it


@ -108,7 +108,7 @@ static void arc_perf_event_update(struct perf_event *event,
int64_t delta = new_raw_count - prev_raw_count;
/*
* We don't afaraid of hwc->prev_count changing beneath our feet
* We aren't afraid of hwc->prev_count changing beneath our feet
* because there's no way for us to re-enter this function anytime.
*/
local64_set(&hwc->prev_count, new_raw_count);


@ -392,7 +392,7 @@ void __init setup_arch(char **cmdline_p)
/*
* If we are here, it is established that @uboot_arg didn't
* point to DT blob. Instead if u-boot says it is cmdline,
* Appent to embedded DT cmdline.
* append to embedded DT cmdline.
* setup_machine_fdt() would have populated @boot_command_line
*/
if (uboot_tag == 1) {


@ -34,7 +34,7 @@
* -ViXS were still seeing crashes when using insmod to load drivers.
* It turned out that the code to change Execute permssions for TLB entries
* of user was not guarded for interrupts (mod_tlb_permission)
* This was cauing TLB entries to be overwritten on unrelated indexes
* This was causing TLB entries to be overwritten on unrelated indexes
*
* Vineetg: July 15th 2008: Bug #94183
* -Exception happens in Delay slot of a JMP, and before user space resumes,


@ -276,7 +276,7 @@ static int tlb_stats_open(struct inode *inode, struct file *file)
return 0;
}
/* called on user read(): display the couters */
/* called on user read(): display the counters */
static ssize_t tlb_stats_output(struct file *file, /* file descriptor */
char __user *user_buf, /* user buffer */
size_t len, /* length of buffer */


@ -215,7 +215,7 @@ slc_chk:
* ------------------
* This ver of MMU supports variable page sizes (1k-16k): although Linux will
* only support 8k (default), 16k and 4k.
* However from hardware perspective, smaller page sizes aggrevate aliasing
* However from hardware perspective, smaller page sizes aggravate aliasing
* meaning more vaddr bits needed to disambiguate the cache-line-op ;
* the existing scheme of piggybacking won't work for certain configurations.
* Two new registers IC_PTAG and DC_PTAG inttoduced.
@ -302,7 +302,7 @@ void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
/*
* This is technically for MMU v4, using the MMU v3 programming model
* Special work for HS38 aliasing I-cache configuratino with PAE40
* Special work for HS38 aliasing I-cache configuration with PAE40
* - upper 8 bits of paddr need to be written into PTAG_HI
* - (and needs to be written before the lower 32 bits)
* Note that PTAG_HI is hoisted outside the line loop
@ -936,7 +936,7 @@ void arc_cache_init(void)
ic->ver, CONFIG_ARC_MMU_VER);
/*
* In MMU v4 (HS38x) the alising icache config uses IVIL/PTAG
* In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
* pair to provide vaddr/paddr respectively, just as in MMU v3
*/
if (is_isa_arcv2() && ic->alias)


@ -10,7 +10,7 @@
* DMA Coherent API Notes
*
* I/O is inherently non-coherent on ARC. So a coherent DMA buffer is
* implemented by accessintg it using a kernel virtual address, with
* implemented by accessing it using a kernel virtual address, with
* Cache bit off in the TLB entry.
*
* The default DMA address == Phy address which is 0x8000_0000 based.


@ -733,8 +733,8 @@ static int vfp_set(struct task_struct *target,
if (ret)
return ret;
vfp_flush_hwstate(thread);
thread->vfpstate.hard = new_vfp;
vfp_flush_hwstate(thread);
return 0;
}


@ -547,7 +547,7 @@ static struct clk *ve_spc_clk_register(struct device *cpu_dev)
init.name = dev_name(cpu_dev);
init.ops = &clk_spc_ops;
init.flags = CLK_IS_ROOT | CLK_GET_RATE_NOCACHE;
init.flags = CLK_GET_RATE_NOCACHE;
init.num_parents = 0;
return devm_clk_register(cpu_dev, &spc->hw);


@ -113,6 +113,18 @@ config ARCH_PHYS_ADDR_T_64BIT
config MMU
def_bool y
config ARM64_PAGE_SHIFT
int
default 16 if ARM64_64K_PAGES
default 14 if ARM64_16K_PAGES
default 12
config ARM64_CONT_SHIFT
int
default 5 if ARM64_64K_PAGES
default 7 if ARM64_16K_PAGES
default 4
config ARCH_MMAP_RND_BITS_MIN
default 14 if ARM64_64K_PAGES
default 16 if ARM64_16K_PAGES
@ -426,6 +438,15 @@ config CAVIUM_ERRATUM_22375
If unsure, say Y.
config CAVIUM_ERRATUM_23144
bool "Cavium erratum 23144: ITS SYNC hang on dual socket system"
depends on NUMA
default y
help
ITS SYNC command hang for cross node io and collections/cpu mapping.
If unsure, say Y.
config CAVIUM_ERRATUM_23154
bool "Cavium erratum 23154: Access to ICC_IAR1_EL1 is not sync'ed"
default y


@ -12,7 +12,8 @@ config ARM64_PTDUMP
who are working in architecture specific areas of the kernel.
It is probably not a good idea to enable this feature in a production
kernel.
If in doubt, say "N"
If in doubt, say N.
config PID_IN_CONTEXTIDR
bool "Write the current PID to the CONTEXTIDR register"
@ -38,15 +39,15 @@ config ARM64_RANDOMIZE_TEXT_OFFSET
value.
config DEBUG_SET_MODULE_RONX
bool "Set loadable kernel module data as NX and text as RO"
depends on MODULES
help
This option helps catch unintended modifications to loadable
kernel module's text and read-only data. It also prevents execution
of module data. Such protection may interfere with run-time code
patching and dynamic kernel tracing - and they might also protect
against certain classes of kernel exploits.
If in doubt, say "N".
bool "Set loadable kernel module data as NX and text as RO"
depends on MODULES
default y
help
If this is set, kernel module text and rodata will be made read-only.
This is to help catch accidental or malicious attempts to change the
kernel's executable code.
If in doubt, say Y.
config DEBUG_RODATA
bool "Make kernel text and rodata read-only"
@ -56,7 +57,7 @@ config DEBUG_RODATA
is to help catch accidental or malicious attempts to change the
kernel's executable code.
If in doubt, say Y
If in doubt, say Y.
config DEBUG_ALIGN_RODATA
depends on DEBUG_RODATA
@ -69,7 +70,7 @@ config DEBUG_ALIGN_RODATA
alignment and potentially wasted space. Turn on this option if
performance is more important than memory pressure.
If in doubt, say N
If in doubt, say N.
source "drivers/hwtracing/coresight/Kconfig"


@ -60,7 +60,9 @@ head-y := arch/arm64/kernel/head.o
# The byte offset of the kernel image in RAM from the start of RAM.
ifeq ($(CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET), y)
TEXT_OFFSET := $(shell awk 'BEGIN {srand(); printf "0x%03x000\n", int(512 * rand())}')
TEXT_OFFSET := $(shell awk "BEGIN {srand(); printf \"0x%06x\n\", \
int(2 * 1024 * 1024 / (2 ^ $(CONFIG_ARM64_PAGE_SHIFT)) * \
rand()) * (2 ^ $(CONFIG_ARM64_PAGE_SHIFT))}")
else
TEXT_OFFSET := 0x00080000
endif
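The randomized branch picks a page-aligned offset uniformly in [0, 2 MiB); the same computation as a C sketch, with drand48() standing in for awk's rand():

#include <stdlib.h>

/* Page-aligned random offset below 2 MiB, mirroring the awk expression. */
unsigned long pick_text_offset(unsigned int page_shift)
{
	unsigned long pages = (2UL * 1024 * 1024) >> page_shift;

	return ((unsigned long)(drand48() * pages)) << page_shift;
}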


@ -160,14 +160,14 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
#define STACK_RND_MASK (0x3ffff >> (PAGE_SHIFT - 12))
#endif
#ifdef CONFIG_COMPAT
#ifdef __AARCH64EB__
#define COMPAT_ELF_PLATFORM ("v8b")
#else
#define COMPAT_ELF_PLATFORM ("v8l")
#endif
#ifdef CONFIG_COMPAT
#define COMPAT_ELF_ET_DYN_BASE (2 * TASK_SIZE_32 / 3)
/* AArch32 registers. */


@ -55,8 +55,9 @@
#define VMEMMAP_SIZE (UL(1) << (VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT))
/*
* PAGE_OFFSET - the virtual address of the start of the kernel image (top
* PAGE_OFFSET - the virtual address of the start of the linear map (top
* (VA_BITS - 1))
* KIMAGE_VADDR - the virtual address of the start of the kernel image
* VA_BITS - the maximum number of bits for virtual addresses.
* VA_START - the first kernel virtual address.
* TASK_SIZE - the maximum size of a user space task.


@ -23,16 +23,8 @@
/* PAGE_SHIFT determines the page size */
/* CONT_SHIFT determines the number of pages which can be tracked together */
#ifdef CONFIG_ARM64_64K_PAGES
#define PAGE_SHIFT 16
#define CONT_SHIFT 5
#elif defined(CONFIG_ARM64_16K_PAGES)
#define PAGE_SHIFT 14
#define CONT_SHIFT 7
#else
#define PAGE_SHIFT 12
#define CONT_SHIFT 4
#endif
#define PAGE_SHIFT CONFIG_ARM64_PAGE_SHIFT
#define CONT_SHIFT CONFIG_ARM64_CONT_SHIFT
#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))


@ -80,19 +80,6 @@ static inline void set_fs(mm_segment_t fs)
#define segment_eq(a, b) ((a) == (b))
/*
* Return 1 if addr < current->addr_limit, 0 otherwise.
*/
#define __addr_ok(addr) \
({ \
unsigned long flag; \
asm("cmp %1, %0; cset %0, lo" \
: "=&r" (flag) \
: "r" (addr), "0" (current_thread_info()->addr_limit) \
: "cc"); \
flag; \
})
/*
* Test whether a block of memory is a valid user space address.
* Returns 1 if the range is valid, 0 otherwise.


@ -44,7 +44,7 @@
#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2)
#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5)
#define __NR_compat_syscalls 390
#define __NR_compat_syscalls 394
#endif
#define __ARCH_WANT_SYS_CLONE


@ -801,6 +801,14 @@ __SYSCALL(__NR_execveat, compat_sys_execveat)
__SYSCALL(__NR_userfaultfd, sys_userfaultfd)
#define __NR_membarrier 389
__SYSCALL(__NR_membarrier, sys_membarrier)
#define __NR_mlock2 390
__SYSCALL(__NR_mlock2, sys_mlock2)
#define __NR_copy_file_range 391
__SYSCALL(__NR_copy_file_range, sys_copy_file_range)
#define __NR_preadv2 392
__SYSCALL(__NR_preadv2, compat_sys_preadv2)
#define __NR_pwritev2 393
__SYSCALL(__NR_pwritev2, compat_sys_pwritev2)
/*
* Please add new compat syscalls above this comment and update


@ -22,6 +22,8 @@
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compat.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/personality.h>
@ -104,6 +106,7 @@ static const char *const compat_hwcap2_str[] = {
static int c_show(struct seq_file *m, void *v)
{
int i, j;
bool compat = personality(current->personality) == PER_LINUX32;
for_each_online_cpu(i) {
struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
@ -115,6 +118,9 @@ static int c_show(struct seq_file *m, void *v)
* "processor". Give glibc what it expects.
*/
seq_printf(m, "processor\t: %d\n", i);
if (compat)
seq_printf(m, "model name\t: ARMv8 Processor rev %d (%s)\n",
MIDR_REVISION(midr), COMPAT_ELF_PLATFORM);
seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
loops_per_jiffy / (500000UL/HZ),
@ -127,7 +133,7 @@ static int c_show(struct seq_file *m, void *v)
* software which does already (at least for 32-bit).
*/
seq_puts(m, "Features\t:");
if (personality(current->personality) == PER_LINUX32) {
if (compat) {
#ifdef CONFIG_COMPAT
for (j = 0; compat_hwcap_str[j]; j++)
if (compat_elf_hwcap & (1 << j))


@ -477,8 +477,9 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
void __user *pc = (void __user *)instruction_pointer(regs);
console_verbose();
pr_crit("Bad mode in %s handler detected, code 0x%08x -- %s\n",
handler[reason], esr, esr_get_class_string(esr));
pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
handler[reason], smp_processor_id(), esr,
esr_get_class_string(esr));
__show_regs(regs);
info.si_signo = SIGILL;


@ -169,7 +169,8 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
* Make sure stores to the GIC via the memory mapped interface
* are now visible to the system register interface.
*/
dsb(st);
if (!cpu_if->vgic_sre)
dsb(st);
cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
@ -190,12 +191,11 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
continue;
if (cpu_if->vgic_elrsr & (1 << i)) {
if (cpu_if->vgic_elrsr & (1 << i))
cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
continue;
}
else
cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);
cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);
__gic_v3_set_lr(0, i);
}
@ -236,8 +236,12 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
val = read_gicreg(ICC_SRE_EL2);
write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);
isb(); /* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
write_gicreg(1, ICC_SRE_EL1);
if (!cpu_if->vgic_sre) {
/* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
isb();
write_gicreg(1, ICC_SRE_EL1);
}
}
void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
@ -256,8 +260,10 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
* been actually programmed with the value we want before
* starting to mess with the rest of the GIC.
*/
write_gicreg(cpu_if->vgic_sre, ICC_SRE_EL1);
isb();
if (!cpu_if->vgic_sre) {
write_gicreg(0, ICC_SRE_EL1);
isb();
}
val = read_gicreg(ICH_VTR_EL2);
max_lr_idx = vtr_to_max_lr_idx(val);
@ -306,18 +312,18 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
* (re)distributors. This ensure the guest will read the
* correct values from the memory-mapped interface.
*/
isb();
dsb(sy);
if (!cpu_if->vgic_sre) {
isb();
dsb(sy);
}
vcpu->arch.vgic_cpu.live_lrs = live_lrs;
/*
* Prevent the guest from touching the GIC system registers if
* SRE isn't enabled for GICv3 emulation.
*/
if (!cpu_if->vgic_sre) {
write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
ICC_SRE_EL2);
}
write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
ICC_SRE_EL2);
}
void __hyp_text __vgic_v3_init_lrs(void)


@ -134,6 +134,17 @@ static bool access_gic_sgi(struct kvm_vcpu *vcpu,
return true;
}
static bool access_gic_sre(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
if (p->is_write)
return ignore_write(vcpu, p);
p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
return true;
}
static bool trap_raz_wi(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
@ -958,7 +969,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
access_gic_sgi },
/* ICC_SRE_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101),
trap_raz_wi },
access_gic_sre },
/* CONTEXTIDR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),


@ -150,6 +150,7 @@ static const struct prot_bits pte_bits[] = {
struct pg_level {
const struct prot_bits *bits;
const char *name;
size_t num;
u64 mask;
};
@ -157,15 +158,19 @@ struct pg_level {
static struct pg_level pg_level[] = {
{
}, { /* pgd */
.name = "PGD",
.bits = pte_bits,
.num = ARRAY_SIZE(pte_bits),
}, { /* pud */
.name = (CONFIG_PGTABLE_LEVELS > 3) ? "PUD" : "PGD",
.bits = pte_bits,
.num = ARRAY_SIZE(pte_bits),
}, { /* pmd */
.name = (CONFIG_PGTABLE_LEVELS > 2) ? "PMD" : "PGD",
.bits = pte_bits,
.num = ARRAY_SIZE(pte_bits),
}, { /* pte */
.name = "PTE",
.bits = pte_bits,
.num = ARRAY_SIZE(pte_bits),
},
@ -214,7 +219,8 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
delta >>= 10;
unit++;
}
seq_printf(st->seq, "%9lu%c", delta, *unit);
seq_printf(st->seq, "%9lu%c %s", delta, *unit,
pg_level[st->level].name);
if (pg_level[st->level].bits)
dump_prot(st, pg_level[st->level].bits,
pg_level[st->level].num);


@ -109,7 +109,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
* PTE_RDONLY is cleared by default in the asm below, so set it in
* back if necessary (read-only or clean PTE).
*/
if (!pte_write(entry) || !dirty)
if (!pte_write(entry) || !pte_sw_dirty(entry))
pte_val(entry) |= PTE_RDONLY;
/*


@ -306,6 +306,10 @@ static __init int setup_hugepagesz(char *opt)
hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
} else if (ps == PUD_SIZE) {
hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
} else if (ps == (PAGE_SIZE * CONT_PTES)) {
hugetlb_add_hstate(CONT_PTE_SHIFT);
} else if (ps == (PMD_SIZE * CONT_PMDS)) {
hugetlb_add_hstate((PMD_SHIFT + CONT_PMD_SHIFT) - PAGE_SHIFT);
} else {
hugetlb_bad_size();
pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10);
@ -314,3 +318,13 @@ static __init int setup_hugepagesz(char *opt)
return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
#ifdef CONFIG_ARM64_64K_PAGES
static __init int add_default_hugepagesz(void)
{
if (size_to_hstate(CONT_PTES * PAGE_SIZE) == NULL)
hugetlb_add_hstate(CONT_PMD_SHIFT);
return 0;
}
arch_initcall(add_default_hugepagesz);
#endif


@ -8,6 +8,8 @@ struct pt_regs;
void parisc_terminate(char *msg, struct pt_regs *regs,
int code, unsigned long offset) __noreturn __cold;
void die_if_kernel(char *str, struct pt_regs *regs, long err);
/* mm/fault.c */
void do_page_fault(struct pt_regs *regs, unsigned long code,
unsigned long address);


@ -324,8 +324,9 @@ int init_per_cpu(int cpunum)
per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision;
per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model;
printk(KERN_INFO "FP[%d] enabled: Rev %ld Model %ld\n",
cpunum, coproc_cfg.revision, coproc_cfg.model);
if (cpunum == 0)
printk(KERN_INFO "FP[%d] enabled: Rev %ld Model %ld\n",
cpunum, coproc_cfg.revision, coproc_cfg.model);
/*
** store status register to stack (hopefully aligned)


@ -309,11 +309,6 @@ void __init time_init(void)
clocks_calc_mult_shift(&cyc2ns_mul, &cyc2ns_shift, current_cr16_khz,
NSEC_PER_MSEC, 0);
#if defined(CONFIG_HAVE_UNSTABLE_SCHED_CLOCK) && defined(CONFIG_64BIT)
/* At bootup only one 64bit CPU is online and cr16 is "stable" */
set_sched_clock_stable();
#endif
start_cpu_itimer(); /* get CPU 0 started */
/* register at clocksource framework */


@ -28,6 +28,7 @@
#include <linux/ratelimit.h>
#include <asm/uaccess.h>
#include <asm/hardirq.h>
#include <asm/traps.h>
/* #define DEBUG_UNALIGNED 1 */
@ -130,8 +131,6 @@
int unaligned_enabled __read_mostly = 1;
void die_if_kernel (char *str, struct pt_regs *regs, long err);
static int emulate_ldh(struct pt_regs *regs, int toreg)
{
unsigned long saddr = regs->ior;
@ -666,7 +665,7 @@ void handle_unaligned(struct pt_regs *regs)
break;
}
if (modify && R1(regs->iir))
if (ret == 0 && modify && R1(regs->iir))
regs->gr[R1(regs->iir)] = newbase;
@ -677,6 +676,14 @@ void handle_unaligned(struct pt_regs *regs)
if (ret)
{
/*
* The unaligned handler failed.
* If we were called by __get_user() or __put_user() jump
* to it's exception fixup handler instead of crashing.
*/
if (!user_mode(regs) && fixup_exception(regs))
return;
printk(KERN_CRIT "Unaligned handler failed, ret = %d\n", ret);
die_if_kernel("Unaligned data reference", regs, 28);


@ -75,7 +75,10 @@ find_unwind_entry(unsigned long addr)
if (addr >= kernel_unwind_table.start &&
addr <= kernel_unwind_table.end)
e = find_unwind_entry_in_table(&kernel_unwind_table, addr);
else
else {
unsigned long flags;
spin_lock_irqsave(&unwind_lock, flags);
list_for_each_entry(table, &unwind_tables, list) {
if (addr >= table->start &&
addr <= table->end)
@ -86,6 +89,8 @@ find_unwind_entry(unsigned long addr)
break;
}
}
spin_unlock_irqrestore(&unwind_lock, flags);
}
return e;
}
@ -303,18 +308,16 @@ static void unwind_frame_regs(struct unwind_frame_info *info)
insn = *(unsigned int *)npc;
if ((insn & 0xffffc000) == 0x37de0000 ||
(insn & 0xffe00000) == 0x6fc00000) {
if ((insn & 0xffffc001) == 0x37de0000 ||
(insn & 0xffe00001) == 0x6fc00000) {
/* ldo X(sp), sp, or stwm X,D(sp) */
frame_size += (insn & 0x1 ? -1 << 13 : 0) |
((insn & 0x3fff) >> 1);
frame_size += (insn & 0x3fff) >> 1;
dbg("analyzing func @ %lx, insn=%08x @ "
"%lx, frame_size = %ld\n", info->ip,
insn, npc, frame_size);
} else if ((insn & 0xffe00008) == 0x73c00008) {
} else if ((insn & 0xffe00009) == 0x73c00008) {
/* std,ma X,D(sp) */
frame_size += (insn & 0x1 ? -1 << 13 : 0) |
(((insn >> 4) & 0x3ff) << 3);
frame_size += ((insn >> 4) & 0x3ff) << 3;
dbg("analyzing func @ %lx, insn=%08x @ "
"%lx, frame_size = %ld\n", info->ip,
insn, npc, frame_size);
@ -333,6 +336,9 @@ static void unwind_frame_regs(struct unwind_frame_info *info)
}
}
if (frame_size > e->Total_frame_size << 3)
frame_size = e->Total_frame_size << 3;
if (!unwind_special(info, e->region_start, frame_size)) {
info->prev_sp = info->sp - frame_size;
if (e->Millicode)


@ -172,7 +172,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
pte_fragment_fre((unsigned long *)pte, 1);
pte_fragment_free((unsigned long *)pte, 1);
}
static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)


@ -717,7 +717,7 @@
#define MMCR0_FCWAIT 0x00000002UL /* freeze counter in WAIT state */
#define MMCR0_FCHV 0x00000001UL /* freeze conditions in hypervisor mode */
#define SPRN_MMCR1 798
#define SPRN_MMCR2 769
#define SPRN_MMCR2 785
#define SPRN_MMCRA 0x312
#define MMCRA_SDSYNC 0x80000000UL /* SDAR synced with SIAR */
#define MMCRA_SDAR_DCACHE_MISS 0x40000000UL
@ -754,13 +754,13 @@
#define SPRN_PMC6 792
#define SPRN_PMC7 793
#define SPRN_PMC8 794
#define SPRN_SIAR 780
#define SPRN_SDAR 781
#define SPRN_SIER 784
#define SIER_SIPR 0x2000000 /* Sampled MSR_PR */
#define SIER_SIHV 0x1000000 /* Sampled MSR_HV */
#define SIER_SIAR_VALID 0x0400000 /* SIAR contents valid */
#define SIER_SDAR_VALID 0x0200000 /* SDAR contents valid */
#define SPRN_SIAR 796
#define SPRN_SDAR 797
#define SPRN_TACR 888
#define SPRN_TCSCR 889
#define SPRN_CSIGR 890


@ -656,6 +656,7 @@ unsigned char ibm_architecture_vec[] = {
W(0xffff0000), W(0x003e0000), /* POWER6 */
W(0xffff0000), W(0x003f0000), /* POWER7 */
W(0xffff0000), W(0x004b0000), /* POWER8E */
W(0xffff0000), W(0x004c0000), /* POWER8NVL */
W(0xffff0000), W(0x004d0000), /* POWER8 */
W(0xffffffff), W(0x0f000004), /* all 2.07-compliant */
W(0xffffffff), W(0x0f000003), /* all 2.06-compliant */
@ -718,7 +719,7 @@ unsigned char ibm_architecture_vec[] = {
* must match by the macro below. Update the definition if
* the structure layout changes.
*/
#define IBM_ARCH_VEC_NRCORES_OFFSET 125
#define IBM_ARCH_VEC_NRCORES_OFFSET 133
W(NR_CPUS), /* number of cores supported */
0,
0,


@ -377,7 +377,7 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset,
#else
BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
offsetof(struct thread_fp_state, fpr[32][0]));
offsetof(struct thread_fp_state, fpr[32]));
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.fp_state, 0, -1);
@ -405,7 +405,7 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
return 0;
#else
BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
offsetof(struct thread_fp_state, fpr[32][0]));
offsetof(struct thread_fp_state, fpr[32]));
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.fp_state, 0, -1);


@ -550,7 +550,11 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
}
}
/* This works for all page sizes, and for 256M and 1T segments */
*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
if (cpu_has_feature(CPU_FTR_ARCH_300))
*ssize = hpte_r >> HPTE_R_3_0_SSIZE_SHIFT;
else
*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
shift = mmu_psize_defs[size].shift;
avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);


@ -159,6 +159,19 @@ static struct mmu_psize_def mmu_psize_defaults_gp[] = {
},
};
/*
* 'R' and 'C' update notes:
* - Under pHyp or KVM, the updatepp path will not set C, thus it *will*
* create writeable HPTEs without C set, because the hcall H_PROTECT
* that we use in that case will not update C
* - The above is, however, not a problem, because we also don't do that
* fancy "no flush" variant of eviction, and we use H_REMOVE, which will
* do the right thing, and thus we don't have the race described earlier
*
* - Under bare metal, we do have the race, so we need R and C set
* - We make sure R is always set and never lost
* - C is _PAGE_DIRTY, and *should* always be set for a writeable mapping
*/
unsigned long htab_convert_pte_flags(unsigned long pteflags)
{
unsigned long rflags = 0;
@ -186,9 +199,14 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags)
rflags |= 0x1;
}
/*
* Always add "C" bit for perf. Memory coherence is always enabled
* We can't allow hardware to update hpte bits. Hence always
* set 'R' bit and set 'C' if it is a write fault
* Memory coherence is always enabled
*/
rflags |= HPTE_R_C | HPTE_R_M;
rflags |= HPTE_R_R | HPTE_R_M;
if (pteflags & _PAGE_DIRTY)
rflags |= HPTE_R_C;
/*
* Add in WIG bits
*/
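
The effect of the hunk above: rather than pre-setting the changed bit unconditionally, R (referenced) is always set so hardware never has to write it back, and C (changed) is set only when the Linux PTE is already dirty. A minimal user-space sketch of that conversion, with made-up constants standing in for _PAGE_DIRTY and the HPTE_R_* bits:

#include <stdio.h>

#define PAGE_DIRTY 0x1UL /* stand-in for _PAGE_DIRTY */
#define HPTE_R     0x2UL /* stand-in for HPTE_R_R (referenced) */
#define HPTE_C     0x4UL /* stand-in for HPTE_R_C (changed) */
#define HPTE_M     0x8UL /* stand-in for HPTE_R_M (memory coherence) */

static unsigned long convert_pte_flags(unsigned long pteflags)
{
	unsigned long rflags = 0;

	/* R is always set so hardware never needs to update it;
	 * C only when the mapping is already dirty (a write fault). */
	rflags |= HPTE_R | HPTE_M;
	if (pteflags & PAGE_DIRTY)
		rflags |= HPTE_C;
	return rflags;
}

int main(void)
{
	printf("clean: %#lx\n", convert_pte_flags(0));
	printf("dirty: %#lx\n", convert_pte_flags(PAGE_DIRTY));
	return 0;
}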

View File

@ -33,10 +33,7 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
changed = !pmd_same(*(pmdp), entry);
if (changed) {
__ptep_set_access_flags(pmdp_ptep(pmdp), pmd_pte(entry));
/*
* Since we are not supporting SW TLB systems, we don't
* have any thing similar to flush_tlb_page_nohash()
*/
flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
return changed;
}

View File

@ -296,11 +296,6 @@ found:
void __init radix__early_init_mmu(void)
{
unsigned long lpcr;
/*
* setup LPCR UPRT based on mmu_features
*/
lpcr = mfspr(SPRN_LPCR);
mtspr(SPRN_LPCR, lpcr | LPCR_UPRT);
#ifdef CONFIG_PPC_64K_PAGES
/* PAGE_SIZE mappings */
@ -343,8 +338,11 @@ void __init radix__early_init_mmu(void)
__pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT;
radix_init_page_sizes();
if (!firmware_has_feature(FW_FEATURE_LPAR))
if (!firmware_has_feature(FW_FEATURE_LPAR)) {
lpcr = mfspr(SPRN_LPCR);
mtspr(SPRN_LPCR, lpcr | LPCR_UPRT);
radix_init_partition_table();
}
radix_init_pgtable();
}
@ -353,16 +351,15 @@ void radix__early_init_mmu_secondary(void)
{
unsigned long lpcr;
/*
* setup LPCR UPRT based on mmu_features
* update partition table control register and UPRT
*/
lpcr = mfspr(SPRN_LPCR);
mtspr(SPRN_LPCR, lpcr | LPCR_UPRT);
/*
* update partition table control register, 64 K size.
*/
if (!firmware_has_feature(FW_FEATURE_LPAR))
if (!firmware_has_feature(FW_FEATURE_LPAR)) {
lpcr = mfspr(SPRN_LPCR);
mtspr(SPRN_LPCR, lpcr | LPCR_UPRT);
mtspr(SPRN_PTCR,
__pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
}
}
void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,

View File

@ -117,7 +117,7 @@ static inline void _tlbie_va(unsigned long va, unsigned long pid,
*/
void radix__local_flush_tlb_mm(struct mm_struct *mm)
{
unsigned int pid;
unsigned long pid;
preempt_disable();
pid = mm->context.id;
@ -130,7 +130,7 @@ EXPORT_SYMBOL(radix__local_flush_tlb_mm);
void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
unsigned long ap, int nid)
{
unsigned int pid;
unsigned long pid;
preempt_disable();
pid = mm ? mm->context.id : 0;
@ -160,7 +160,7 @@ static int mm_is_core_local(struct mm_struct *mm)
void radix__flush_tlb_mm(struct mm_struct *mm)
{
unsigned int pid;
unsigned long pid;
preempt_disable();
pid = mm->context.id;
@ -185,7 +185,7 @@ EXPORT_SYMBOL(radix__flush_tlb_mm);
void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
unsigned long ap, int nid)
{
unsigned int pid;
unsigned long pid;
preempt_disable();
pid = mm ? mm->context.id : 0;
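
All four hunks above are the same one-line fix: mm->context.id is a long, and storing it in an unsigned int silently truncates large PIDs before they reach the TLB-flush path. A small sketch of the narrowing, assuming a 64-bit build:

#include <stdio.h>

int main(void)
{
	unsigned long id = 0x100000001UL; /* a PID with bits above 31 set */
	unsigned int truncated = id;      /* what the old "unsigned int pid" did */
	unsigned long kept = id;          /* what the fixed code keeps */

	printf("truncated=%#x kept=%#lx\n", truncated, kept);
	return 0;
}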

View File

@ -221,7 +221,7 @@ static bool soc_has_mclk_mux0_canin(void)
/* convenience wrappers around the common clk API */
static inline struct clk *mpc512x_clk_fixed(const char *name, int rate)
{
return clk_register_fixed_rate(NULL, name, NULL, CLK_IS_ROOT, rate);
return clk_register_fixed_rate(NULL, name, NULL, 0, rate);
}
static inline struct clk *mpc512x_clk_factor(

View File

@ -172,7 +172,7 @@ static int spufs_arch_write_note(struct spu_context *ctx, int i,
if (rc < 0)
goto out;
skip = roundup(cprm->file->f_pos - total + sz, 4) - cprm->file->f_pos;
skip = roundup(cprm->pos - total + sz, 4) - cprm->pos;
if (!dump_skip(cprm, skip))
goto Eio;
out:

View File

@ -53,7 +53,6 @@ static int ibm_read_slot_reset_state2;
static int ibm_slot_error_detail;
static int ibm_get_config_addr_info;
static int ibm_get_config_addr_info2;
static int ibm_configure_bridge;
static int ibm_configure_pe;
/*
@ -81,7 +80,14 @@ static int pseries_eeh_init(void)
ibm_get_config_addr_info2 = rtas_token("ibm,get-config-addr-info2");
ibm_get_config_addr_info = rtas_token("ibm,get-config-addr-info");
ibm_configure_pe = rtas_token("ibm,configure-pe");
ibm_configure_bridge = rtas_token("ibm,configure-bridge");
/*
* ibm,configure-pe and ibm,configure-bridge have the same semantics;
* however, ibm,configure-pe can be faster. If we can't find
* ibm,configure-pe, then fall back to using ibm,configure-bridge.
*/
if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE)
ibm_configure_pe = rtas_token("ibm,configure-bridge");
/*
* Necessary sanity check. We needn't check "get-config-addr-info"
@ -93,8 +99,7 @@ static int pseries_eeh_init(void)
(ibm_read_slot_reset_state2 == RTAS_UNKNOWN_SERVICE &&
ibm_read_slot_reset_state == RTAS_UNKNOWN_SERVICE) ||
ibm_slot_error_detail == RTAS_UNKNOWN_SERVICE ||
(ibm_configure_pe == RTAS_UNKNOWN_SERVICE &&
ibm_configure_bridge == RTAS_UNKNOWN_SERVICE)) {
ibm_configure_pe == RTAS_UNKNOWN_SERVICE) {
pr_info("EEH functionality not supported\n");
return -EINVAL;
}
@ -615,29 +620,41 @@ static int pseries_eeh_configure_bridge(struct eeh_pe *pe)
{
int config_addr;
int ret;
/* Waiting 0.2s maximum before skipping configuration */
int max_wait = 200;
/* Figure out the PE address */
config_addr = pe->config_addr;
if (pe->addr)
config_addr = pe->addr;
/* Use new configure-pe function, if supported */
if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) {
while (max_wait > 0) {
ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
config_addr, BUID_HI(pe->phb->buid),
BUID_LO(pe->phb->buid));
} else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) {
ret = rtas_call(ibm_configure_bridge, 3, 1, NULL,
config_addr, BUID_HI(pe->phb->buid),
BUID_LO(pe->phb->buid));
} else {
return -EFAULT;
if (!ret)
return ret;
/*
* If RTAS returns a delay value that's above 100ms, cut it
* down to 100ms in case firmware made a mistake. For more on
* how these delay values work, see rtas_busy_delay_time().
*/
if (ret > RTAS_EXTENDED_DELAY_MIN+2 &&
ret <= RTAS_EXTENDED_DELAY_MAX)
ret = RTAS_EXTENDED_DELAY_MIN+2;
max_wait -= rtas_busy_delay_time(ret);
if (max_wait < 0)
break;
rtas_busy_delay(ret);
}
if (ret)
pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
__func__, pe->phb->global_number, pe->addr, ret);
pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
__func__, pe->phb->global_number, pe->addr, ret);
return ret;
}
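
The rewritten function retries ibm,configure-pe while RTAS reports busy, clamps implausible delay hints, and gives up once a 0.2s budget is spent. A hypothetical user-space sketch of that bounded-retry shape — try_configure() and the 100ms clamp below stand in for rtas_call()/rtas_busy_delay_time(), which differ in detail:

#include <stdio.h>
#include <unistd.h>

static int attempts;

/* Stand-in for the RTAS call: "busy, retry in 25ms" twice, then success. */
static int try_configure(void)
{
	return ++attempts < 3 ? 25 : 0;
}

int main(void)
{
	int budget_ms = 200; /* 0.2s maximum, as in the patch */
	int ret = -1;

	while (budget_ms > 0) {
		ret = try_configure();
		if (!ret)
			break;              /* configured successfully */
		if (ret > 100)
			ret = 100;          /* clamp a bogus firmware delay hint */
		budget_ms -= ret;
		if (budget_ms < 0)
			break;              /* budget exhausted, give up */
		usleep(ret * 1000);         /* honour the busy-delay hint */
	}
	printf("ret=%d after %d attempts\n", ret, attempts);
	return 0;
}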

View File

@ -927,7 +927,7 @@ static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
dn = pci_device_to_OF_node(dev);
pdn = PCI_DN(dn);
buid = pdn->phb->buid;
cfg_addr = (pdn->busno << 8) | pdn->devfn;
cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));
ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query,
cfg_addr, BUID_HI(buid), BUID_LO(buid));
@ -956,7 +956,7 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
dn = pci_device_to_OF_node(dev);
pdn = PCI_DN(dn);
buid = pdn->phb->buid;
cfg_addr = (pdn->busno << 8) | pdn->devfn;
cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));
do {
/* extra outputs are LIOBN and dma-addr (hi, lo) */
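
Both hunks fix the same encoding bug: per the patch, the dynamic-DMA-window RTAS calls expect the bus number in bits 23:16 and devfn in bits 15:8 of the config address, not packed into the low 16 bits. A sketch of the two encodings:

#include <stdio.h>

int main(void)
{
	unsigned int busno = 0x12, devfn = 0x34;
	unsigned int old_addr = (busno << 8) | devfn;         /* wrong layout */
	unsigned int new_addr = (busno << 16) | (devfn << 8); /* bus 23:16, devfn 15:8 */

	printf("old=%#x new=%#x\n", old_addr, new_addr);
	return 0;
}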

View File

@ -162,6 +162,9 @@ isoimage: $(obj)/bzImage
for i in lib lib64 share end ; do \
if [ -f /usr/$$i/syslinux/isolinux.bin ] ; then \
cp /usr/$$i/syslinux/isolinux.bin $(obj)/isoimage ; \
if [ -f /usr/$$i/syslinux/ldlinux.c32 ]; then \
cp /usr/$$i/syslinux/ldlinux.c32 $(obj)/isoimage ; \
fi ; \
break ; \
fi ; \
if [ $$i = end ] ; then exit 1 ; fi ; \

View File

@ -714,7 +714,7 @@ static void cleanup_rapl_pmus(void)
int i;
for (i = 0; i < rapl_pmus->maxpkg; i++)
kfree(rapl_pmus->pmus + i);
kfree(rapl_pmus->pmus[i]);
kfree(rapl_pmus);
}
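
The one-liner above matters because rapl_pmus->pmus is an array of pointers: kfree(rapl_pmus->pmus + i) hands the allocator the address of the i-th slot inside that array, while kfree(rapl_pmus->pmus[i]) frees the object the slot points to. A user-space illustration with malloc/free:

#include <stdlib.h>

struct pmu { int id; };

int main(void)
{
	int n = 4;
	struct pmu **pmus = calloc(n, sizeof(*pmus));

	if (!pmus)
		return 1;
	for (int i = 0; i < n; i++)
		pmus[i] = malloc(sizeof(struct pmu));

	for (int i = 0; i < n; i++)
		free(pmus[i]); /* frees each object; free(pmus + i) would hand
				* the allocator an interior pointer into the
				* pointer array itself */
	free(pmus);
	return 0;
}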

View File

@ -2868,27 +2868,10 @@ static struct intel_uncore_type bdx_uncore_cbox = {
.format_group = &hswep_uncore_cbox_format_group,
};
static struct intel_uncore_type bdx_uncore_sbox = {
.name = "sbox",
.num_counters = 4,
.num_boxes = 4,
.perf_ctr_bits = 48,
.event_ctl = HSWEP_S0_MSR_PMON_CTL0,
.perf_ctr = HSWEP_S0_MSR_PMON_CTR0,
.event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
.box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL,
.msr_offset = HSWEP_SBOX_MSR_OFFSET,
.ops = &hswep_uncore_sbox_msr_ops,
.format_group = &hswep_uncore_sbox_format_group,
};
#define BDX_MSR_UNCORE_SBOX 3
static struct intel_uncore_type *bdx_msr_uncores[] = {
&bdx_uncore_ubox,
&bdx_uncore_cbox,
&hswep_uncore_pcu,
&bdx_uncore_sbox,
NULL,
};
@ -2897,10 +2880,6 @@ void bdx_uncore_cpu_init(void)
if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
uncore_msr_uncores = bdx_msr_uncores;
/* BDX-DE doesn't have SBOX */
if (boot_cpu_data.x86_model == 86)
uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
}
static struct intel_uncore_type bdx_uncore_ha = {

View File

@ -0,0 +1,68 @@
#ifndef _ASM_X86_INTEL_FAMILY_H
#define _ASM_X86_INTEL_FAMILY_H
/*
* "Big Core" Processors (Branded as Core, Xeon, etc...)
*
* The "_X" parts are generally the EP and EX Xeons, or the
* "Extreme" ones, like Broadwell-E.
*
* Things ending in "2" are usually because we have no better
* name for them. There's no processor called "WESTMERE2".
*/
#define INTEL_FAM6_CORE_YONAH 0x0E
#define INTEL_FAM6_CORE2_MEROM 0x0F
#define INTEL_FAM6_CORE2_MEROM_L 0x16
#define INTEL_FAM6_CORE2_PENRYN 0x17
#define INTEL_FAM6_CORE2_DUNNINGTON 0x1D
#define INTEL_FAM6_NEHALEM 0x1E
#define INTEL_FAM6_NEHALEM_EP 0x1A
#define INTEL_FAM6_NEHALEM_EX 0x2E
#define INTEL_FAM6_WESTMERE 0x25
#define INTEL_FAM6_WESTMERE2 0x1F
#define INTEL_FAM6_WESTMERE_EP 0x2C
#define INTEL_FAM6_WESTMERE_EX 0x2F
#define INTEL_FAM6_SANDYBRIDGE 0x2A
#define INTEL_FAM6_SANDYBRIDGE_X 0x2D
#define INTEL_FAM6_IVYBRIDGE 0x3A
#define INTEL_FAM6_IVYBRIDGE_X 0x3E
#define INTEL_FAM6_HASWELL_CORE 0x3C
#define INTEL_FAM6_HASWELL_X 0x3F
#define INTEL_FAM6_HASWELL_ULT 0x45
#define INTEL_FAM6_HASWELL_GT3E 0x46
#define INTEL_FAM6_BROADWELL_CORE 0x3D
#define INTEL_FAM6_BROADWELL_XEON_D 0x56
#define INTEL_FAM6_BROADWELL_GT3E 0x47
#define INTEL_FAM6_BROADWELL_X 0x4F
#define INTEL_FAM6_SKYLAKE_MOBILE 0x4E
#define INTEL_FAM6_SKYLAKE_DESKTOP 0x5E
#define INTEL_FAM6_SKYLAKE_X 0x55
#define INTEL_FAM6_KABYLAKE_MOBILE 0x8E
#define INTEL_FAM6_KABYLAKE_DESKTOP 0x9E
/* "Small Core" Processors (Atom) */
#define INTEL_FAM6_ATOM_PINEVIEW 0x1C
#define INTEL_FAM6_ATOM_LINCROFT 0x26
#define INTEL_FAM6_ATOM_PENWELL 0x27
#define INTEL_FAM6_ATOM_CLOVERVIEW 0x35
#define INTEL_FAM6_ATOM_CEDARVIEW 0x36
#define INTEL_FAM6_ATOM_SILVERMONT1 0x37 /* BayTrail/BYT / Valleyview */
#define INTEL_FAM6_ATOM_SILVERMONT2 0x4D /* Avaton/Rangely */
#define INTEL_FAM6_ATOM_AIRMONT 0x4C /* CherryTrail / Braswell */
#define INTEL_FAM6_ATOM_MERRIFIELD1 0x4A /* Tangier */
#define INTEL_FAM6_ATOM_MERRIFIELD2 0x5A /* Annidale */
#define INTEL_FAM6_ATOM_GOLDMONT 0x5C
#define INTEL_FAM6_ATOM_DENVERTON 0x5F /* Goldmont Microserver */
/* Xeon Phi */
#define INTEL_FAM6_XEON_PHI_KNL 0x57 /* Knights Landing */
#endif /* _ASM_X86_INTEL_FAMILY_H */
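
A hypothetical usage sketch for the new header: model checks that used to hard-code magic numbers like 0x4F can compare against named macros instead. model_name() below is illustrative, not a kernel API; the two macro values are copied from the header above:

#include <stdio.h>

#define INTEL_FAM6_BROADWELL_X     0x4F
#define INTEL_FAM6_SKYLAKE_DESKTOP 0x5E

/* Illustrative helper, not a kernel API. */
static const char *model_name(unsigned int model)
{
	switch (model) {
	case INTEL_FAM6_BROADWELL_X:     return "Broadwell-X";
	case INTEL_FAM6_SKYLAKE_DESKTOP: return "Skylake (desktop)";
	default:                         return "unknown";
	}
}

int main(void)
{
	printf("0x4f -> %s\n", model_name(0x4F));
	return 0;
}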

View File

@ -122,7 +122,7 @@ notrace static inline void native_write_msr(unsigned int msr,
"2:\n"
_ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_unsafe)
: : "c" (msr), "a"(low), "d" (high) : "memory");
if (msr_tracepoint_active(__tracepoint_read_msr))
if (msr_tracepoint_active(__tracepoint_write_msr))
do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
}
@ -141,7 +141,7 @@ notrace static inline int native_write_msr_safe(unsigned int msr,
: "c" (msr), "0" (low), "d" (high),
[fault] "i" (-EIO)
: "memory");
if (msr_tracepoint_active(__tracepoint_read_msr))
if (msr_tracepoint_active(__tracepoint_write_msr))
do_trace_write_msr(msr, ((u64)high << 32 | low), err);
return err;
}

View File

@ -2588,8 +2588,8 @@ static struct resource * __init ioapic_setup_resources(void)
res[num].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i);
mem += IOAPIC_RESOURCE_NAME_SIZE;
ioapics[i].iomem_res = &res[num];
num++;
ioapics[i].iomem_res = res;
}
ioapic_resources = res;

View File

@ -674,14 +674,14 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
u64 value;
/* re-enable TopologyExtensions if switched off by BIOS */
if ((c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
if ((c->x86_model >= 0x10) && (c->x86_model <= 0x6f) &&
!cpu_has(c, X86_FEATURE_TOPOEXT)) {
if (msr_set_bit(0xc0011005, 54) > 0) {
rdmsrl(0xc0011005, value);
if (value & BIT_64(54)) {
set_cpu_cap(c, X86_FEATURE_TOPOEXT);
pr_info(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
}
}
}

View File

@ -96,6 +96,12 @@ static inline void cond_local_irq_disable(struct pt_regs *regs)
local_irq_disable();
}
/*
* In IST context, we explicitly disable preemption. This serves two
* purposes: it makes it much less likely that we would accidentally
* schedule in IST context and it will force a warning if we somehow
* manage to schedule by accident.
*/
void ist_enter(struct pt_regs *regs)
{
if (user_mode(regs)) {
@ -110,13 +116,7 @@ void ist_enter(struct pt_regs *regs)
rcu_nmi_enter();
}
/*
* We are atomic because we're on the IST stack; or we're on
* x86_32, in which case we still shouldn't schedule; or we're
* on x86_64 and entered from user mode, in which case we're
* still atomic unless ist_begin_non_atomic is called.
*/
preempt_count_add(HARDIRQ_OFFSET);
preempt_disable();
/* This code is a bit fragile. Test it. */
RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work");
@ -124,7 +124,7 @@ void ist_enter(struct pt_regs *regs)
void ist_exit(struct pt_regs *regs)
{
preempt_count_sub(HARDIRQ_OFFSET);
preempt_enable_no_resched();
if (!user_mode(regs))
rcu_nmi_exit();
@ -155,7 +155,7 @@ void ist_begin_non_atomic(struct pt_regs *regs)
BUG_ON((unsigned long)(current_top_of_stack() -
current_stack_pointer()) >= THREAD_SIZE);
preempt_count_sub(HARDIRQ_OFFSET);
preempt_enable_no_resched();
}
/**
@ -165,7 +165,7 @@ void ist_begin_non_atomic(struct pt_regs *regs)
*/
void ist_end_non_atomic(void)
{
preempt_count_add(HARDIRQ_OFFSET);
preempt_disable();
}
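
A toy model of what the patch switches to: preempt_disable()/preempt_enable_no_resched() keep a per-context count balanced, and any attempt to schedule while the count is non-zero can be detected and warned about. Purely illustrative, not the kernel's preemption machinery:

#include <stdio.h>

static int preempt_count;

static void preempt_disable(void)           { preempt_count++; }
static void preempt_enable_no_resched(void) { preempt_count--; }

static void try_schedule(void)
{
	if (preempt_count)
		fprintf(stderr, "BUG: scheduling while atomic (count=%d)\n",
			preempt_count);
}

int main(void)
{
	preempt_disable();            /* what ist_enter() now does */
	try_schedule();               /* would be caught and warned about */
	preempt_enable_no_resched();  /* what ist_exit() now does */
	return 0;
}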
static nokprobe_inline int

View File

@ -181,19 +181,22 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
struct kvm_cpuid_entry __user *entries)
{
int r, i;
struct kvm_cpuid_entry *cpuid_entries;
struct kvm_cpuid_entry *cpuid_entries = NULL;
r = -E2BIG;
if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
goto out;
r = -ENOMEM;
cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
if (!cpuid_entries)
goto out;
r = -EFAULT;
if (copy_from_user(cpuid_entries, entries,
cpuid->nent * sizeof(struct kvm_cpuid_entry)))
goto out_free;
if (cpuid->nent) {
cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) *
cpuid->nent);
if (!cpuid_entries)
goto out;
r = -EFAULT;
if (copy_from_user(cpuid_entries, entries,
cpuid->nent * sizeof(struct kvm_cpuid_entry)))
goto out;
}
for (i = 0; i < cpuid->nent; i++) {
vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
@ -212,9 +215,8 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
kvm_x86_ops->cpuid_update(vcpu);
r = kvm_update_cpuid(vcpu);
out_free:
vfree(cpuid_entries);
out:
vfree(cpuid_entries);
return r;
}
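
The restructured error path above leans on two properties: the buffer is only allocated when cpuid->nent is non-zero, and vfree(NULL) is a no-op, so a single exit label can free unconditionally. A user-space sketch of the same shape, using malloc/free (free(NULL) is likewise a no-op):

#include <stdlib.h>
#include <string.h>

static int set_entries(const int *user, int nent, int max)
{
	int *entries = NULL;
	int r = -1;

	if (nent > max)
		goto out;
	if (nent) {                   /* allocate only when there is data */
		entries = malloc(nent * sizeof(*entries));
		if (!entries)
			goto out;
		memcpy(entries, user, nent * sizeof(*entries));
	}
	/* ... consume entries ... */
	r = 0;
out:
	free(entries);                /* a no-op when entries is still NULL */
	return r;
}

int main(void)
{
	int v[2] = { 1, 2 };
	return set_entries(v, 2, 4);
}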

View File

@ -336,12 +336,12 @@ static gfn_t pse36_gfn_delta(u32 gpte)
#ifdef CONFIG_X86_64
static void __set_spte(u64 *sptep, u64 spte)
{
*sptep = spte;
WRITE_ONCE(*sptep, spte);
}
static void __update_clear_spte_fast(u64 *sptep, u64 spte)
{
*sptep = spte;
WRITE_ONCE(*sptep, spte);
}
static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
@ -390,7 +390,7 @@ static void __set_spte(u64 *sptep, u64 spte)
*/
smp_wmb();
ssptep->spte_low = sspte.spte_low;
WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
}
static void __update_clear_spte_fast(u64 *sptep, u64 spte)
@ -400,7 +400,7 @@ static void __update_clear_spte_fast(u64 *sptep, u64 spte)
ssptep = (union split_spte *)sptep;
sspte = (union split_spte)spte;
ssptep->spte_low = sspte.spte_low;
WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
/*
* If we map the spte from present to nonpresent, we should clear
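
WRITE_ONCE() is used here so the compiler must emit exactly one plain store: without it, a racing lockless reader of the SPTE could observe a torn, re-issued, or partially updated value. A rough user-space equivalent for GCC/clang, assuming a target where 64-bit stores are single instructions:

#include <stdint.h>
#include <stdio.h>

#define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)       (*(volatile __typeof__(x) *)&(x))

static uint64_t spte;

int main(void)
{
	/* One plain store the compiler may not split, fuse, or re-issue. */
	WRITE_ONCE(spte, 0xdeadbeefULL);
	printf("%#llx\n", (unsigned long long)READ_ONCE(spte));
	return 0;
}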

View File

@ -2314,6 +2314,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_AMD64_NB_CFG:
case MSR_FAM10H_MMIO_CONF_BASE:
case MSR_AMD64_BU_CFG2:
case MSR_IA32_PERF_CTL:
msr_info->data = 0;
break;
case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
@ -2972,6 +2973,10 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
| KVM_VCPUEVENT_VALID_SMM))
return -EINVAL;
if (events->exception.injected &&
(events->exception.nr > 31 || events->exception.nr == NMI_VECTOR))
return -EINVAL;
process_nmi(vcpu);
vcpu->arch.exception.pending = events->exception.injected;
vcpu->arch.exception.nr = events->exception.nr;
@ -3036,6 +3041,11 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
if (dbgregs->flags)
return -EINVAL;
if (dbgregs->dr6 & ~0xffffffffull)
return -EINVAL;
if (dbgregs->dr7 & ~0xffffffffull)
return -EINVAL;
memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
kvm_update_dr0123(vcpu);
vcpu->arch.dr6 = dbgregs->dr6;
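
The added checks reject user-supplied dr6/dr7 values with any of bits 63:32 set, since the architectural registers are 32 bits wide; letting such values through could later land in hardware state. A standalone sketch of the mask test:

#include <stdint.h>
#include <stdio.h>

static int set_debugregs(uint64_t dr6, uint64_t dr7)
{
	if (dr6 & ~0xffffffffULL)
		return -1; /* -EINVAL in the kernel */
	if (dr7 & ~0xffffffffULL)
		return -1;
	return 0;
}

int main(void)
{
	printf("ok=%d bad=%d\n", set_debugregs(0x0, 0x1),
	       set_debugregs(1ULL << 40, 0));
	return 0;
}
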
@ -7815,7 +7825,7 @@ int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
slot = id_to_memslot(slots, id);
if (size) {
if (WARN_ON(slot->npages))
if (slot->npages)
return -EEXIST;
/*

View File

@ -113,6 +113,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
ret = submit_bio_wait(type, bio);
if (ret == -EOPNOTSUPP)
ret = 0;
bio_put(bio);
}
blk_finish_plug(&plug);
@ -165,8 +166,10 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
}
}
if (bio)
if (bio) {
ret = submit_bio_wait(REQ_WRITE | REQ_WRITE_SAME, bio);
bio_put(bio);
}
return ret != -EOPNOTSUPP ? ret : 0;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
@ -206,8 +209,11 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
}
}
if (bio)
return submit_bio_wait(WRITE, bio);
if (bio) {
ret = submit_bio_wait(WRITE, bio);
bio_put(bio);
return ret;
}
return 0;
}
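
All three hunks plug the same leak: submit_bio_wait() does not consume the caller's reference, so each path must bio_put() the bio it allocated once the wait returns. A user-space model of that ownership rule, with stand-in types rather than the block-layer API:

#include <stdio.h>
#include <stdlib.h>

struct bio { int refcount; };

static struct bio *bio_alloc(void)
{
	struct bio *b = malloc(sizeof(*b));

	b->refcount = 1;
	return b;
}

static void bio_put(struct bio *b)
{
	if (--b->refcount == 0)
		free(b);
}

static int submit_bio_wait(struct bio *b)
{
	(void)b; /* waits for completion; does not drop the caller's ref */
	return 0;
}

int main(void)
{
	struct bio *bio = bio_alloc();
	int ret = submit_bio_wait(bio);

	bio_put(bio); /* without this, the bio leaks on every call */
	printf("ret=%d\n", ret);
	return 0;
}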

View File

@ -1262,12 +1262,9 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
blk_queue_split(q, &bio, q->bio_split);
if (!is_flush_fua && !blk_queue_nomerges(q)) {
if (blk_attempt_plug_merge(q, bio, &request_count,
&same_queue_rq))
return BLK_QC_T_NONE;
} else
request_count = blk_plug_queued_count(q);
if (!is_flush_fua && !blk_queue_nomerges(q) &&
blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
return BLK_QC_T_NONE;
rq = blk_mq_map_request(q, bio, &data);
if (unlikely(!rq))
@ -1358,9 +1355,11 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
blk_queue_split(q, &bio, q->bio_split);
if (!is_flush_fua && !blk_queue_nomerges(q) &&
blk_attempt_plug_merge(q, bio, &request_count, NULL))
return BLK_QC_T_NONE;
if (!is_flush_fua && !blk_queue_nomerges(q)) {
if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
return BLK_QC_T_NONE;
} else
request_count = blk_plug_queued_count(q);
rq = blk_mq_map_request(q, bio, &data);
if (unlikely(!rq))

View File

@ -331,15 +331,6 @@ static int acpi_processor_get_info(struct acpi_device *device)
pr->throttling.duty_width = acpi_gbl_FADT.duty_width;
pr->pblk = object.processor.pblk_address;
/*
* We don't care about error returns - we just try to mark
* these reserved so that nobody else is confused into thinking
* that this region might be unused..
*
* (In particular, allocating the IO range for Cardbus)
*/
request_region(pr->throttling.address, 6, "ACPI CPU throttle");
}
/*

Some files were not shown because too many files have changed in this diff.