Linux 4.16-rc6
-----BEGIN PGP SIGNATURE-----

iQFSBAABCAA8FiEEq68RxlopcLEwq+PEeb4+QwBBGIYFAlqvCPYeHHRvcnZhbGRz
QGxpbnV4LWZvdW5kYXRpb24ub3JnAAoJEHm+PkMAQRiGOaAH/171cgZGFEXSONxK
3O1AAv61wN5K/ISMt6mnelWR6fZg195FarOx0Rnq7Ot8OWuVa8CGcyT4vX4Z7nb9
SVMQKNMPCVQE4WCDOv6S0njChmRC0BxBoVJtTN9fhywdYgX1KcaTS/drMRHACF5n
rB9eouMQScfMzKGAW08gp5NvEGJ6W1SLX7La3/u0751dYisdJSP7+vFZNxUrGXEA
yIPOQjFu0Tfo8GXz/BwC678RZVzVLN0sE6+/vM7zNnoDlsRVkdDIVMo3UiVqm/NK
B37/TlZz8CYoapoKnRRB5giXnSPDSXtsikbGy3mcy0u5imGe+ZgdjrdYSaLk31cR
NVZY08k=
=pu3X
-----END PGP SIGNATURE-----

Merge tag 'v4.16-rc6' into perf/core, to pick up fixes

Signed-off-by: Ingo Molnar <mingo@kernel.org>

This commit is contained in commit 134933e557.
@@ -152,6 +152,11 @@ OCXL_IOCTL_IRQ_SET_FD:
 Associate an event fd to an AFU interrupt so that the user process
 can be notified when the AFU sends an interrupt.
 
+OCXL_IOCTL_GET_METADATA:
+
+Obtains configuration information from the card, such as the size of
+MMIO areas, the AFU version, and the PASID for the current context.
+
 
 mmap
 ----
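For userspace, the new ioctl is a plain read-only call on an open AFU device. A minimal sketch of a caller, assuming the struct ocxl_ioctl_metadata layout from the ocxl uapi header (the field names and the /dev/ocxl device path below follow that header and driver convention, but treat them as illustrative rather than authoritative):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <misc/ocxl.h>	/* OCXL_IOCTL_GET_METADATA, struct ocxl_ioctl_metadata */

int main(void)
{
	struct ocxl_ioctl_metadata md;
	/* device node name is illustrative; it depends on the card/AFU */
	int fd = open("/dev/ocxl/ocxl-afu0.0", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, OCXL_IOCTL_GET_METADATA, &md) == 0) {
		printf("AFU version %u.%u, PASID %u\n",
		       md.afu_version_major, md.afu_version_minor, md.pasid);
		printf("per-process MMIO size: %llu\n",
		       (unsigned long long)md.pp_mmio_size);
	}
	close(fd);
	return 0;
}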
@@ -11,7 +11,11 @@ Required properties:
   interrupts.
 
 Optional properties:
-- clocks: Optional reference to the clock used by the XOR engine.
+- clocks: Optional reference to the clocks used by the XOR engine.
+- clock-names: mandatory if there is a second clock, in this case the
+  name must be "core" for the first clock and "reg" for the second
+  one
+
 
 Example:
@@ -18,6 +18,7 @@ Required properties:
       - "renesas,etheravb-r8a7795" for the R8A7795 SoC.
       - "renesas,etheravb-r8a7796" for the R8A7796 SoC.
       - "renesas,etheravb-r8a77970" for the R8A77970 SoC.
+      - "renesas,etheravb-r8a77980" for the R8A77980 SoC.
       - "renesas,etheravb-r8a77995" for the R8A77995 SoC.
       - "renesas,etheravb-rcar-gen3" as a fallback for the above
 		R-Car Gen3 devices.
@@ -19,7 +19,7 @@ Required properties:
   configured in FS mode;
 - "st,stm32f4x9-hsotg": The DWC2 USB HS controller instance in STM32F4x9 SoCs
   configured in HS mode;
-- "st,stm32f7xx-hsotg": The DWC2 USB HS controller instance in STM32F7xx SoCs
+- "st,stm32f7-hsotg": The DWC2 USB HS controller instance in STM32F7 SoCs
   configured in HS mode;
 - reg : Should contain 1 register range (address and length)
 - interrupts : Should contain 1 interrupt
@@ -4,6 +4,7 @@ Required properties:
 - compatible: Must contain one of the following:
 	- "renesas,r8a7795-usb3-peri"
 	- "renesas,r8a7796-usb3-peri"
+	- "renesas,r8a77965-usb3-peri"
 	- "renesas,rcar-gen3-usb3-peri" for a generic R-Car Gen3 compatible
 	  device
 
@@ -12,6 +12,7 @@ Required properties:
 	- "renesas,usbhs-r8a7794" for r8a7794 (R-Car E2) compatible device
 	- "renesas,usbhs-r8a7795" for r8a7795 (R-Car H3) compatible device
 	- "renesas,usbhs-r8a7796" for r8a7796 (R-Car M3-W) compatible device
+	- "renesas,usbhs-r8a77965" for r8a77965 (R-Car M3-N) compatible device
 	- "renesas,usbhs-r8a77995" for r8a77995 (R-Car D3) compatible device
 	- "renesas,usbhs-r7s72100" for r7s72100 (RZ/A1) compatible device
 	- "renesas,rcar-gen2-usbhs" for R-Car Gen2 or RZ/G1 compatible devices
@@ -13,6 +13,7 @@ Required properties:
     - "renesas,xhci-r8a7793" for r8a7793 SoC
     - "renesas,xhci-r8a7795" for r8a7795 SoC
     - "renesas,xhci-r8a7796" for r8a7796 SoC
+    - "renesas,xhci-r8a77965" for r8a77965 SoC
     - "renesas,rcar-gen2-xhci" for a generic R-Car Gen2 or RZ/G1 compatible
       device
     - "renesas,rcar-gen3-xhci" for a generic R-Car Gen3 compatible device
@@ -111,7 +111,7 @@ TROUBLESHOOTING SERIAL CONSOLE PROBLEMS
 
     - If you don't have an HCDP, the kernel doesn't know where
       your console lives until the driver discovers serial
-      devices. Use "console=uart, io,0x3f8" (or appropriate
+      devices. Use "console=uart,io,0x3f8" (or appropriate
       address for your machine).
 
     Kernel and init script output works fine, but no "login:" prompt:
@@ -36,8 +36,7 @@ import glob
 
 from docutils import nodes, statemachine
 from docutils.statemachine import ViewList
-from docutils.parsers.rst import directives
-from sphinx.util.compat import Directive
+from docutils.parsers.rst import directives, Directive
 from sphinx.ext.autodoc import AutodocReporter
 
 __version__ = '1.0'
@@ -9925,6 +9925,13 @@ F:	Documentation/ABI/stable/sysfs-bus-nvmem
 F:	include/linux/nvmem-consumer.h
 F:	include/linux/nvmem-provider.h
 
+NXP SGTL5000 DRIVER
+M:	Fabio Estevam <fabio.estevam@nxp.com>
+L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
+S:	Maintained
+F:	Documentation/devicetree/bindings/sound/sgtl5000.txt
+F:	sound/soc/codecs/sgtl5000*
+
 NXP TDA998X DRM DRIVER
 M:	Russell King <linux@armlinux.org.uk>
 S:	Supported
@@ -12107,6 +12114,7 @@ M:	Sylwester Nawrocki <s.nawrocki@samsung.com>
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:	Supported
 F:	sound/soc/samsung/
+F:	Documentation/devicetree/bindings/sound/samsung*
 
 SAMSUNG EXYNOS PSEUDO RANDOM NUMBER GENERATOR (RNG) DRIVER
 M:	Krzysztof Kozlowski <krzk@kernel.org>
Makefile
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 16
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
@@ -58,7 +58,6 @@ config MACH_KUROBOX_PRO
 
 config MACH_DNS323
 	bool "D-Link DNS-323"
-	select GENERIC_NET_UTILS
 	select I2C_BOARDINFO if I2C
 	help
 	  Say 'Y' here if you want your kernel to support the

@@ -66,7 +65,6 @@ config MACH_DNS323
 
 config MACH_TS209
 	bool "QNAP TS-109/TS-209"
-	select GENERIC_NET_UTILS
 	help
 	  Say 'Y' here if you want your kernel to support the
 	  QNAP TS-109/TS-209 platform.

@@ -101,7 +99,6 @@ config MACH_LINKSTATION_LS_HGL
 
 config MACH_TS409
 	bool "QNAP TS-409"
-	select GENERIC_NET_UTILS
 	help
 	  Say 'Y' here if you want your kernel to support the
 	  QNAP TS-409 platform.
@@ -173,10 +173,42 @@ static struct mv643xx_eth_platform_data dns323_eth_data = {
 	.phy_addr = MV643XX_ETH_PHY_ADDR(8),
 };
 
+/* dns323_parse_hex_*() taken from tsx09-common.c; should a common copy of these
+ * functions be kept somewhere?
+ */
+static int __init dns323_parse_hex_nibble(char n)
+{
+	if (n >= '0' && n <= '9')
+		return n - '0';
+
+	if (n >= 'A' && n <= 'F')
+		return n - 'A' + 10;
+
+	if (n >= 'a' && n <= 'f')
+		return n - 'a' + 10;
+
+	return -1;
+}
+
+static int __init dns323_parse_hex_byte(const char *b)
+{
+	int hi;
+	int lo;
+
+	hi = dns323_parse_hex_nibble(b[0]);
+	lo = dns323_parse_hex_nibble(b[1]);
+
+	if (hi < 0 || lo < 0)
+		return -1;
+
+	return (hi << 4) | lo;
+}
+
 static int __init dns323_read_mac_addr(void)
 {
 	u_int8_t addr[6];
-	void __iomem *mac_page;
+	int i;
+	char *mac_page;
 
 	/* MAC address is stored as a regular ol' string in /dev/mtdblock4
 	 * (0x007d0000-0x00800000) starting at offset 196480 (0x2ff80).

@@ -185,8 +217,23 @@ static int __init dns323_read_mac_addr(void)
 	if (!mac_page)
 		return -ENOMEM;
 
-	if (!mac_pton((__force const char *) mac_page, addr))
-		goto error_fail;
+	/* Sanity check the string we're looking at */
+	for (i = 0; i < 5; i++) {
+		if (*(mac_page + (i * 3) + 2) != ':') {
+			goto error_fail;
+		}
+	}
+
+	for (i = 0; i < 6; i++)	{
+		int byte;
+
+		byte = dns323_parse_hex_byte(mac_page + (i * 3));
+		if (byte < 0) {
+			goto error_fail;
+		}
+
+		addr[i] = byte;
+	}
 
 	iounmap(mac_page);
 	printk("DNS-323: Found ethernet MAC address: %pM\n", addr);
@@ -53,12 +53,53 @@ struct mv643xx_eth_platform_data qnap_tsx09_eth_data = {
 	.phy_addr	= MV643XX_ETH_PHY_ADDR(8),
 };
 
+static int __init qnap_tsx09_parse_hex_nibble(char n)
+{
+	if (n >= '0' && n <= '9')
+		return n - '0';
+
+	if (n >= 'A' && n <= 'F')
+		return n - 'A' + 10;
+
+	if (n >= 'a' && n <= 'f')
+		return n - 'a' + 10;
+
+	return -1;
+}
+
+static int __init qnap_tsx09_parse_hex_byte(const char *b)
+{
+	int hi;
+	int lo;
+
+	hi = qnap_tsx09_parse_hex_nibble(b[0]);
+	lo = qnap_tsx09_parse_hex_nibble(b[1]);
+
+	if (hi < 0 || lo < 0)
+		return -1;
+
+	return (hi << 4) | lo;
+}
+
 static int __init qnap_tsx09_check_mac_addr(const char *addr_str)
 {
 	u_int8_t addr[6];
+	int i;
 
-	if (!mac_pton(addr_str, addr))
-		return -1;
+	for (i = 0; i < 6; i++) {
+		int byte;
+
+		/*
+		 * Enforce "xx:xx:xx:xx:xx:xx\n" format.
+		 */
+		if (addr_str[(i * 3) + 2] != ((i < 5) ? ':' : '\n'))
+			return -1;
+
+		byte = qnap_tsx09_parse_hex_byte(addr_str + (i * 3));
+		if (byte < 0)
+			return -1;
+		addr[i] = byte;
+	}
 
 	printk(KERN_INFO "tsx09: found ethernet mac address %pM\n", addr);
 

@@ -77,12 +118,12 @@ void __init qnap_tsx09_find_mac_addr(u32 mem_base, u32 size)
 	unsigned long addr;
 
 	for (addr = mem_base; addr < (mem_base + size); addr += 1024) {
-		void __iomem *nor_page;
+		char *nor_page;
 		int ret = 0;
 
 		nor_page = ioremap(addr, 1024);
 		if (nor_page != NULL) {
-			ret = qnap_tsx09_check_mac_addr((__force const char *)nor_page);
+			ret = qnap_tsx09_check_mac_addr(nor_page);
 			iounmap(nor_page);
 		}
 
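Both files above restore the same open-coded parser. For reference, a stand-alone rendition of the algorithm (same logic, compilable outside the kernel; parse_mac and the test string below are illustrative names, not kernel symbols):

#include <stdio.h>

/* Same nibble decoding as the restored dns323/tsx09 helpers. */
static int parse_hex_nibble(char n)
{
	if (n >= '0' && n <= '9')
		return n - '0';
	if (n >= 'A' && n <= 'F')
		return n - 'A' + 10;
	if (n >= 'a' && n <= 'f')
		return n - 'a' + 10;
	return -1;
}

static int parse_mac(const char *s, unsigned char addr[6])
{
	int i;

	for (i = 0; i < 6; i++) {
		int hi = parse_hex_nibble(s[i * 3]);
		int lo = parse_hex_nibble(s[i * 3 + 1]);

		/* Separators must be ':' (the tsx09 variant additionally
		 * demands a trailing '\n'; omitted here). */
		if (hi < 0 || lo < 0 || (i < 5 && s[i * 3 + 2] != ':'))
			return -1;
		addr[i] = (hi << 4) | lo;
	}
	return 0;
}

int main(void)
{
	unsigned char mac[6];

	if (parse_mac("00:11:22:aa:bb:cc", mac) == 0)
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}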
@@ -178,7 +178,7 @@ static int enable_smccc_arch_workaround_1(void *data)
 	case PSCI_CONDUIT_HVC:
 		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
 				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
-		if (res.a0)
+		if ((int)res.a0 < 0)
 			return 0;
 		cb = call_hvc_arch_workaround_1;
 		smccc_start = __smccc_workaround_1_hvc_start;

@@ -188,7 +188,7 @@ static int enable_smccc_arch_workaround_1(void *data)
 	case PSCI_CONDUIT_SMC:
 		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
 				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
-		if (res.a0)
+		if ((int)res.a0 < 0)
 			return 0;
 		cb = call_smc_arch_workaround_1;
 		smccc_start = __smccc_workaround_1_smc_start;
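The change above follows the SMCCC v1.1 convention: an ARM_SMCCC_ARCH_FEATURES query returns a negative value (such as SMCCC_RET_NOT_SUPPORTED, -1) when the function is absent, and zero or a positive value when it is implemented, so the old non-zero test wrongly rejected firmware that returned positive capability values. A compact restatement of the check (illustrative helper, not a kernel API):

#include <stdbool.h>

#define SMCCC_RET_NOT_SUPPORTED	(-1)	/* per the SMC Calling Convention */

/* res.a0 from an ARCH_FEATURES query: < 0 means not implemented,
 * >= 0 means implemented (positive values may carry extra detail). */
static bool arch_workaround_supported(unsigned long a0)
{
	return (int)a0 >= 0;	/* "if (a0)" would wrongly reject positives */
}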
@@ -363,8 +363,6 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 {
 	int ret = 0;
 
-	vcpu_load(vcpu);
-
 	trace_kvm_set_guest_debug(vcpu, dbg->control);
 
 	if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) {

@@ -386,7 +384,6 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 	}
 
 out:
-	vcpu_put(vcpu);
 	return ret;
 }
 
@@ -108,7 +108,7 @@ static bool pgattr_change_is_safe(u64 old, u64 new)
 	 * The following mapping attributes may be updated in live
 	 * kernel mappings without the need for break-before-make.
 	 */
-	static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE;
+	static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;
 
 	/* creating or taking down mappings is always safe */
 	if (old == 0 || new == 0)

@@ -118,9 +118,9 @@ static bool pgattr_change_is_safe(u64 old, u64 new)
 	if ((old | new) & PTE_CONT)
 		return false;
 
-	/* Transitioning from Global to Non-Global is safe */
-	if (((old ^ new) == PTE_NG) && (new & PTE_NG))
-		return true;
+	/* Transitioning from Non-Global to Global is unsafe */
+	if (old & ~new & PTE_NG)
+		return false;
 
 	return ((old ^ new) & ~mask) == 0;
 }
@@ -66,38 +66,35 @@ ATOMIC_OPS(add, +)
 ATOMIC_OPS(sub, -)
 
 #ifdef __OPTIMIZE__
-#define __ia64_atomic_const(i)	__builtin_constant_p(i) ?		\
+#define __ia64_atomic_const(i)						\
+	static const int __ia64_atomic_p = __builtin_constant_p(i) ?	\
 		((i) == 1 || (i) == 4 || (i) == 8 || (i) == 16 ||	\
-		 (i) == -1 || (i) == -4 || (i) == -8 || (i) == -16) : 0
-
-#define atomic_add_return(i, v)					\
-({									\
-	int __i = (i);							\
-	static const int __ia64_atomic_p = __ia64_atomic_const(i);	\
-	__ia64_atomic_p ? ia64_fetch_and_add(__i, &(v)->counter) :	\
-				ia64_atomic_add(__i, v);		\
-})
-
-#define atomic_sub_return(i, v)					\
-({									\
-	int __i = (i);							\
-	static const int __ia64_atomic_p = __ia64_atomic_const(i);	\
-	__ia64_atomic_p ? ia64_fetch_and_add(-__i, &(v)->counter) :	\
-				ia64_atomic_sub(__i, v);		\
-})
+		 (i) == -1 || (i) == -4 || (i) == -8 || (i) == -16) : 0;\
+	__ia64_atomic_p
 #else
-#define atomic_add_return(i, v) ia64_atomic_add(i, v)
-#define atomic_sub_return(i, v) ia64_atomic_sub(i, v)
+#define __ia64_atomic_const(i)	0
 #endif
 
+#define atomic_add_return(i,v)						\
+({									\
+	int __ia64_aar_i = (i);						\
+	__ia64_atomic_const(i)						\
+		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
+		: ia64_atomic_add(__ia64_aar_i, v);			\
+})
+
+#define atomic_sub_return(i,v)						\
+({									\
+	int __ia64_asr_i = (i);						\
+	__ia64_atomic_const(i)						\
+		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
+		: ia64_atomic_sub(__ia64_asr_i, v);			\
+})
+
 #define atomic_fetch_add(i,v)						\
 ({									\
 	int __ia64_aar_i = (i);						\
-	(__builtin_constant_p(i)					\
-	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
-	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
-	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
-	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))	\
+	__ia64_atomic_const(i)						\
 		? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq)	\
 		: ia64_atomic_fetch_add(__ia64_aar_i, v);		\
 })

@@ -105,11 +102,7 @@ ATOMIC_OPS(sub, -)
 #define atomic_fetch_sub(i,v)						\
 ({									\
 	int __ia64_asr_i = (i);						\
-	(__builtin_constant_p(i)					\
-	 && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4)		\
-	     || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16)		\
-	     || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4)		\
-	     || (__ia64_asr_i == -8) || (__ia64_asr_i == -16)))	\
+	__ia64_atomic_const(i)						\
 		? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq)	\
 		: ia64_atomic_fetch_sub(__ia64_asr_i, v);		\
 })

@@ -170,11 +163,7 @@ ATOMIC64_OPS(sub, -)
 #define atomic64_add_return(i,v)					\
 ({									\
 	long __ia64_aar_i = (i);					\
-	(__builtin_constant_p(i)					\
-	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
-	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
-	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
-	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))	\
+	__ia64_atomic_const(i)						\
 		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
 		: ia64_atomic64_add(__ia64_aar_i, v);			\
 })

@@ -182,11 +171,7 @@ ATOMIC64_OPS(sub, -)
 #define atomic64_sub_return(i,v)					\
 ({									\
 	long __ia64_asr_i = (i);					\
-	(__builtin_constant_p(i)					\
-	 && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4)		\
-	     || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16)		\
-	     || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4)		\
-	     || (__ia64_asr_i == -8) || (__ia64_asr_i == -16)))	\
+	__ia64_atomic_const(i)						\
 		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
 		: ia64_atomic64_sub(__ia64_asr_i, v);			\
 })

@@ -194,11 +179,7 @@ ATOMIC64_OPS(sub, -)
 #define atomic64_fetch_add(i,v)					\
 ({									\
 	long __ia64_aar_i = (i);					\
-	(__builtin_constant_p(i)					\
-	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
-	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
-	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
-	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))	\
+	__ia64_atomic_const(i)						\
 		? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq)	\
 		: ia64_atomic64_fetch_add(__ia64_aar_i, v);		\
 })

@@ -206,11 +187,7 @@ ATOMIC64_OPS(sub, -)
 #define atomic64_fetch_sub(i,v)					\
 ({									\
 	long __ia64_asr_i = (i);					\
-	(__builtin_constant_p(i)					\
-	 && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4)		\
-	     || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16)		\
-	     || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4)		\
-	     || (__ia64_asr_i == -8) || (__ia64_asr_i == -16)))	\
+	__ia64_atomic_const(i)						\
 		? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq)	\
 		: ia64_atomic64_fetch_sub(__ia64_asr_i, v);		\
 })
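The rework leans on GCC's __builtin_constant_p(): under optimization it folds to a compile-time constant, so the fetchadd fast path is only chosen for immediates the ia64 instruction can encode, and the static const int declared inside the statement expression costs nothing at run time. A stand-alone sketch of the idiom (plain C; FETCHADD_OK is an illustrative name):

#include <stdio.h>

/* Mirrors __ia64_atomic_const(): non-zero only when `i` is a
 * compile-time constant in the set fetchadd can encode. */
#define FETCHADD_OK(i)						\
	(__builtin_constant_p(i) ?				\
	 ((i) == 1 || (i) == 4 || (i) == 8 || (i) == 16 ||	\
	  (i) == -1 || (i) == -4 || (i) == -8 || (i) == -16) : 0)

int main(void)
{
	int n = 3;

	printf("%d %d %d\n", FETCHADD_OK(4), FETCHADD_OK(3), FETCHADD_OK(n));
	/* Prints "1 0 0": 4 is an encodable constant, 3 is a constant
	 * outside the set, and n is not a compile-time constant. */
	return 0;
}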
@@ -117,7 +117,7 @@ store_call_start(struct device *dev, struct device_attribute *attr,
 
 #ifdef ERR_INJ_DEBUG
 	printk(KERN_DEBUG "Returns: status=%d,\n", (int)status[cpu]);
-	printk(KERN_DEBUG "capapbilities=%lx,\n", capabilities[cpu]);
+	printk(KERN_DEBUG "capabilities=%lx,\n", capabilities[cpu]);
 	printk(KERN_DEBUG "resources=%lx\n", resources[cpu]);
 #endif
 	return size;

@@ -142,7 +142,7 @@ store_virtual_to_phys(struct device *dev, struct device_attribute *attr,
 	u64 virt_addr=simple_strtoull(buf, NULL, 16);
 	int ret;
 
-	ret = get_user_pages(virt_addr, 1, FOLL_WRITE, NULL, NULL);
+	ret = get_user_pages_fast(virt_addr, 1, FOLL_WRITE, NULL);
 	if (ret<=0) {
 #ifdef ERR_INJ_DEBUG
 		printk("Virtual address %lx is not existing.\n",virt_addr);
@@ -16,7 +16,7 @@ import re
 import sys
 
 if len(sys.argv) != 2:
-    print "Usage: %s FILE" % sys.argv[0]
+    print("Usage: %s FILE" % sys.argv[0])
     sys.exit(2)
 
 readelf = os.getenv("READELF", "readelf")

@@ -29,7 +29,7 @@ def check_func (func, slots, rlen_sum):
         global num_errors
         num_errors += 1
         if not func: func = "[%#x-%#x]" % (start, end)
-        print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
+        print("ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum))
         return
 
 num_funcs = 0

@@ -43,23 +43,23 @@ for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
             check_func(func, slots, rlen_sum)
 
         func  = m.group(1)
-        start = long(m.group(2), 16)
-        end   = long(m.group(3), 16)
+        start = int(m.group(2), 16)
+        end   = int(m.group(3), 16)
         slots = 3 * (end - start) / 16
-        rlen_sum = 0L
+        rlen_sum = 0
        num_funcs += 1
    else:
        m = rlen_pattern.match(line)
        if m:
-            rlen_sum += long(m.group(1))
+            rlen_sum += int(m.group(1))
 check_func(func, slots, rlen_sum)
 
 if num_errors == 0:
-    print "No errors detected in %u functions." % num_funcs
+    print("No errors detected in %u functions." % num_funcs)
 else:
     if num_errors > 1:
         err="errors"
     else:
         err="error"
-    print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
+    print("%u %s detected in %u functions." % (num_errors, err, num_funcs))
 sys.exit(1)
@@ -24,6 +24,7 @@ config MICROBLAZE
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_FUNCTION_TRACER
+	select NO_BOOTMEM
 	select HAVE_MEMBLOCK
 	select HAVE_MEMBLOCK_NODE_MAP
 	select HAVE_OPROFILE
@@ -8,7 +8,6 @@ menu "Platform options"
 
 config OPT_LIB_FUNCTION
 	bool "Optimalized lib function"
-	depends on CPU_LITTLE_ENDIAN
 	default y
 	help
 	  Allows turn on optimalized library function (memcpy and memmove).

@@ -21,6 +20,7 @@ config OPT_LIB_FUNCTION
 config OPT_LIB_ASM
 	bool "Optimalized lib function ASM"
 	depends on OPT_LIB_FUNCTION && (XILINX_MICROBLAZE0_USE_BARREL = 1)
+	depends on CPU_BIG_ENDIAN
 	default n
 	help
 	  Allows turn on optimalized library function (memcpy and memmove).
@@ -44,7 +44,6 @@ void machine_shutdown(void);
 void machine_halt(void);
 void machine_power_off(void);
 
-extern void *alloc_maybe_bootmem(size_t size, gfp_t mask);
 extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
 
 # endif /* __ASSEMBLY__ */
@@ -29,10 +29,6 @@
  *	between mem locations with size of xfer spec'd in bytes
  */
 
-#ifdef __MICROBLAZEEL__
-#error Microblaze LE not support ASM optimized lib func. Disable OPT_LIB_ASM.
-#endif
-
 #include <linux/linkage.h>
 	.text
 	.globl	memcpy
@@ -32,9 +32,6 @@ int mem_init_done;
 #ifndef CONFIG_MMU
 unsigned int __page_offset;
 EXPORT_SYMBOL(__page_offset);
-
-#else
-static int init_bootmem_done;
 #endif /* CONFIG_MMU */
 
 char *klimit = _end;

@@ -117,7 +114,6 @@ static void __init paging_init(void)
 
 void __init setup_memory(void)
 {
-	unsigned long map_size;
 	struct memblock_region *reg;
 
 #ifndef CONFIG_MMU

@@ -174,17 +170,6 @@ void __init setup_memory(void)
 	pr_info("%s: max_low_pfn: %#lx\n", __func__, max_low_pfn);
 	pr_info("%s: max_pfn: %#lx\n", __func__, max_pfn);
 
-	/*
-	 * Find an area to use for the bootmem bitmap.
-	 * We look for the first area which is at least
-	 * 128kB in length (128kB is enough for a bitmap
-	 * for 4GB of memory, using 4kB pages), plus 1 page
-	 * (in case the address isn't page-aligned).
-	 */
-	map_size = init_bootmem_node(NODE_DATA(0),
-		PFN_UP(TOPHYS((u32)klimit)), min_low_pfn, max_low_pfn);
-	memblock_reserve(PFN_UP(TOPHYS((u32)klimit)) << PAGE_SHIFT, map_size);
-
 	/* Add active regions with valid PFNs */
 	for_each_memblock(memory, reg) {
 		unsigned long start_pfn, end_pfn;

@@ -196,32 +181,9 @@ void __init setup_memory(void)
 				  &memblock.memory, 0);
 	}
 
-	/* free bootmem is whole main memory */
-	free_bootmem_with_active_regions(0, max_low_pfn);
-
-	/* reserve allocate blocks */
-	for_each_memblock(reserved, reg) {
-		unsigned long top = reg->base + reg->size - 1;
-
-		pr_debug("reserved - 0x%08x-0x%08x, %lx, %lx\n",
-			 (u32) reg->base, (u32) reg->size, top,
-			 memory_start + lowmem_size - 1);
-
-		if (top <= (memory_start + lowmem_size - 1)) {
-			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
-		} else if (reg->base < (memory_start + lowmem_size - 1)) {
-			unsigned long trunc_size = memory_start + lowmem_size -
-				reg->base;
-			reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT);
-		}
-	}
-
 	/* XXX need to clip this if using highmem? */
 	sparse_memory_present_with_active_regions(0);
 
-#ifdef CONFIG_MMU
-	init_bootmem_done = 1;
-#endif
 	paging_init();
 }
 

@@ -398,30 +360,16 @@ asmlinkage void __init mmu_init(void)
 /* This is only called until mem_init is done. */
 void __init *early_get_page(void)
 {
-	void *p;
-	if (init_bootmem_done) {
-		p = alloc_bootmem_pages(PAGE_SIZE);
-	} else {
-		/*
-		 * Mem start + kernel_tlb -> here is limit
-		 * because of mem mapping from head.S
-		 */
-		p = __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
-				memory_start + kernel_tlb));
-	}
-	return p;
+	/*
+	 * Mem start + kernel_tlb -> here is limit
+	 * because of mem mapping from head.S
+	 */
+	return __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
+				memory_start + kernel_tlb));
 }
 
 #endif /* CONFIG_MMU */
 
-void * __ref alloc_maybe_bootmem(size_t size, gfp_t mask)
-{
-	if (mem_init_done)
-		return kmalloc(size, mask);
-	else
-		return alloc_bootmem(size);
-}
-
 void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask)
 {
 	void *p;
@@ -135,6 +135,8 @@ int __init ath25_find_config(phys_addr_t base, unsigned long size)
 	}
 
 	board_data = kzalloc(BOARD_CONFIG_BUFSZ, GFP_KERNEL);
+	if (!board_data)
+		goto error;
 	ath25_board.config = (struct ath25_boarddata *)board_data;
 	memcpy_fromio(board_data, bcfg, 0x100);
 	if (broken_boarddata) {
@@ -2277,6 +2277,8 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
 	}
 
 	host_data = kzalloc(sizeof(*host_data), GFP_KERNEL);
+	if (!host_data)
+		return -ENOMEM;
 	raw_spin_lock_init(&host_data->lock);
 
 	addr = of_get_address(ciu_node, 0, NULL, NULL);
@@ -168,11 +168,11 @@ static void bmips_prepare_cpus(unsigned int max_cpus)
 		return;
 	}
 
-	if (request_irq(IPI0_IRQ, bmips_ipi_interrupt, IRQF_PERCPU,
-			"smp_ipi0", NULL))
+	if (request_irq(IPI0_IRQ, bmips_ipi_interrupt,
+			IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi0", NULL))
 		panic("Can't request IPI0 interrupt");
-	if (request_irq(IPI1_IRQ, bmips_ipi_interrupt, IRQF_PERCPU,
-			"smp_ipi1", NULL))
+	if (request_irq(IPI1_IRQ, bmips_ipi_interrupt,
+			IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi1", NULL))
 		panic("Can't request IPI1 interrupt");
 }
 
@@ -7,6 +7,8 @@ choice
 config LEMOTE_FULOONG2E
 	bool "Lemote Fuloong(2e) mini-PC"
 	select ARCH_SPARSEMEM_ENABLE
+	select ARCH_MIGHT_HAVE_PC_PARPORT
+	select ARCH_MIGHT_HAVE_PC_SERIO
 	select CEVT_R4K
 	select CSRC_R4K
 	select SYS_HAS_CPU_LOONGSON2E

@@ -33,6 +35,8 @@ config LEMOTE_FULOONG2E
 config LEMOTE_MACH2F
 	bool "Lemote Loongson 2F family machines"
 	select ARCH_SPARSEMEM_ENABLE
+	select ARCH_MIGHT_HAVE_PC_PARPORT
+	select ARCH_MIGHT_HAVE_PC_SERIO
 	select BOARD_SCACHE
 	select BOOT_ELF32
 	select CEVT_R4K if ! MIPS_EXTERNAL_TIMER

@@ -62,6 +66,8 @@ config LEMOTE_MACH2F
 config LOONGSON_MACH3X
 	bool "Generic Loongson 3 family machines"
 	select ARCH_SPARSEMEM_ENABLE
+	select ARCH_MIGHT_HAVE_PC_PARPORT
+	select ARCH_MIGHT_HAVE_PC_SERIO
 	select GENERIC_ISA_DMA_SUPPORT_BROKEN
 	select BOOT_ELF32
 	select BOARD_SCACHE
@@ -543,7 +543,8 @@ void flush_cache_mm(struct mm_struct *mm)
 	   rp3440, etc.  So, avoid it if the mm isn't too big.  */
 	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
 	    mm_total_size(mm) >= parisc_cache_flush_threshold) {
-		flush_tlb_all();
+		if (mm->context)
+			flush_tlb_all();
 		flush_cache_all();
 		return;
 	}

@@ -571,6 +572,8 @@ void flush_cache_mm(struct mm_struct *mm)
 			pfn = pte_pfn(*ptep);
 			if (!pfn_valid(pfn))
 				continue;
+			if (unlikely(mm->context))
+				flush_tlb_page(vma, addr);
 			__flush_cache_page(vma, addr, PFN_PHYS(pfn));
 		}
 	}

@@ -579,26 +582,46 @@ void flush_cache_mm(struct mm_struct *mm)
 void flush_cache_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end)
 {
+	pgd_t *pgd;
+	unsigned long addr;
+
 	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
 	    end - start >= parisc_cache_flush_threshold) {
-		flush_tlb_range(vma, start, end);
+		if (vma->vm_mm->context)
+			flush_tlb_range(vma, start, end);
 		flush_cache_all();
 		return;
 	}
 
-	flush_user_dcache_range_asm(start, end);
-	if (vma->vm_flags & VM_EXEC)
-		flush_user_icache_range_asm(start, end);
-	flush_tlb_range(vma, start, end);
+	if (vma->vm_mm->context == mfsp(3)) {
+		flush_user_dcache_range_asm(start, end);
+		if (vma->vm_flags & VM_EXEC)
+			flush_user_icache_range_asm(start, end);
+		flush_tlb_range(vma, start, end);
+		return;
+	}
+
+	pgd = vma->vm_mm->pgd;
+	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
+		unsigned long pfn;
+		pte_t *ptep = get_ptep(pgd, addr);
+		if (!ptep)
+			continue;
+		pfn = pte_pfn(*ptep);
+		if (pfn_valid(pfn)) {
+			if (unlikely(vma->vm_mm->context))
+				flush_tlb_page(vma, addr);
+			__flush_cache_page(vma, addr, PFN_PHYS(pfn));
+		}
+	}
 }
 
 void
 flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
 {
 	BUG_ON(!vma->vm_mm->context);
 
 	if (pfn_valid(pfn)) {
-		flush_tlb_page(vma, vmaddr);
+		if (likely(vma->vm_mm->context))
+			flush_tlb_page(vma, vmaddr);
 		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
 	}
 }
@@ -101,7 +101,8 @@ $(addprefix $(obj)/,$(zlib-y)): \
 libfdt       := fdt.c fdt_ro.c fdt_wip.c fdt_sw.c fdt_rw.c fdt_strerror.c
 libfdtheader := fdt.h libfdt.h libfdt_internal.h
 
-$(addprefix $(obj)/,$(libfdt) libfdt-wrapper.o simpleboot.o epapr.o opal.o): \
+$(addprefix $(obj)/,$(libfdt) libfdt-wrapper.o simpleboot.o epapr.o opal.o \
+	treeboot-akebono.o treeboot-currituck.o treeboot-iss4xx.o): \
 	$(addprefix $(obj)/,$(libfdtheader))
 
 src-wlib-y := string.S crt0.S stdio.c decompress.c main.c \
@@ -874,7 +874,6 @@ struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = {
 		.mmu = 0,
 		.hash_ext = 0,
 		.radix_ext = 0,
-		.byte22 = 0,
 	},
 
 	/* option vector 6: IBM PAPR hints */
@@ -195,6 +195,12 @@ static void kvmppc_pte_free(pte_t *ptep)
 	kmem_cache_free(kvm_pte_cache, ptep);
 }
 
+/* Like pmd_huge() and pmd_large(), but works regardless of config options */
+static inline int pmd_is_leaf(pmd_t pmd)
+{
+	return !!(pmd_val(pmd) & _PAGE_PTE);
+}
+
 static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
 			     unsigned int level, unsigned long mmu_seq)
 {

@@ -219,7 +225,7 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
 	else
 		new_pmd = pmd_alloc_one(kvm->mm, gpa);
 
-	if (level == 0 && !(pmd && pmd_present(*pmd)))
+	if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_is_leaf(*pmd)))
 		new_ptep = kvmppc_pte_alloc();
 
 	/* Check if we might have been invalidated; let the guest retry if so */

@@ -244,12 +250,30 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
 		new_pmd = NULL;
 	}
 	pmd = pmd_offset(pud, gpa);
-	if (pmd_large(*pmd)) {
-		/* Someone else has instantiated a large page here; retry */
-		ret = -EAGAIN;
-		goto out_unlock;
-	}
-	if (level == 1 && !pmd_none(*pmd)) {
+	if (pmd_is_leaf(*pmd)) {
+		unsigned long lgpa = gpa & PMD_MASK;
+
+		/*
+		 * If we raced with another CPU which has just put
+		 * a 2MB pte in after we saw a pte page, try again.
+		 */
+		if (level == 0 && !new_ptep) {
+			ret = -EAGAIN;
+			goto out_unlock;
+		}
+		/* Valid 2MB page here already, remove it */
+		old = kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd),
+					      ~0UL, 0, lgpa, PMD_SHIFT);
+		kvmppc_radix_tlbie_page(kvm, lgpa, PMD_SHIFT);
+		if (old & _PAGE_DIRTY) {
+			unsigned long gfn = lgpa >> PAGE_SHIFT;
+			struct kvm_memory_slot *memslot;
+			memslot = gfn_to_memslot(kvm, gfn);
+			if (memslot && memslot->dirty_bitmap)
+				kvmppc_update_dirty_map(memslot,
+							gfn, PMD_SIZE);
+		}
+	} else if (level == 1 && !pmd_none(*pmd)) {
 		/*
 		 * There's a page table page here, but we wanted
 		 * to install a large page.  Tell the caller and let

@@ -412,28 +436,24 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	} else {
 		page = pages[0];
 		pfn = page_to_pfn(page);
-		if (PageHuge(page)) {
-			page = compound_head(page);
-			pte_size <<= compound_order(page);
+		if (PageCompound(page)) {
+			pte_size <<= compound_order(compound_head(page));
 			/* See if we can insert a 2MB large-page PTE here */
 			if (pte_size >= PMD_SIZE &&
-			    (gpa & PMD_MASK & PAGE_MASK) ==
-			    (hva & PMD_MASK & PAGE_MASK)) {
+			    (gpa & (PMD_SIZE - PAGE_SIZE)) ==
+			    (hva & (PMD_SIZE - PAGE_SIZE))) {
 				level = 1;
 				pfn &= ~((PMD_SIZE >> PAGE_SHIFT) - 1);
 			}
 		}
 		/* See if we can provide write access */
 		if (writing) {
-			/*
-			 * We assume gup_fast has set dirty on the host PTE.
-			 */
 			pgflags |= _PAGE_WRITE;
 		} else {
 			local_irq_save(flags);
 			ptep = find_current_mm_pte(current->mm->pgd,
 						   hva, NULL, NULL);
-			if (ptep && pte_write(*ptep) && pte_dirty(*ptep))
+			if (ptep && pte_write(*ptep))
 				pgflags |= _PAGE_WRITE;
 			local_irq_restore(flags);
 		}

@@ -459,18 +479,15 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		pte = pfn_pte(pfn, __pgprot(pgflags));
 		ret = kvmppc_create_pte(kvm, pte, gpa, level, mmu_seq);
 	}
-	if (ret == 0 || ret == -EAGAIN)
-		ret = RESUME_GUEST;
 
 	if (page) {
-		/*
-		 * We drop pages[0] here, not page because page might
-		 * have been set to the head page of a compound, but
-		 * we have to drop the reference on the correct tail
-		 * page to match the get inside gup()
-		 */
-		put_page(pages[0]);
+		if (!ret && (pgflags & _PAGE_WRITE))
+			set_page_dirty_lock(page);
+		put_page(page);
 	}
 
+	if (ret == 0 || ret == -EAGAIN)
+		ret = RESUME_GUEST;
 	return ret;
 }
 

@@ -644,7 +661,7 @@ void kvmppc_free_radix(struct kvm *kvm)
 			continue;
 		pmd = pmd_offset(pud, 0);
 		for (im = 0; im < PTRS_PER_PMD; ++im, ++pmd) {
-			if (pmd_huge(*pmd)) {
+			if (pmd_is_leaf(*pmd)) {
 				pmd_clear(pmd);
 				continue;
 			}
@@ -2885,7 +2885,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 	 */
 	trace_hardirqs_on();
 
-	guest_enter();
+	guest_enter_irqoff();
 
 	srcu_idx = srcu_read_lock(&vc->kvm->srcu);
 

@@ -2893,8 +2893,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 
 	srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
 
-	guest_exit();
-
 	trace_hardirqs_off();
 	set_irq_happened(trap);
 

@@ -2937,6 +2935,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 	kvmppc_set_host_core(pcpu);
 
 	local_irq_enable();
+	guest_exit();
 
 	/* Let secondaries go back to the offline loop */
 	for (i = 0; i < controlled_threads; ++i) {

@@ -3656,15 +3655,17 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 		goto up_out;
 
 	psize = vma_kernel_pagesize(vma);
-	porder = __ilog2(psize);
 
 	up_read(&current->mm->mmap_sem);
 
 	/* We can handle 4k, 64k or 16M pages in the VRMA */
-	err = -EINVAL;
-	if (!(psize == 0x1000 || psize == 0x10000 ||
-	      psize == 0x1000000))
-		goto out_srcu;
+	if (psize >= 0x1000000)
+		psize = 0x1000000;
+	else if (psize >= 0x10000)
+		psize = 0x10000;
+	else
+		psize = 0x1000;
+	porder = __ilog2(psize);
 
 	senc = slb_pgsize_encoding(psize);
 	kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
@@ -320,7 +320,6 @@ kvm_novcpu_exit:
 	stw	r12, STACK_SLOT_TRAP(r1)
 	bl	kvmhv_commence_exit
 	nop
-	lwz	r12, STACK_SLOT_TRAP(r1)
 	b	kvmhv_switch_to_host
 
 /*

@@ -1220,6 +1219,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 
 secondary_too_late:
 	li	r12, 0
+	stw	r12, STACK_SLOT_TRAP(r1)
 	cmpdi	r4, 0
 	beq	11f
 	stw	r12, VCPU_TRAP(r4)

@@ -1558,12 +1558,12 @@ mc_cont:
 3:	stw	r5,VCPU_SLB_MAX(r9)
 
 guest_bypass:
+	stw	r12, STACK_SLOT_TRAP(r1)
 	mr	r3, r12
 	/* Increment exit count, poke other threads to exit */
 	bl	kvmhv_commence_exit
 	nop
 	ld	r9, HSTATE_KVM_VCPU(r13)
-	lwz	r12, VCPU_TRAP(r9)
 
 	/* Stop others sending VCPU interrupts to this physical CPU */
 	li	r0, -1

@@ -1898,6 +1898,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
 	 * POWER7/POWER8 guest -> host partition switch code.
 	 * We don't have to lock against tlbies but we do
 	 * have to coordinate the hardware threads.
+	 * Here STACK_SLOT_TRAP(r1) contains the trap number.
 	 */
 kvmhv_switch_to_host:
 	/* Secondary threads wait for primary to do partition switch */

@@ -1950,12 +1951,12 @@ BEGIN_FTR_SECTION
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 
 	/* If HMI, call kvmppc_realmode_hmi_handler() */
+	lwz	r12, STACK_SLOT_TRAP(r1)
 	cmpwi	r12, BOOK3S_INTERRUPT_HMI
 	bne	27f
 	bl	kvmppc_realmode_hmi_handler
 	nop
 	cmpdi	r3, 0
-	li	r12, BOOK3S_INTERRUPT_HMI
 	/*
 	 * At this point kvmppc_realmode_hmi_handler may have resync-ed
 	 * the TB, and if it has, we must not subtract the guest timebase

@@ -2008,10 +2009,8 @@ BEGIN_FTR_SECTION
 	lwz	r8, KVM_SPLIT_DO_RESTORE(r3)
 	cmpwi	r8, 0
 	beq	47f
-	stw	r12, STACK_SLOT_TRAP(r1)
 	bl	kvmhv_p9_restore_lpcr
 	nop
-	lwz	r12, STACK_SLOT_TRAP(r1)
 	b	48f
 47:
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

@@ -2049,6 +2048,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
 	li	r0, KVM_GUEST_MODE_NONE
 	stb	r0, HSTATE_IN_GUEST(r13)
 
+	lwz	r12, STACK_SLOT_TRAP(r1)	/* return trap # in r12 */
 	ld	r0, SFS+PPC_LR_STKOFF(r1)
 	addi	r1, r1, SFS
 	mtlr	r0
@@ -1345,7 +1345,7 @@ static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
 int kvmppc_handle_load128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		unsigned int rt, int is_default_endian)
 {
-	enum emulation_result emulated;
+	enum emulation_result emulated = EMULATE_DONE;
 
 	while (vcpu->arch.mmio_vmx_copy_nums) {
 		emulated = __kvmppc_handle_load(run, vcpu, rt, 8,

@@ -1608,7 +1608,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
 	kvm_sigset_deactivate(vcpu);
 
+#ifdef CONFIG_ALTIVEC
 out:
+#endif
 	vcpu_put(vcpu);
 	return r;
 }
@@ -240,6 +240,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
 	 *   goto out;
 	 */
 	PPC_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries));
+	PPC_RLWINM(b2p_index, b2p_index, 0, 0, 31);
 	PPC_CMPLW(b2p_index, b2p[TMP_REG_1]);
 	PPC_BCC(COND_GE, out);
 
@@ -63,6 +63,7 @@ static inline int init_new_context(struct task_struct *tsk,
 				   _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
 		/* pgd_alloc() did not account this pmd */
 		mm_inc_nr_pmds(mm);
+		mm_inc_nr_puds(mm);
 	}
 	crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
 	return 0;
@@ -14,6 +14,7 @@
 #include <asm/processor.h>
 #include <asm/cache.h>
 #include <asm/ctl_reg.h>
+#include <asm/dwarf.h>
 #include <asm/errno.h>
 #include <asm/ptrace.h>
 #include <asm/thread_info.h>

@@ -230,7 +231,7 @@ _PIF_WORK	= (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)
 	.hidden	\name
 	.type	\name,@function
 \name:
-	.cfi_startproc
+	CFI_STARTPROC
 #ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
 	exrl	0,0f
 #else

@@ -239,7 +240,7 @@ _PIF_WORK	= (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)
 #endif
 	j	.
 0:	br	\reg
-	.cfi_endproc
+	CFI_ENDPROC
 	.endm
 
 	GEN_BR_THUNK __s390x_indirect_jump_r1use_r9,%r9,%r1

@@ -426,13 +427,13 @@ ENTRY(system_call)
 	UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
 	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
 	stmg	%r0,%r7,__PT_R0(%r11)
-	# clear user controlled register to prevent speculative use
-	xgr	%r0,%r0
 	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
 	mvc	__PT_PSW(16,%r11),__LC_SVC_OLD_PSW
 	mvc	__PT_INT_CODE(4,%r11),__LC_SVC_ILC
 	stg	%r14,__PT_FLAGS(%r11)
 .Lsysc_do_svc:
+	# clear user controlled register to prevent speculative use
+	xgr	%r0,%r0
 	# load address of system call table
 	lg	%r10,__THREAD_sysc_table(%r13,%r12)
 	llgh	%r8,__PT_INT_CODE+2(%r11)

@@ -1439,6 +1440,7 @@ cleanup_critical:
 	stg	%r15,__LC_SYSTEM_TIMER
 0:	# update accounting time stamp
 	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
+	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
 	# set up saved register r11
 	lg	%r15,__LC_KERNEL_STACK
 	la	%r9,STACK_FRAME_OVERHEAD(%r15)
@@ -2,8 +2,8 @@
 #include <linux/module.h>
 #include <asm/nospec-branch.h>
 
-int nospec_call_disable = IS_ENABLED(EXPOLINE_OFF);
-int nospec_return_disable = !IS_ENABLED(EXPOLINE_FULL);
+int nospec_call_disable = IS_ENABLED(CONFIG_EXPOLINE_OFF);
+int nospec_return_disable = !IS_ENABLED(CONFIG_EXPOLINE_FULL);
 
 static int __init nospectre_v2_setup_early(char *str)
 {
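This fix works because IS_ENABLED() only reports true for macros literally defined to 1 — i.e. real CONFIG_* symbols generated by Kconfig — while an undefined token like bare EXPOLINE_OFF silently evaluates to 0, so the old initializers ignored the configuration entirely. A self-contained sketch of the kernel's trick from include/linux/kconfig.h (simplified: the real IS_ENABLED() also folds in the CONFIG_..._MODULE variant):

#include <stdio.h>

/* If `option` is defined to 1, the placeholder pastes to "0," and the
 * second-argument selector picks 1; otherwise it picks 0. */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x) ___is_defined(x)
#define IS_ENABLED(option) __is_defined(option)

#define CONFIG_EXPOLINE_OFF 1	/* stand-in for a Kconfig-generated define */

int main(void)
{
	/* Prints "1 0": the CONFIG_ symbol is defined to 1, while bare
	 * EXPOLINE_OFF is undefined and always evaluates to 0 — which is
	 * exactly why the unprefixed spellings above were wrong. */
	printf("%d %d\n", IS_ENABLED(CONFIG_EXPOLINE_OFF), IS_ENABLED(EXPOLINE_OFF));
	return 0;
}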
@@ -86,6 +86,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
 	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
 	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
+	{ "deliver_io_interrupt", VCPU_STAT(deliver_io_int) },
 	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
 	{ "instruction_epsw", VCPU_STAT(instruction_epsw) },
 	{ "instruction_gs", VCPU_STAT(instruction_gs) },

@@ -2146,6 +2147,7 @@ static void sca_add_vcpu(struct kvm_vcpu *vcpu)
 		/* we still need the basic sca for the ipte control */
 		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
 		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
+		return;
 	}
 	read_lock(&vcpu->kvm->arch.sca_lock);
 	if (vcpu->kvm->arch.use_esca) {
@@ -163,13 +163,10 @@ static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
 	pte_unmap(pte);
 }
 
-void set_pmd_at(struct mm_struct *mm, unsigned long addr,
-		pmd_t *pmdp, pmd_t pmd)
+static void __set_pmd_acct(struct mm_struct *mm, unsigned long addr,
+			   pmd_t orig, pmd_t pmd)
 {
-	pmd_t orig = *pmdp;
-
-	*pmdp = pmd;
-
 	if (mm == &init_mm)
 		return;
 

@@ -219,6 +216,15 @@ static void __set_pmd_acct(struct mm_struct *mm, unsigned long addr,
 	}
 }
 
+void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+		pmd_t *pmdp, pmd_t pmd)
+{
+	pmd_t orig = *pmdp;
+
+	*pmdp = pmd;
+	__set_pmd_acct(mm, addr, orig, pmd);
+}
+
 static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
 		unsigned long address, pmd_t *pmdp, pmd_t pmd)
 {

@@ -227,6 +233,7 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
 	do {
 		old = *pmdp;
 	} while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd);
+	__set_pmd_acct(vma->vm_mm, address, old, pmd);
 
 	return old;
 }
@@ -2307,7 +2307,7 @@ choice
 	  it can be used to assist security vulnerability exploitation.
 
 	  This setting can be changed at boot time via the kernel command
-	  line parameter vsyscall=[native|emulate|none].
+	  line parameter vsyscall=[emulate|none].
 
 	  On a system with recent enough glibc (2.14 or newer) and no
 	  static binaries, you can say None without a performance penalty

@@ -2315,15 +2315,6 @@ choice
 
 	  If unsure, select "Emulate".
 
-	config LEGACY_VSYSCALL_NATIVE
-		bool "Native"
-		help
-		  Actual executable code is located in the fixed vsyscall
-		  address mapping, implementing time() efficiently. Since
-		  this makes the mapping executable, it can be used during
-		  security vulnerability exploitation (traditionally as
-		  ROP gadgets). This configuration is not recommended.
-
 	config LEGACY_VSYSCALL_EMULATE
 		bool "Emulate"
 		help
@@ -363,9 +363,7 @@ ENTRY(entry_INT80_compat)
 	pushq	2*8(%rdi)		/* regs->ip */
 	pushq	1*8(%rdi)		/* regs->orig_ax */
 
-	movq	(%rdi), %rdi		/* restore %rdi */
-
-	pushq	%rdi			/* pt_regs->di */
+	pushq	(%rdi)			/* pt_regs->di */
 	pushq	%rsi			/* pt_regs->si */
 	pushq	%rdx			/* pt_regs->dx */
 	pushq	%rcx			/* pt_regs->cx */

@@ -406,15 +404,3 @@ ENTRY(entry_INT80_compat)
 	TRACE_IRQS_ON
 	jmp	swapgs_restore_regs_and_return_to_usermode
 END(entry_INT80_compat)
-
-ENTRY(stub32_clone)
-	/*
-	 * The 32-bit clone ABI is: clone(..., int tls_val, int *child_tidptr).
-	 * The 64-bit clone ABI is: clone(..., int *child_tidptr, int tls_val).
-	 *
-	 * The native 64-bit kernel's sys_clone() implements the latter,
-	 * so we need to swap arguments here before calling it:
-	 */
-	xchg	%r8, %rcx
-	jmp	sys_clone
-ENDPROC(stub32_clone)
@@ -8,12 +8,12 @@
 #
 0	i386	restart_syscall		sys_restart_syscall
 1	i386	exit			sys_exit
-2	i386	fork			sys_fork			sys_fork
+2	i386	fork			sys_fork
 3	i386	read			sys_read
 4	i386	write			sys_write
 5	i386	open			sys_open			compat_sys_open
 6	i386	close			sys_close
-7	i386	waitpid			sys_waitpid			sys32_waitpid
+7	i386	waitpid			sys_waitpid			compat_sys_x86_waitpid
 8	i386	creat			sys_creat
 9	i386	link			sys_link
 10	i386	unlink			sys_unlink

@@ -78,7 +78,7 @@
 69	i386	ssetmask		sys_ssetmask
 70	i386	setreuid		sys_setreuid16
 71	i386	setregid		sys_setregid16
-72	i386	sigsuspend		sys_sigsuspend			sys_sigsuspend
+72	i386	sigsuspend		sys_sigsuspend
 73	i386	sigpending		sys_sigpending			compat_sys_sigpending
 74	i386	sethostname		sys_sethostname
 75	i386	setrlimit		sys_setrlimit			compat_sys_setrlimit

@@ -96,7 +96,7 @@
 87	i386	swapon			sys_swapon
 88	i386	reboot			sys_reboot
 89	i386	readdir			sys_old_readdir			compat_sys_old_readdir
-90	i386	mmap			sys_old_mmap			sys32_mmap
+90	i386	mmap			sys_old_mmap			compat_sys_x86_mmap
 91	i386	munmap			sys_munmap
 92	i386	truncate		sys_truncate			compat_sys_truncate
 93	i386	ftruncate		sys_ftruncate			compat_sys_ftruncate

@@ -126,7 +126,7 @@
 117	i386	ipc			sys_ipc				compat_sys_ipc
 118	i386	fsync			sys_fsync
 119	i386	sigreturn		sys_sigreturn			sys32_sigreturn
-120	i386	clone			sys_clone			stub32_clone
+120	i386	clone			sys_clone			compat_sys_x86_clone
 121	i386	setdomainname		sys_setdomainname
 122	i386	uname			sys_newuname
 123	i386	modify_ldt		sys_modify_ldt

@@ -186,8 +186,8 @@
 177	i386	rt_sigtimedwait		sys_rt_sigtimedwait		compat_sys_rt_sigtimedwait
 178	i386	rt_sigqueueinfo		sys_rt_sigqueueinfo		compat_sys_rt_sigqueueinfo
 179	i386	rt_sigsuspend		sys_rt_sigsuspend
-180	i386	pread64			sys_pread64			sys32_pread
-181	i386	pwrite64		sys_pwrite64			sys32_pwrite
+180	i386	pread64			sys_pread64			compat_sys_x86_pread
+181	i386	pwrite64		sys_pwrite64			compat_sys_x86_pwrite
 182	i386	chown			sys_chown16
 183	i386	getcwd			sys_getcwd
 184	i386	capget			sys_capget

@@ -196,14 +196,14 @@
 187	i386	sendfile		sys_sendfile			compat_sys_sendfile
 188	i386	getpmsg
 189	i386	putpmsg
-190	i386	vfork			sys_vfork			sys_vfork
+190	i386	vfork			sys_vfork
 191	i386	ugetrlimit		sys_getrlimit			compat_sys_getrlimit
 192	i386	mmap2			sys_mmap_pgoff
-193	i386	truncate64		sys_truncate64			sys32_truncate64
-194	i386	ftruncate64		sys_ftruncate64			sys32_ftruncate64
-195	i386	stat64			sys_stat64			sys32_stat64
-196	i386	lstat64			sys_lstat64			sys32_lstat64
-197	i386	fstat64			sys_fstat64			sys32_fstat64
+193	i386	truncate64		sys_truncate64			compat_sys_x86_truncate64
+194	i386	ftruncate64		sys_ftruncate64			compat_sys_x86_ftruncate64
+195	i386	stat64			sys_stat64			compat_sys_x86_stat64
+196	i386	lstat64			sys_lstat64			compat_sys_x86_lstat64
+197	i386	fstat64			sys_fstat64			compat_sys_x86_fstat64
 198	i386	lchown32		sys_lchown
 199	i386	getuid32		sys_getuid
 200	i386	getgid32		sys_getgid

@@ -231,7 +231,7 @@
 # 222 is unused
 # 223 is unused
 224	i386	gettid			sys_gettid
-225	i386	readahead		sys_readahead			sys32_readahead
+225	i386	readahead		sys_readahead			compat_sys_x86_readahead
 226	i386	setxattr		sys_setxattr
 227	i386	lsetxattr		sys_lsetxattr
 228	i386	fsetxattr		sys_fsetxattr

@@ -256,7 +256,7 @@
 247	i386	io_getevents		sys_io_getevents		compat_sys_io_getevents
 248	i386	io_submit		sys_io_submit			compat_sys_io_submit
 249	i386	io_cancel		sys_io_cancel
-250	i386	fadvise64		sys_fadvise64			sys32_fadvise64
+250	i386	fadvise64		sys_fadvise64			compat_sys_x86_fadvise64
 # 251 is available for reuse (was briefly sys_set_zone_reclaim)
 252	i386	exit_group		sys_exit_group
 253	i386	lookup_dcookie		sys_lookup_dcookie		compat_sys_lookup_dcookie

@@ -278,7 +278,7 @@
 269	i386	fstatfs64		sys_fstatfs64			compat_sys_fstatfs64
 270	i386	tgkill			sys_tgkill
 271	i386	utimes			sys_utimes			compat_sys_utimes
-272	i386	fadvise64_64		sys_fadvise64_64		sys32_fadvise64_64
+272	i386	fadvise64_64		sys_fadvise64_64		compat_sys_x86_fadvise64_64
 273	i386	vserver
 274	i386	mbind			sys_mbind
 275	i386	get_mempolicy		sys_get_mempolicy		compat_sys_get_mempolicy

@@ -306,7 +306,7 @@
 297	i386	mknodat			sys_mknodat
 298	i386	fchownat		sys_fchownat
 299	i386	futimesat		sys_futimesat			compat_sys_futimesat
-300	i386	fstatat64		sys_fstatat64			sys32_fstatat
+300	i386	fstatat64		sys_fstatat64			compat_sys_x86_fstatat
 301	i386	unlinkat		sys_unlinkat
 302	i386	renameat		sys_renameat
 303	i386	linkat			sys_linkat

@@ -320,7 +320,7 @@
 311	i386	set_robust_list		sys_set_robust_list		compat_sys_set_robust_list
 312	i386	get_robust_list		sys_get_robust_list		compat_sys_get_robust_list
 313	i386	splice			sys_splice
-314	i386	sync_file_range		sys_sync_file_range		sys32_sync_file_range
+314	i386	sync_file_range		sys_sync_file_range		compat_sys_x86_sync_file_range
 315	i386	tee			sys_tee
 316	i386	vmsplice		sys_vmsplice			compat_sys_vmsplice
 317	i386	move_pages		sys_move_pages			compat_sys_move_pages

@@ -330,7 +330,7 @@
 321	i386	signalfd		sys_signalfd			compat_sys_signalfd
 322	i386	timerfd_create		sys_timerfd_create
 323	i386	eventfd			sys_eventfd
-324	i386	fallocate		sys_fallocate			sys32_fallocate
+324	i386	fallocate		sys_fallocate			compat_sys_x86_fallocate
 325	i386	timerfd_settime		sys_timerfd_settime		compat_sys_timerfd_settime
 326	i386	timerfd_gettime		sys_timerfd_gettime		compat_sys_timerfd_gettime
 327	i386	signalfd4		sys_signalfd4			compat_sys_signalfd4
@@ -42,10 +42,8 @@
 #define CREATE_TRACE_POINTS
 #include "vsyscall_trace.h"
 
-static enum { EMULATE, NATIVE, NONE } vsyscall_mode =
-#if defined(CONFIG_LEGACY_VSYSCALL_NATIVE)
-	NATIVE;
-#elif defined(CONFIG_LEGACY_VSYSCALL_NONE)
+static enum { EMULATE, NONE } vsyscall_mode =
+#ifdef CONFIG_LEGACY_VSYSCALL_NONE
 	NONE;
 #else
 	EMULATE;

@@ -56,8 +54,6 @@ static int __init vsyscall_setup(char *str)
 	if (str) {
 		if (!strcmp("emulate", str))
 			vsyscall_mode = EMULATE;
-		else if (!strcmp("native", str))
-			vsyscall_mode = NATIVE;
 		else if (!strcmp("none", str))
 			vsyscall_mode = NONE;
 		else

@@ -139,10 +135,6 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
 
 	WARN_ON_ONCE(address != regs->ip);
 
-	/* This should be unreachable in NATIVE mode. */
-	if (WARN_ON(vsyscall_mode == NATIVE))
-		return false;
-
 	if (vsyscall_mode == NONE) {
 		warn_bad_vsyscall(KERN_INFO, regs,
 				  "vsyscall attempted with vsyscall=none");

@@ -370,9 +362,7 @@ void __init map_vsyscall(void)
 
 	if (vsyscall_mode != NONE) {
 		__set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
-			     vsyscall_mode == NATIVE
-			     ? PAGE_KERNEL_VSYSCALL
-			     : PAGE_KERNEL_VVAR);
+			     PAGE_KERNEL_VVAR);
 		set_vsyscall_pgtable_user_bits(swapper_pg_dir);
 	}
 
@@ -51,15 +51,14 @@
 #define AA(__x)		((unsigned long)(__x))

-asmlinkage long sys32_truncate64(const char __user *filename,
-				 unsigned long offset_low,
-				 unsigned long offset_high)
+COMPAT_SYSCALL_DEFINE3(x86_truncate64, const char __user *, filename,
+		       unsigned long, offset_low, unsigned long, offset_high)
 {
 	return sys_truncate(filename, ((loff_t) offset_high << 32) | offset_low);
 }

-asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
-				  unsigned long offset_high)
+COMPAT_SYSCALL_DEFINE3(x86_ftruncate64, unsigned int, fd,
+		       unsigned long, offset_low, unsigned long, offset_high)
 {
 	return sys_ftruncate(fd, ((loff_t) offset_high << 32) | offset_low);
 }

@@ -96,8 +95,8 @@ static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
 	return 0;
 }

-asmlinkage long sys32_stat64(const char __user *filename,
-			     struct stat64 __user *statbuf)
+COMPAT_SYSCALL_DEFINE2(x86_stat64, const char __user *, filename,
+		       struct stat64 __user *, statbuf)
 {
 	struct kstat stat;
 	int ret = vfs_stat(filename, &stat);

@@ -107,8 +106,8 @@ asmlinkage long sys32_stat64(const char __user *filename,
 	return ret;
 }

-asmlinkage long sys32_lstat64(const char __user *filename,
-			      struct stat64 __user *statbuf)
+COMPAT_SYSCALL_DEFINE2(x86_lstat64, const char __user *, filename,
+		       struct stat64 __user *, statbuf)
 {
 	struct kstat stat;
 	int ret = vfs_lstat(filename, &stat);

@@ -117,7 +116,8 @@ asmlinkage long sys32_lstat64(const char __user *filename,
 	return ret;
 }

-asmlinkage long sys32_fstat64(unsigned int fd, struct stat64 __user *statbuf)
+COMPAT_SYSCALL_DEFINE2(x86_fstat64, unsigned int, fd,
+		       struct stat64 __user *, statbuf)
 {
 	struct kstat stat;
 	int ret = vfs_fstat(fd, &stat);

@@ -126,8 +126,9 @@ asmlinkage long sys32_fstat64(unsigned int fd, struct stat64 __user *statbuf)
 	return ret;
 }

-asmlinkage long sys32_fstatat(unsigned int dfd, const char __user *filename,
-			      struct stat64 __user *statbuf, int flag)
+COMPAT_SYSCALL_DEFINE4(x86_fstatat, unsigned int, dfd,
+		       const char __user *, filename,
+		       struct stat64 __user *, statbuf, int, flag)
 {
 	struct kstat stat;
 	int error;

@@ -153,7 +154,7 @@ struct mmap_arg_struct32 {
 	unsigned int offset;
 };

-asmlinkage long sys32_mmap(struct mmap_arg_struct32 __user *arg)
+COMPAT_SYSCALL_DEFINE1(x86_mmap, struct mmap_arg_struct32 __user *, arg)
 {
 	struct mmap_arg_struct32 a;

@@ -167,22 +168,22 @@ asmlinkage long sys32_mmap(struct mmap_arg_struct32 __user *arg)
 			       a.offset>>PAGE_SHIFT);
 }

-asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
-			      int options)
+COMPAT_SYSCALL_DEFINE3(x86_waitpid, compat_pid_t, pid, unsigned int __user *,
+		       stat_addr, int, options)
 {
 	return compat_sys_wait4(pid, stat_addr, options, NULL);
 }

 /* warning: next two assume little endian */
-asmlinkage long sys32_pread(unsigned int fd, char __user *ubuf, u32 count,
-			    u32 poslo, u32 poshi)
+COMPAT_SYSCALL_DEFINE5(x86_pread, unsigned int, fd, char __user *, ubuf,
+		       u32, count, u32, poslo, u32, poshi)
 {
 	return sys_pread64(fd, ubuf, count,
 			   ((loff_t)AA(poshi) << 32) | AA(poslo));
 }

-asmlinkage long sys32_pwrite(unsigned int fd, const char __user *ubuf,
-			     u32 count, u32 poslo, u32 poshi)
+COMPAT_SYSCALL_DEFINE5(x86_pwrite, unsigned int, fd, const char __user *, ubuf,
+		       u32, count, u32, poslo, u32, poshi)
 {
 	return sys_pwrite64(fd, ubuf, count,
 			    ((loff_t)AA(poshi) << 32) | AA(poslo));

@@ -193,8 +194,9 @@ asmlinkage long sys32_pwrite(unsigned int fd, const char __user *ubuf,
  * Some system calls that need sign extended arguments. This could be
  * done by a generic wrapper.
  */
-long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
-			__u32 len_low, __u32 len_high, int advice)
+COMPAT_SYSCALL_DEFINE6(x86_fadvise64_64, int, fd, __u32, offset_low,
+		       __u32, offset_high, __u32, len_low, __u32, len_high,
+		       int, advice)
 {
 	return sys_fadvise64_64(fd,
 				(((u64)offset_high)<<32) | offset_low,

@@ -202,31 +204,43 @@ long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
 				advice);
 }

-asmlinkage ssize_t sys32_readahead(int fd, unsigned off_lo, unsigned off_hi,
-				   size_t count)
+COMPAT_SYSCALL_DEFINE4(x86_readahead, int, fd, unsigned int, off_lo,
+		       unsigned int, off_hi, size_t, count)
 {
 	return sys_readahead(fd, ((u64)off_hi << 32) | off_lo, count);
 }

-asmlinkage long sys32_sync_file_range(int fd, unsigned off_low, unsigned off_hi,
-				      unsigned n_low, unsigned n_hi, int flags)
+COMPAT_SYSCALL_DEFINE6(x86_sync_file_range, int, fd, unsigned int, off_low,
+		       unsigned int, off_hi, unsigned int, n_low,
+		       unsigned int, n_hi, int, flags)
 {
 	return sys_sync_file_range(fd,
 				   ((u64)off_hi << 32) | off_low,
 				   ((u64)n_hi << 32) | n_low, flags);
 }

-asmlinkage long sys32_fadvise64(int fd, unsigned offset_lo, unsigned offset_hi,
-				size_t len, int advice)
+COMPAT_SYSCALL_DEFINE5(x86_fadvise64, int, fd, unsigned int, offset_lo,
+		       unsigned int, offset_hi, size_t, len, int, advice)
 {
 	return sys_fadvise64_64(fd, ((u64)offset_hi << 32) | offset_lo,
 				len, advice);
 }

-asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_lo,
-				unsigned offset_hi, unsigned len_lo,
-				unsigned len_hi)
+COMPAT_SYSCALL_DEFINE6(x86_fallocate, int, fd, int, mode,
+		       unsigned int, offset_lo, unsigned int, offset_hi,
+		       unsigned int, len_lo, unsigned int, len_hi)
 {
 	return sys_fallocate(fd, mode, ((u64)offset_hi << 32) | offset_lo,
 			     ((u64)len_hi << 32) | len_lo);
 }
+
+/*
+ * The 32-bit clone ABI is CONFIG_CLONE_BACKWARDS
+ */
+COMPAT_SYSCALL_DEFINE5(x86_clone, unsigned long, clone_flags,
+		       unsigned long, newsp, int __user *, parent_tidptr,
+		       unsigned long, tls_val, int __user *, child_tidptr)
+{
+	return sys_clone(clone_flags, newsp, parent_tidptr, child_tidptr,
+			 tls_val);
+}

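The pattern common to all of these wrappers is reassembling a 64-bit value from the two 32-bit halves a compat task passes in separate registers. A stand-alone sketch of that arithmetic (hypothetical helper name, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Rebuild a 64-bit file offset from 32-bit halves, exactly as the
 * wrappers above do with ((loff_t)hi << 32) | lo. */
static int64_t join_offset(uint32_t lo, uint32_t hi)
{
	return ((int64_t)hi << 32) | lo;
}

int main(void)
{
	/* 0x100000200 split as lo=0x200, hi=0x1 */
	printf("%lld\n", (long long)join_offset(0x200u, 0x1u));
	return 0;
}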
@@ -316,6 +316,7 @@
 #define X86_FEATURE_VPCLMULQDQ		(16*32+10) /* Carry-Less Multiplication Double Quadword */
 #define X86_FEATURE_AVX512_VNNI		(16*32+11) /* Vector Neural Network Instructions */
 #define X86_FEATURE_AVX512_BITALG	(16*32+12) /* Support for VPOPCNT[B,W] and VPSHUF-BITQMB instructions */
+#define X86_FEATURE_TME			(16*32+13) /* Intel Total Memory Encryption */
 #define X86_FEATURE_AVX512_VPOPCNTDQ	(16*32+14) /* POPCNT for vectors of DW/QW */
 #define X86_FEATURE_LA57		(16*32+16) /* 5-level page tables */
 #define X86_FEATURE_RDPID		(16*32+22) /* RDPID instruction */

@@ -328,6 +329,7 @@
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
 #define X86_FEATURE_AVX512_4VNNIW	(18*32+ 2) /* AVX-512 Neural Network Instructions */
 #define X86_FEATURE_AVX512_4FMAPS	(18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
+#define X86_FEATURE_PCONFIG		(18*32+18) /* Intel PCONFIG */
 #define X86_FEATURE_SPEC_CTRL		(18*32+26) /* "" Speculation Control (IBRS + IBPB) */
 #define X86_FEATURE_INTEL_STIBP		(18*32+27) /* "" Single Thread Indirect Branch Predictors */
 #define X86_FEATURE_ARCH_CAPABILITIES	(18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */

@@ -39,6 +39,7 @@ struct device;

 enum ucode_state {
 	UCODE_OK	= 0,
+	UCODE_NEW,
 	UCODE_UPDATED,
 	UCODE_NFOUND,
 	UCODE_ERROR,

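UCODE_NEW slots in before UCODE_UPDATED, so every value above UCODE_UPDATED is a failure state; later hunks in this merge rely on that ordering with checks like ret > UCODE_UPDATED. A small sketch of the trick (mirrored enum, illustrative only):

#include <stdio.h>

/* Same ordering as the kernel's ucode_state: success values first,
 * error values after UCODE_UPDATED, so one comparison classifies all. */
enum ucode_state { UCODE_OK = 0, UCODE_NEW, UCODE_UPDATED, UCODE_NFOUND, UCODE_ERROR };

static const char *classify(enum ucode_state s)
{
	return s > UCODE_UPDATED ? "error" : "success";
}

int main(void)
{
	printf("UCODE_NEW: %s\n", classify(UCODE_NEW));       /* success */
	printf("UCODE_NFOUND: %s\n", classify(UCODE_NFOUND)); /* error */
	return 0;
}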
@@ -183,7 +183,10 @@
  * otherwise we'll run out of registers. We don't care about CET
  * here, anyway.
  */
-# define CALL_NOSPEC ALTERNATIVE("call *%[thunk_target]\n",	\
+# define CALL_NOSPEC						\
+	ALTERNATIVE(						\
+	ANNOTATE_RETPOLINE_SAFE					\
+	"call *%[thunk_target]\n",				\
 	"       jmp    904f;\n"					\
 	"       .align 16\n"					\
 	"901:	call   903f;\n"					\

@@ -174,7 +174,6 @@ enum page_cache_mode {
 #define __PAGE_KERNEL_RO		(__PAGE_KERNEL & ~_PAGE_RW)
 #define __PAGE_KERNEL_RX		(__PAGE_KERNEL_EXEC & ~_PAGE_RW)
 #define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_NOCACHE)
-#define __PAGE_KERNEL_VSYSCALL		(__PAGE_KERNEL_RX | _PAGE_USER)
 #define __PAGE_KERNEL_VVAR		(__PAGE_KERNEL_RO | _PAGE_USER)
 #define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
 #define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)

@@ -206,7 +205,6 @@ enum page_cache_mode {
 #define PAGE_KERNEL_NOCACHE	__pgprot(__PAGE_KERNEL_NOCACHE | _PAGE_ENC)
 #define PAGE_KERNEL_LARGE	__pgprot(__PAGE_KERNEL_LARGE | _PAGE_ENC)
 #define PAGE_KERNEL_LARGE_EXEC	__pgprot(__PAGE_KERNEL_LARGE_EXEC | _PAGE_ENC)
-#define PAGE_KERNEL_VSYSCALL	__pgprot(__PAGE_KERNEL_VSYSCALL | _PAGE_ENC)
 #define PAGE_KERNEL_VVAR	__pgprot(__PAGE_KERNEL_VVAR | _PAGE_ENC)

 #define PAGE_KERNEL_IO		__pgprot(__PAGE_KERNEL_IO)

@@ -10,6 +10,7 @@ extern struct exception_table_entry __stop___ex_table[];

 #if defined(CONFIG_X86_64)
 extern char __end_rodata_hpage_align[];
+extern char __entry_trampoline_start[], __entry_trampoline_end[];
 #endif

 #endif	/* _ASM_X86_SECTIONS_H */

@@ -20,31 +20,43 @@
 #include <asm/ia32.h>

 /* ia32/sys_ia32.c */
-asmlinkage long sys32_truncate64(const char __user *, unsigned long, unsigned long);
-asmlinkage long sys32_ftruncate64(unsigned int, unsigned long, unsigned long);
+asmlinkage long compat_sys_x86_truncate64(const char __user *, unsigned long,
+					  unsigned long);
+asmlinkage long compat_sys_x86_ftruncate64(unsigned int, unsigned long,
+					   unsigned long);

-asmlinkage long sys32_stat64(const char __user *, struct stat64 __user *);
-asmlinkage long sys32_lstat64(const char __user *, struct stat64 __user *);
-asmlinkage long sys32_fstat64(unsigned int, struct stat64 __user *);
-asmlinkage long sys32_fstatat(unsigned int, const char __user *,
+asmlinkage long compat_sys_x86_stat64(const char __user *,
+				      struct stat64 __user *);
+asmlinkage long compat_sys_x86_lstat64(const char __user *,
+				       struct stat64 __user *);
+asmlinkage long compat_sys_x86_fstat64(unsigned int, struct stat64 __user *);
+asmlinkage long compat_sys_x86_fstatat(unsigned int, const char __user *,
 			      struct stat64 __user *, int);
 struct mmap_arg_struct32;
-asmlinkage long sys32_mmap(struct mmap_arg_struct32 __user *);
+asmlinkage long compat_sys_x86_mmap(struct mmap_arg_struct32 __user *);

-asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
+asmlinkage long compat_sys_x86_waitpid(compat_pid_t, unsigned int __user *,
+				       int);

-asmlinkage long sys32_pread(unsigned int, char __user *, u32, u32, u32);
-asmlinkage long sys32_pwrite(unsigned int, const char __user *, u32, u32, u32);
+asmlinkage long compat_sys_x86_pread(unsigned int, char __user *, u32, u32,
+				     u32);
+asmlinkage long compat_sys_x86_pwrite(unsigned int, const char __user *, u32,
+				      u32, u32);

-long sys32_fadvise64_64(int, __u32, __u32, __u32, __u32, int);
-long sys32_vm86_warning(void);
+asmlinkage long compat_sys_x86_fadvise64_64(int, __u32, __u32, __u32, __u32,
+					    int);

-asmlinkage ssize_t sys32_readahead(int, unsigned, unsigned, size_t);
-asmlinkage long sys32_sync_file_range(int, unsigned, unsigned,
-				      unsigned, unsigned, int);
-asmlinkage long sys32_fadvise64(int, unsigned, unsigned, size_t, int);
-asmlinkage long sys32_fallocate(int, int, unsigned,
-				unsigned, unsigned, unsigned);
+asmlinkage ssize_t compat_sys_x86_readahead(int, unsigned int, unsigned int,
+					    size_t);
+asmlinkage long compat_sys_x86_sync_file_range(int, unsigned int, unsigned int,
+					       unsigned int, unsigned int,
+					       int);
+asmlinkage long compat_sys_x86_fadvise64(int, unsigned int, unsigned int,
+					 size_t, int);
+asmlinkage long compat_sys_x86_fallocate(int, int, unsigned int, unsigned int,
+					 unsigned int, unsigned int);
+asmlinkage long compat_sys_x86_clone(unsigned long, unsigned long, int __user *,
+				     unsigned long, int __user *);

 /* ia32/ia32_signal.c */
 asmlinkage long sys32_sigreturn(void);

@@ -30,6 +30,7 @@ struct mce {
 	__u64 synd;	/* MCA_SYND MSR: only valid on SMCA systems */
 	__u64 ipid;	/* MCA_IPID MSR: only valid on SMCA systems */
 	__u64 ppin;	/* Protected Processor Inventory Number */
+	__u32 microcode;/* Microcode revision */
 };

 #define MCE_GET_RECORD_LEN   _IOR('M', 1, int)

@@ -105,7 +105,7 @@ static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
 /*
  * Early microcode releases for the Spectre v2 mitigation were broken.
  * Information taken from;
- * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/01/microcode-update-guidance.pdf
+ * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/03/microcode-update-guidance.pdf
  * - https://kb.vmware.com/s/article/52345
  * - Microcode revisions observed in the wild
  * - Release note from 20180108 microcode release

@@ -123,7 +123,6 @@ static const struct sku_microcode spectre_bad_microcodes[] = {
 	{ INTEL_FAM6_KABYLAKE_MOBILE,	0x09,	0x80 },
 	{ INTEL_FAM6_SKYLAKE_X,		0x03,	0x0100013e },
 	{ INTEL_FAM6_SKYLAKE_X,		0x04,	0x0200003c },
-	{ INTEL_FAM6_SKYLAKE_DESKTOP,	0x03,	0xc2 },
 	{ INTEL_FAM6_BROADWELL_CORE,	0x04,	0x28 },
 	{ INTEL_FAM6_BROADWELL_GT3E,	0x01,	0x1b },
 	{ INTEL_FAM6_BROADWELL_XEON_D,	0x02,	0x14 },

@@ -144,6 +143,13 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
 {
 	int i;

+	/*
+	 * We know that the hypervisor lie to us on the microcode version so
+	 * we may as well hope that it is running the correct version.
+	 */
+	if (cpu_has(c, X86_FEATURE_HYPERVISOR))
+		return false;
+
 	for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
 		if (c->x86_model == spectre_bad_microcodes[i].model &&
 		    c->x86_stepping == spectre_bad_microcodes[i].stepping)

@@ -56,6 +56,9 @@

 static DEFINE_MUTEX(mce_log_mutex);

+/* sysfs synchronization */
+static DEFINE_MUTEX(mce_sysfs_mutex);
+
 #define CREATE_TRACE_POINTS
 #include <trace/events/mce.h>

@@ -130,6 +133,8 @@ void mce_setup(struct mce *m)

 	if (this_cpu_has(X86_FEATURE_INTEL_PPIN))
 		rdmsrl(MSR_PPIN, m->ppin);
+
+	m->microcode = boot_cpu_data.microcode;
 }

 DEFINE_PER_CPU(struct mce, injectm);

@@ -262,7 +267,7 @@ static void __print_mce(struct mce *m)
 	 */
 	pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
 		m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
-		cpu_data(m->extcpu).microcode);
+		m->microcode);
 }

 static void print_mce(struct mce *m)

@@ -2086,6 +2091,7 @@ static ssize_t set_ignore_ce(struct device *s,
 	if (kstrtou64(buf, 0, &new) < 0)
 		return -EINVAL;

+	mutex_lock(&mce_sysfs_mutex);
 	if (mca_cfg.ignore_ce ^ !!new) {
 		if (new) {
 			/* disable ce features */

@@ -2098,6 +2104,8 @@ static ssize_t set_ignore_ce(struct device *s,
 			on_each_cpu(mce_enable_ce, (void *)1, 1);
 		}
 	}
+	mutex_unlock(&mce_sysfs_mutex);
+
 	return size;
 }

@@ -2110,6 +2118,7 @@ static ssize_t set_cmci_disabled(struct device *s,
 	if (kstrtou64(buf, 0, &new) < 0)
 		return -EINVAL;

+	mutex_lock(&mce_sysfs_mutex);
 	if (mca_cfg.cmci_disabled ^ !!new) {
 		if (new) {
 			/* disable cmci */

@@ -2121,6 +2130,8 @@ static ssize_t set_cmci_disabled(struct device *s,
 			on_each_cpu(mce_enable_ce, NULL, 1);
 		}
 	}
+	mutex_unlock(&mce_sysfs_mutex);
+
 	return size;
 }

@@ -2128,8 +2139,19 @@ static ssize_t store_int_with_restart(struct device *s,
 				      struct device_attribute *attr,
 				      const char *buf, size_t size)
 {
-	ssize_t ret = device_store_int(s, attr, buf, size);
+	unsigned long old_check_interval = check_interval;
+	ssize_t ret = device_store_ulong(s, attr, buf, size);
+
+	if (check_interval == old_check_interval)
+		return ret;
+
+	if (check_interval < 1)
+		check_interval = 1;

+	mutex_lock(&mce_sysfs_mutex);
 	mce_restart();
+	mutex_unlock(&mce_sysfs_mutex);
+
 	return ret;
 }

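The reworked store_int_with_restart() only restarts the MCE timer machinery when the value actually changed, and does so under the new mce_sysfs_mutex. A minimal user-space sketch of the same compare-then-reconfigure-under-a-lock idea (hypothetical names, pthreads standing in for kernel mutexes):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;
static long check_interval = 300;

/* Store a new polling interval; only restart the poller on a real change,
 * and clamp nonsense values, as the kernel hunk above does. */
static void store_interval(long new_val)
{
	pthread_mutex_lock(&cfg_lock);
	if (new_val != check_interval) {
		check_interval = new_val < 1 ? 1 : new_val;
		printf("restarting poller, interval %ld\n", check_interval);
	}
	pthread_mutex_unlock(&cfg_lock);
}

int main(void)
{
	store_interval(60);	/* changed -> restart */
	store_interval(60);	/* unchanged -> no-op */
	return 0;
}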
@@ -339,7 +339,7 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
 		return -EINVAL;

 	ret = load_microcode_amd(true, x86_family(cpuid_1_eax), desc.data, desc.size);
-	if (ret != UCODE_OK)
+	if (ret > UCODE_UPDATED)
 		return -EINVAL;

 	return 0;

@@ -683,27 +683,35 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
 static enum ucode_state
 load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
 {
+	struct ucode_patch *p;
 	enum ucode_state ret;

 	/* free old equiv table */
 	free_equiv_cpu_table();

 	ret = __load_microcode_amd(family, data, size);
-
-	if (ret != UCODE_OK)
+	if (ret != UCODE_OK) {
 		cleanup();
-
-#ifdef CONFIG_X86_32
-	/* save BSP's matching patch for early load */
-	if (save) {
-		struct ucode_patch *p = find_patch(0);
-		if (p) {
-			memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
-			memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data),
-							       PATCH_MAX_SIZE));
-		}
+		return ret;
 	}
-#endif
+
+	p = find_patch(0);
+	if (!p) {
+		return ret;
+	} else {
+		if (boot_cpu_data.microcode == p->patch_id)
+			return ret;
+
+		ret = UCODE_NEW;
+	}
+
+	/* save BSP's matching patch for early load */
+	if (!save)
+		return ret;
+
+	memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
+	memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data), PATCH_MAX_SIZE));

 	return ret;
 }

@@ -22,13 +22,16 @@
 #define pr_fmt(fmt) "microcode: " fmt

 #include <linux/platform_device.h>
+#include <linux/stop_machine.h>
 #include <linux/syscore_ops.h>
 #include <linux/miscdevice.h>
 #include <linux/capability.h>
 #include <linux/firmware.h>
 #include <linux/kernel.h>
+#include <linux/delay.h>
 #include <linux/mutex.h>
 #include <linux/cpu.h>
+#include <linux/nmi.h>
 #include <linux/fs.h>
 #include <linux/mm.h>

@@ -64,6 +67,11 @@ LIST_HEAD(microcode_cache);
  */
 static DEFINE_MUTEX(microcode_mutex);

+/*
+ * Serialize late loading so that CPUs get updated one-by-one.
+ */
+static DEFINE_SPINLOCK(update_lock);
+
 struct ucode_cpu_info		ucode_cpu_info[NR_CPUS];

 struct cpu_info_ctx {

@@ -373,26 +381,23 @@ static int collect_cpu_info(int cpu)
 	return ret;
 }

-struct apply_microcode_ctx {
-	enum ucode_state err;
-};
-
 static void apply_microcode_local(void *arg)
 {
-	struct apply_microcode_ctx *ctx = arg;
+	enum ucode_state *err = arg;

-	ctx->err = microcode_ops->apply_microcode(smp_processor_id());
+	*err = microcode_ops->apply_microcode(smp_processor_id());
 }

 static int apply_microcode_on_target(int cpu)
 {
-	struct apply_microcode_ctx ctx = { .err = 0 };
+	enum ucode_state err;
 	int ret;

-	ret = smp_call_function_single(cpu, apply_microcode_local, &ctx, 1);
-	if (!ret)
-		ret = ctx.err;
+	ret = smp_call_function_single(cpu, apply_microcode_local, &err, 1);
+	if (!ret) {
+		if (err == UCODE_ERROR)
+			ret = 1;
+	}
 	return ret;
 }

@@ -489,19 +494,114 @@ static void __exit microcode_dev_exit(void)
 /* fake device for request_firmware */
 static struct platform_device	*microcode_pdev;

-static enum ucode_state reload_for_cpu(int cpu)
+/*
+ * Late loading dance. Why the heavy-handed stomp_machine effort?
+ *
+ * - HT siblings must be idle and not execute other code while the other sibling
+ *   is loading microcode in order to avoid any negative interactions caused by
+ *   the loading.
+ *
+ * - In addition, microcode update on the cores must be serialized until this
+ *   requirement can be relaxed in the future. Right now, this is conservative
+ *   and good.
+ */
+#define SPINUNIT 100 /* 100 nsec */
+
+static int check_online_cpus(void)
 {
-	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
-	enum ucode_state ustate;
+	if (num_online_cpus() == num_present_cpus())
+		return 0;

-	if (!uci->valid)
-		return UCODE_OK;
+	pr_err("Not all CPUs online, aborting microcode update.\n");

-	ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, true);
-	if (ustate != UCODE_OK)
-		return ustate;
+	return -EINVAL;
+}

-	return apply_microcode_on_target(cpu);
+static atomic_t late_cpus_in;
+static atomic_t late_cpus_out;
+
+static int __wait_for_cpus(atomic_t *t, long long timeout)
+{
+	int all_cpus = num_online_cpus();
+
+	atomic_inc(t);
+
+	while (atomic_read(t) < all_cpus) {
+		if (timeout < SPINUNIT) {
+			pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n",
+				all_cpus - atomic_read(t));
+			return 1;
+		}
+
+		ndelay(SPINUNIT);
+		timeout -= SPINUNIT;
+
+		touch_nmi_watchdog();
+	}
+	return 0;
+}
+
+/*
+ * Returns:
+ * < 0 - on error
+ *   0 - no update done
+ *   1 - microcode was updated
+ */
+static int __reload_late(void *info)
+{
+	int cpu = smp_processor_id();
+	enum ucode_state err;
+	int ret = 0;
+
+	/*
+	 * Wait for all CPUs to arrive. A load will not be attempted unless all
+	 * CPUs show up.
+	 * */
+	if (__wait_for_cpus(&late_cpus_in, NSEC_PER_SEC))
+		return -1;
+
+	spin_lock(&update_lock);
+	apply_microcode_local(&err);
+	spin_unlock(&update_lock);
+
+	if (err > UCODE_NFOUND) {
+		pr_warn("Error reloading microcode on CPU %d\n", cpu);
+		return -1;
+	/* siblings return UCODE_OK because their engine got updated already */
+	} else if (err == UCODE_UPDATED || err == UCODE_OK) {
+		ret = 1;
+	} else {
+		return ret;
+	}
+
+	/*
+	 * Increase the wait timeout to a safe value here since we're
+	 * serializing the microcode update and that could take a while on a
+	 * large number of CPUs. And that is fine as the *actual* timeout will
+	 * be determined by the last CPU finished updating and thus cut short.
+	 */
+	if (__wait_for_cpus(&late_cpus_out, NSEC_PER_SEC * num_online_cpus()))
+		panic("Timeout during microcode update!\n");
+
+	return ret;
+}
+
+/*
+ * Reload microcode late on all CPUs. Wait for a sec until they
+ * all gather together.
+ */
+static int microcode_reload_late(void)
+{
+	int ret;
+
+	atomic_set(&late_cpus_in,  0);
+	atomic_set(&late_cpus_out, 0);
+
+	ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask);
+	if (ret > 0)
+		microcode_check();
+
+	return ret;
 }

 static ssize_t reload_store(struct device *dev,

@@ -509,10 +609,9 @@ static ssize_t reload_store(struct device *dev,
 			    const char *buf, size_t size)
 {
 	enum ucode_state tmp_ret = UCODE_OK;
-	bool do_callback = false;
+	int bsp = boot_cpu_data.cpu_index;
 	unsigned long val;
 	ssize_t ret = 0;
-	int cpu;

 	ret = kstrtoul(buf, 0, &val);
 	if (ret)

@@ -521,29 +620,24 @@ static ssize_t reload_store(struct device *dev,
 	if (val != 1)
 		return size;

+	tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true);
+	if (tmp_ret != UCODE_NEW)
+		return size;
+
 	get_online_cpus();

+	ret = check_online_cpus();
+	if (ret)
+		goto put;
+
 	mutex_lock(&microcode_mutex);
-	for_each_online_cpu(cpu) {
-		tmp_ret = reload_for_cpu(cpu);
-		if (tmp_ret > UCODE_NFOUND) {
-			pr_warn("Error reloading microcode on CPU %d\n", cpu);
-
-			/* set retval for the first encountered reload error */
-			if (!ret)
-				ret = -EINVAL;
-		}
-
-		if (tmp_ret == UCODE_UPDATED)
-			do_callback = true;
-	}
-
-	if (!ret && do_callback)
-		microcode_check();
-
+	ret = microcode_reload_late();
 	mutex_unlock(&microcode_mutex);

+put:
 	put_online_cpus();

-	if (!ret)
+	if (ret >= 0)
 		ret = size;

 	return ret;

@@ -611,10 +705,8 @@ static enum ucode_state microcode_init_cpu(int cpu, bool refresh_fw)
 	if (system_state != SYSTEM_RUNNING)
 		return UCODE_NFOUND;

-	ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev,
-						     refresh_fw);
-
-	if (ustate == UCODE_OK) {
+	ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, refresh_fw);
+	if (ustate == UCODE_NEW) {
 		pr_debug("CPU%d updated upon init\n", cpu);
 		apply_microcode_on_target(cpu);
 	}

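__wait_for_cpus() is a two-phase rendezvous barrier: every CPU increments a shared counter, then spins until the counter reaches the number of online CPUs. A user-space sketch of the same shape with C11 atomics (illustrative only; the kernel version also decrements a timeout, ndelay()s between polls, and pets the NMI watchdog):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NCPUS 4

static atomic_int cpus_in;

/* Each thread checks in, then spins until everyone has arrived --
 * the same shape as __wait_for_cpus(), minus timeout handling. */
static void *rendezvous(void *arg)
{
	long id = (long)arg;

	atomic_fetch_add(&cpus_in, 1);
	while (atomic_load(&cpus_in) < NCPUS)
		;	/* busy-wait until all participants arrive */

	printf("cpu %ld past the barrier\n", id);
	return NULL;
}

int main(void)
{
	pthread_t t[NCPUS];

	for (long i = 0; i < NCPUS; i++)
		pthread_create(&t[i], NULL, rendezvous, (void *)i);
	for (int i = 0; i < NCPUS; i++)
		pthread_join(t[i], NULL);
	return 0;
}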
@@ -589,6 +589,23 @@ static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
 	if (!mc)
 		return 0;

+	/*
+	 * Save us the MSR write below - which is a particular expensive
+	 * operation - when the other hyperthread has updated the microcode
+	 * already.
+	 */
+	rev = intel_get_microcode_revision();
+	if (rev >= mc->hdr.rev) {
+		uci->cpu_sig.rev = rev;
+		return UCODE_OK;
+	}
+
+	/*
+	 * Writeback and invalidate caches before updating microcode to avoid
+	 * internal issues depending on what the microcode is updating.
+	 */
+	native_wbinvd();
+
 	/* write microcode via MSR 0x79 */
 	native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);

@@ -774,9 +791,9 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)

 static enum ucode_state apply_microcode_intel(int cpu)
 {
-	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
-	struct cpuinfo_x86 *c = &cpu_data(cpu);
 	struct microcode_intel *mc;
+	struct ucode_cpu_info *uci;
+	struct cpuinfo_x86 *c;
 	static int prev_rev;
 	u32 rev;

@@ -784,15 +801,32 @@ static enum ucode_state apply_microcode_intel(int cpu)
 	if (WARN_ON(raw_smp_processor_id() != cpu))
 		return UCODE_ERROR;

+	uci = ucode_cpu_info + cpu;
-	mc = uci->mc;
+	/* Look for a newer patch in our cache: */
+	mc = find_patch(uci);
 	if (!mc) {
-		/* Look for a newer patch in our cache: */
-		mc = find_patch(uci);
+		mc = uci->mc;
 		if (!mc)
 			return UCODE_NFOUND;
 	}
+
+	/*
+	 * Save us the MSR write below - which is a particular expensive
+	 * operation - when the other hyperthread has updated the microcode
+	 * already.
+	 */
+	rev = intel_get_microcode_revision();
+	if (rev >= mc->hdr.rev) {
+		uci->cpu_sig.rev = rev;
+		c->microcode = rev;
+		return UCODE_OK;
+	}
+
+	/*
+	 * Writeback and invalidate caches before updating microcode to avoid
+	 * internal issues depending on what the microcode is updating.
+	 */
+	native_wbinvd();

 	/* write microcode via MSR 0x79 */
 	wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);

@@ -813,8 +847,6 @@ static enum ucode_state apply_microcode_intel(int cpu)
 		prev_rev = rev;
 	}

-	c = &cpu_data(cpu);
-
 	uci->cpu_sig.rev = rev;
 	c->microcode = rev;

@@ -830,6 +862,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
 	unsigned int leftover = size;
 	unsigned int curr_mc_size = 0, new_mc_size = 0;
 	unsigned int csig, cpf;
+	enum ucode_state ret = UCODE_OK;

 	while (leftover) {
 		struct microcode_header_intel mc_header;

@@ -871,6 +904,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
 			new_mc  = mc;
 			new_mc_size = mc_size;
 			mc = NULL;	/* trigger new vmalloc */
+			ret = UCODE_NEW;
 		}

 		ucode_ptr += mc_size;

@@ -900,7 +934,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
 	pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
 		 cpu, new_rev, uci->cpu_sig.rev);

-	return UCODE_OK;
+	return ret;
 }

 static int get_ucode_fw(void *to, const void *from, size_t n)

@@ -23,7 +23,7 @@
 /*
  * this changes the io permissions bitmap in the current task.
  */
-asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
+SYSCALL_DEFINE3(ioperm, unsigned long, from, unsigned long, num, int, turn_on)
 {
 	struct thread_struct *t = &current->thread;
 	struct tss_struct *tss;

@@ -1168,10 +1168,18 @@ NOKPROBE_SYMBOL(longjmp_break_handler);

 bool arch_within_kprobe_blacklist(unsigned long addr)
 {
+	bool is_in_entry_trampoline_section = false;
+
+#ifdef CONFIG_X86_64
+	is_in_entry_trampoline_section =
+		(addr >= (unsigned long)__entry_trampoline_start &&
+		 addr < (unsigned long)__entry_trampoline_end);
+#endif
 	return	(addr >= (unsigned long)__kprobes_text_start &&
 		 addr < (unsigned long)__kprobes_text_end) ||
 		(addr >= (unsigned long)__entry_text_start &&
-		 addr < (unsigned long)__entry_text_end);
+		 addr < (unsigned long)__entry_text_end) ||
+		is_in_entry_trampoline_section;
 }

 int __init arch_init_kprobes(void)

@@ -43,6 +43,13 @@ static inline void signal_compat_build_tests(void)
 	BUILD_BUG_ON(offsetof(compat_siginfo_t, _sifields) != 3 * sizeof(int));
 #define CHECK_CSI_OFFSET(name)	  BUILD_BUG_ON(_sifields_offset != offsetof(compat_siginfo_t, _sifields.name))

+	BUILD_BUG_ON(offsetof(siginfo_t, si_signo) != 0);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_errno) != 4);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_code)  != 8);
+
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_signo) != 0);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_errno) != 4);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_code)  != 8);
 	/*
 	 * Ensure that the size of each si_field never changes.
 	 * If it does, it is a sign that the

@@ -63,36 +70,94 @@ static inline void signal_compat_build_tests(void)
 	CHECK_CSI_SIZE  (_kill, 2*sizeof(int));
 	CHECK_SI_SIZE   (_kill, 2*sizeof(int));

+	BUILD_BUG_ON(offsetof(siginfo_t, si_pid) != 0x10);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_uid) != 0x14);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_pid) != 0xC);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_uid) != 0x10);
+
 	CHECK_CSI_OFFSET(_timer);
 	CHECK_CSI_SIZE  (_timer, 3*sizeof(int));
 	CHECK_SI_SIZE   (_timer, 6*sizeof(int));

+	BUILD_BUG_ON(offsetof(siginfo_t, si_tid) != 0x10);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_overrun) != 0x14);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_value) != 0x18);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_tid) != 0x0C);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_overrun) != 0x10);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_value) != 0x14);
+
 	CHECK_CSI_OFFSET(_rt);
 	CHECK_CSI_SIZE  (_rt, 3*sizeof(int));
 	CHECK_SI_SIZE   (_rt, 4*sizeof(int));

+	BUILD_BUG_ON(offsetof(siginfo_t, si_pid) != 0x10);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_uid) != 0x14);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_value) != 0x18);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_pid) != 0x0C);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_uid) != 0x10);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_value) != 0x14);
+
 	CHECK_CSI_OFFSET(_sigchld);
 	CHECK_CSI_SIZE  (_sigchld, 5*sizeof(int));
 	CHECK_SI_SIZE   (_sigchld, 8*sizeof(int));

+	BUILD_BUG_ON(offsetof(siginfo_t, si_pid) != 0x10);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_uid) != 0x14);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_status) != 0x18);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_utime) != 0x20);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_stime) != 0x28);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_pid) != 0x0C);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_uid) != 0x10);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_status) != 0x14);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_utime) != 0x18);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_stime) != 0x1C);
+
 #ifdef CONFIG_X86_X32_ABI
 	CHECK_CSI_OFFSET(_sigchld_x32);
 	CHECK_CSI_SIZE  (_sigchld_x32, 7*sizeof(int));
+	/* no _sigchld_x32 in the generic siginfo_t */
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, _sifields._sigchld_x32._utime) != 0x18);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, _sifields._sigchld_x32._stime) != 0x20);
 #endif

 	CHECK_CSI_OFFSET(_sigfault);
 	CHECK_CSI_SIZE  (_sigfault, 4*sizeof(int));
 	CHECK_SI_SIZE   (_sigfault, 8*sizeof(int));

+	BUILD_BUG_ON(offsetof(siginfo_t, si_addr) != 0x10);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_addr) != 0x0C);
+
+	BUILD_BUG_ON(offsetof(siginfo_t, si_addr_lsb) != 0x18);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_addr_lsb) != 0x10);
+
+	BUILD_BUG_ON(offsetof(siginfo_t, si_lower) != 0x20);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_upper) != 0x28);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_lower) != 0x14);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_upper) != 0x18);
+
+	BUILD_BUG_ON(offsetof(siginfo_t, si_pkey) != 0x20);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_pkey) != 0x14);
+
 	CHECK_CSI_OFFSET(_sigpoll);
 	CHECK_CSI_SIZE  (_sigpoll, 2*sizeof(int));
 	CHECK_SI_SIZE   (_sigpoll, 4*sizeof(int));

+	BUILD_BUG_ON(offsetof(siginfo_t, si_band) != 0x10);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_fd) != 0x18);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_band) != 0x0C);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_fd) != 0x10);
+
 	CHECK_CSI_OFFSET(_sigsys);
 	CHECK_CSI_SIZE  (_sigsys, 3*sizeof(int));
 	CHECK_SI_SIZE   (_sigsys, 4*sizeof(int));

+	BUILD_BUG_ON(offsetof(siginfo_t, si_call_addr) != 0x10);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_syscall) != 0x18);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_arch) != 0x1C);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_call_addr) != 0x0C);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_syscall) != 0x10);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_arch) != 0x14);
+
 	/* any new si_fields should be added here */
 }

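These BUILD_BUG_ON() lines pin every siginfo field to a fixed offset, so an ABI change fails the build instead of silently corrupting user space. The same technique works in plain C11 via _Static_assert; a sketch with a made-up struct:

#include <stddef.h>

/* A toy "ABI" struct whose layout must never change. */
struct wire_msg {
	int  type;	/* offset 0 */
	int  flags;	/* offset 4 */
	long payload;	/* offset 8 on LP64 */
};

/* Compile-time layout checks, same idea as the kernel's BUILD_BUG_ON(). */
_Static_assert(offsetof(struct wire_msg, type)    == 0, "type moved");
_Static_assert(offsetof(struct wire_msg, flags)   == 4, "flags moved");
_Static_assert(offsetof(struct wire_msg, payload) == 8, "payload moved");

int main(void) { return 0; }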
@@ -727,7 +727,8 @@ void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
 		return;

 check_vip:
-	if (VEFLAGS & X86_EFLAGS_VIP) {
+	if ((VEFLAGS & (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) ==
+	    (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) {
 		save_v86_state(regs, VM86_STI);
 		return;
 	}

@@ -118,9 +118,11 @@ SECTIONS

 #ifdef CONFIG_X86_64
 		. = ALIGN(PAGE_SIZE);
+		VMLINUX_SYMBOL(__entry_trampoline_start) = .;
 		_entry_trampoline = .;
 		*(.entry_trampoline)
 		. = ALIGN(PAGE_SIZE);
+		VMLINUX_SYMBOL(__entry_trampoline_end) = .;
 		ASSERT(. - _entry_trampoline == PAGE_SIZE, "entry trampoline is too big");
 #endif

@@ -2770,8 +2770,10 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	else
 		pte_access &= ~ACC_WRITE_MASK;

+	if (!kvm_is_mmio_pfn(pfn))
+		spte |= shadow_me_mask;
+
 	spte |= (u64)pfn << PAGE_SHIFT;
-	spte |= shadow_me_mask;

 	if (pte_access & ACC_WRITE_MASK) {

@@ -330,7 +330,7 @@ static noinline int vmalloc_fault(unsigned long address)
 	if (!pmd_k)
 		return -1;

-	if (pmd_huge(*pmd_k))
+	if (pmd_large(*pmd_k))
 		return 0;

 	pte_k = pte_offset_kernel(pmd_k, address);

@@ -475,7 +475,7 @@ static noinline int vmalloc_fault(unsigned long address)
 	if (pud_none(*pud) || pud_pfn(*pud) != pud_pfn(*pud_ref))
 		BUG();

-	if (pud_huge(*pud))
+	if (pud_large(*pud))
 		return 0;

 	pmd = pmd_offset(pud, address);

@@ -486,7 +486,7 @@ static noinline int vmalloc_fault(unsigned long address)
 	if (pmd_none(*pmd) || pmd_pfn(*pmd) != pmd_pfn(*pmd_ref))
 		BUG();

-	if (pmd_huge(*pmd))
+	if (pmd_large(*pmd))
 		return 0;

 	pte_ref = pte_offset_kernel(pmd_ref, address);

@@ -332,7 +332,7 @@ static void __init pti_clone_user_shared(void)
 }

 /*
- * Clone the ESPFIX P4D into the user space visinble page table
+ * Clone the ESPFIX P4D into the user space visible page table
  */
 static void __init pti_setup_espfix64(void)
 {

@@ -97,7 +97,7 @@ static struct img_ascii_lcd_config boston_config = {
 static void malta_update(struct img_ascii_lcd_ctx *ctx)
 {
 	unsigned int i;
-	int err;
+	int err = 0;

 	for (i = 0; i < ctx->cfg->num_chars; i++) {
 		err = regmap_write(ctx->regmap,

@@ -180,7 +180,7 @@ static int sead3_wait_lcd_idle(struct img_ascii_lcd_ctx *ctx)
 static void sead3_update(struct img_ascii_lcd_ctx *ctx)
 {
 	unsigned int i;
-	int err;
+	int err = 0;

 	for (i = 0; i < ctx->cfg->num_chars; i++) {
 		err = sead3_wait_lcd_idle(ctx);

@@ -224,7 +224,7 @@ MODULE_DEVICE_TABLE(of, img_ascii_lcd_matches);

 /**
  * img_ascii_lcd_scroll() - scroll the display by a character
- * @arg: really a pointer to the private data structure
+ * @t: really a pointer to the private data structure
  *
  * Scroll the current message along the LCD by one character, rearming the
  * timer if required.

@@ -1372,7 +1372,7 @@ static void panel_process_inputs(void)
 			break;
 		input->rise_timer = 0;
 		input->state = INPUT_ST_RISING;
-		/* no break here, fall through */
+		/* fall through */
 	case INPUT_ST_RISING:
 		if ((phys_curr & input->mask) != input->value) {
 			input->state = INPUT_ST_LOW;

@@ -1385,11 +1385,11 @@ static void panel_process_inputs(void)
 		}
 		input->high_timer = 0;
 		input->state = INPUT_ST_HIGH;
-		/* no break here, fall through */
+		/* fall through */
 	case INPUT_ST_HIGH:
 		if (input_state_high(input))
 			break;
-		/* no break here, fall through */
+		/* fall through */
 	case INPUT_ST_FALLING:
 		input_state_falling(input);
 	}

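The bare /* fall through */ form matters because it is the comment GCC's -Wimplicit-fallthrough recognizes. A compilable sketch of an annotated, deliberate fall-through:

#include <stdio.h>

/* State machine step with an intentional case fall-through, annotated
 * so -Wimplicit-fallthrough stays quiet. */
static int advance(int state)
{
	switch (state) {
	case 0:
		puts("leaving state 0");
		/* fall through */
	case 1:
		puts("handling state 1");
		break;
	default:
		puts("unknown state");
	}
	return state + 1;
}

int main(void)
{
	advance(0);	/* prints both messages via the fall-through */
	return 0;
}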
@@ -266,7 +266,7 @@ static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
 	struct iov_iter i;
 	ssize_t bw;

-	iov_iter_bvec(&i, ITER_BVEC, bvec, 1, bvec->bv_len);
+	iov_iter_bvec(&i, ITER_BVEC | WRITE, bvec, 1, bvec->bv_len);

 	file_start_write(file);
 	bw = vfs_iter_write(file, &i, ppos, 0);

@@ -262,6 +262,7 @@ static DEFINE_SPINLOCK(minor_lock);

 static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo);
 static void blkfront_gather_backend_features(struct blkfront_info *info);
+static int negotiate_mq(struct blkfront_info *info);

 static int get_id_from_freelist(struct blkfront_ring_info *rinfo)
 {

@@ -1774,11 +1775,18 @@ static int talk_to_blkback(struct xenbus_device *dev,
 	unsigned int i, max_page_order;
 	unsigned int ring_page_order;

+	if (!info)
+		return -ENODEV;
+
 	max_page_order = xenbus_read_unsigned(info->xbdev->otherend,
 					      "max-ring-page-order", 0);
 	ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
 	info->nr_ring_pages = 1 << ring_page_order;

+	err = negotiate_mq(info);
+	if (err)
+		goto destroy_blkring;
+
 	for (i = 0; i < info->nr_rings; i++) {
 		struct blkfront_ring_info *rinfo = &info->rinfo[i];

@@ -1978,11 +1986,6 @@ static int blkfront_probe(struct xenbus_device *dev,
 	}

 	info->xbdev = dev;
-	err = negotiate_mq(info);
-	if (err) {
-		kfree(info);
-		return err;
-	}

 	mutex_init(&info->mutex);
 	info->vdevice = vdevice;

@@ -2099,10 +2102,6 @@ static int blkfront_resume(struct xenbus_device *dev)

 	blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);

-	err = negotiate_mq(info);
-	if (err)
-		return err;
-
 	err = talk_to_blkback(dev, info);
 	if (!err)
 		blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings);

@@ -21,6 +21,7 @@
  *
  */

+#include <linux/dmi.h>
 #include <linux/module.h>
 #include <linux/usb.h>
 #include <linux/usb/quirks.h>

@@ -379,6 +380,21 @@ static const struct usb_device_id blacklist_table[] = {
 	{ }	/* Terminating entry */
 };

+/* The Bluetooth USB module build into some devices needs to be reset on resume,
+ * this is a problem with the platform (likely shutting off all power) not with
+ * the module itself. So we use a DMI list to match known broken platforms.
+ */
+static const struct dmi_system_id btusb_needs_reset_resume_table[] = {
+	{
+		/* Lenovo Yoga 920 (QCA Rome device 0cf3:e300) */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 920"),
+		},
+	},
+	{}
+};
+
 #define BTUSB_MAX_ISOC_FRAMES	10

 #define BTUSB_INTR_RUNNING	0

@@ -2945,6 +2961,9 @@ static int btusb_probe(struct usb_interface *intf,
 	hdev->send   = btusb_send_frame;
 	hdev->notify = btusb_notify;

+	if (dmi_check_system(btusb_needs_reset_resume_table))
+		interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
+
 #ifdef CONFIG_PM
 	err = btusb_config_oob_wake(hdev);
 	if (err)

@@ -3031,12 +3050,6 @@ static int btusb_probe(struct usb_interface *intf,
 	if (id->driver_info & BTUSB_QCA_ROME) {
 		data->setup_on_usb = btusb_setup_qca;
 		hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
-
-		/* QCA Rome devices lose their updated firmware over suspend,
-		 * but the USB hub doesn't notice any status change.
-		 * explicitly request a device reset on resume.
-		 */
-		interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
 	}

 #ifdef CONFIG_BT_HCIBTUSB_RTL

@@ -922,12 +922,13 @@ static int bcm_get_resources(struct bcm_device *dev)

 	dev->clk = devm_clk_get(dev->dev, NULL);

-	dev->device_wakeup = devm_gpiod_get(dev->dev, "device-wakeup",
-					    GPIOD_OUT_LOW);
+	dev->device_wakeup = devm_gpiod_get_optional(dev->dev, "device-wakeup",
+						     GPIOD_OUT_LOW);
 	if (IS_ERR(dev->device_wakeup))
 		return PTR_ERR(dev->device_wakeup);

-	dev->shutdown = devm_gpiod_get(dev->dev, "shutdown", GPIOD_OUT_LOW);
+	dev->shutdown = devm_gpiod_get_optional(dev->dev, "shutdown",
+						GPIOD_OUT_LOW);
 	if (IS_ERR(dev->shutdown))
 		return PTR_ERR(dev->shutdown);

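devm_gpiod_get_optional() returns NULL rather than an error when the firmware simply omits the line, and the gpiod calls treat a NULL descriptor as a no-op, so callers only need to handle real errors. A kernel-style sketch of the consuming pattern (hypothetical driver function, illustrative only):

#include <linux/gpio/consumer.h>

/* Sketch: optional GPIO acquisition in a probe path. gpiod_set_value()
 * on a NULL descriptor is a no-op, so no NULL checks are needed later. */
static int example_get_wakeup_gpio(struct device *dev,
				   struct gpio_desc **out)
{
	struct gpio_desc *gpio;

	gpio = devm_gpiod_get_optional(dev, "device-wakeup", GPIOD_OUT_LOW);
	if (IS_ERR(gpio))
		return PTR_ERR(gpio);	/* a real error, e.g. -EPROBE_DEFER */

	*out = gpio;			/* may be NULL: line not described */
	return 0;
}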
@@ -386,6 +386,7 @@ config ATMEL_PIT

 config ATMEL_ST
 	bool "Atmel ST timer support" if COMPILE_TEST
+	depends on HAS_IOMEM
 	select TIMER_OF
 	select MFD_SYSCON
 	help

@@ -163,6 +163,7 @@ struct mv_xor_v2_device {
 	void __iomem *dma_base;
 	void __iomem *glob_base;
 	struct clk *clk;
+	struct clk *reg_clk;
 	struct tasklet_struct irq_tasklet;
 	struct list_head free_sw_desc;
 	struct dma_device dmadev;

@@ -749,13 +750,26 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
 	if (ret)
 		return ret;

+	xor_dev->reg_clk = devm_clk_get(&pdev->dev, "reg");
+	if (PTR_ERR(xor_dev->reg_clk) != -ENOENT) {
+		if (!IS_ERR(xor_dev->reg_clk)) {
+			ret = clk_prepare_enable(xor_dev->reg_clk);
+			if (ret)
+				return ret;
+		} else {
+			return PTR_ERR(xor_dev->reg_clk);
+		}
+	}
+
 	xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
-	if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER)
-		return -EPROBE_DEFER;
+	if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) {
+		ret = EPROBE_DEFER;
+		goto disable_reg_clk;
+	}
 	if (!IS_ERR(xor_dev->clk)) {
 		ret = clk_prepare_enable(xor_dev->clk);
 		if (ret)
-			return ret;
+			goto disable_reg_clk;
 	}

 	ret = platform_msi_domain_alloc_irqs(&pdev->dev, 1,

@@ -866,8 +880,9 @@ free_hw_desq:
 free_msi_irqs:
 	platform_msi_domain_free_irqs(&pdev->dev);
 disable_clk:
-	if (!IS_ERR(xor_dev->clk))
-		clk_disable_unprepare(xor_dev->clk);
+	clk_disable_unprepare(xor_dev->clk);
+disable_reg_clk:
+	clk_disable_unprepare(xor_dev->reg_clk);
 	return ret;
 }

@@ -917,7 +917,7 @@ rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,

 	rcar_dmac_chan_configure_desc(chan, desc);

-	max_chunk_size = (RCAR_DMATCR_MASK + 1) << desc->xfer_shift;
+	max_chunk_size = RCAR_DMATCR_MASK << desc->xfer_shift;

 	/*
 	 * Allocate and fill the transfer chunk descriptors. We own the only

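The one-character fix above matters because DMATCR is the transfer counter: (MASK + 1) << shift describes one transfer unit more than the register can encode. A stand-alone arithmetic check (the 24-bit mask width is an assumption for illustration):

#include <stdio.h>
#include <stdint.h>

#define DMATCR_MASK 0x00ffffffu	/* assumed 24-bit transfer counter */

int main(void)
{
	unsigned int xfer_shift = 2;	/* 4-byte transfer units */

	/* Old, off-by-one upper bound vs. the largest encodable chunk. */
	uint64_t too_big = ((uint64_t)DMATCR_MASK + 1) << xfer_shift;
	uint64_t max_ok  = (uint64_t)DMATCR_MASK << xfer_shift;

	printf("old max: %llu bytes, fixed max: %llu bytes\n",
	       (unsigned long long)too_big, (unsigned long long)max_ok);
	return 0;
}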
@@ -639,7 +639,7 @@ static void __exit dcdbas_exit(void)
 	platform_driver_unregister(&dcdbas_driver);
 }

-module_init(dcdbas_init);
+subsys_initcall_sync(dcdbas_init);
 module_exit(dcdbas_exit);

 MODULE_DESCRIPTION(DRIVER_DESCRIPTION " (version " DRIVER_VERSION ")");

@@ -68,11 +68,11 @@ void efi_retrieve_tpm2_eventlog_1_2(efi_system_table_t *sys_table_arg)
 	efi_guid_t linux_eventlog_guid = LINUX_EFI_TPM_EVENT_LOG_GUID;
 	efi_status_t status;
 	efi_physical_addr_t log_location, log_last_entry;
-	struct linux_efi_tpm_eventlog *log_tbl;
+	struct linux_efi_tpm_eventlog *log_tbl = NULL;
 	unsigned long first_entry_addr, last_entry_addr;
 	size_t log_size, last_entry_size;
 	efi_bool_t truncated;
-	void *tcg2_protocol;
+	void *tcg2_protocol = NULL;

 	status = efi_call_early(locate_protocol, &tcg2_guid, NULL,
 				&tcg2_protocol);

@@ -14,7 +14,6 @@
  * GNU General Public License for more details.
  */

-#include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/gpio.h>
 #include <linux/init.h>

@@ -37,10 +36,9 @@ struct gpio_rcar_priv {
 	struct platform_device *pdev;
 	struct gpio_chip gpio_chip;
 	struct irq_chip irq_chip;
-	struct clk *clk;
 	unsigned int irq_parent;
+	atomic_t wakeup_path;
 	bool has_both_edge_trigger;
-	bool needs_clk;
 };

 #define IOINTSEL 0x00	/* General IO/Interrupt Switching Register */

@@ -186,13 +184,10 @@ static int gpio_rcar_irq_set_wake(struct irq_data *d, unsigned int on)
 		}
 	}

-	if (!p->clk)
-		return 0;
-
 	if (on)
-		clk_enable(p->clk);
+		atomic_inc(&p->wakeup_path);
 	else
-		clk_disable(p->clk);
+		atomic_dec(&p->wakeup_path);

 	return 0;
 }

@@ -330,17 +325,14 @@ static int gpio_rcar_direction_output(struct gpio_chip *chip, unsigned offset,

 struct gpio_rcar_info {
 	bool has_both_edge_trigger;
-	bool needs_clk;
 };

 static const struct gpio_rcar_info gpio_rcar_info_gen1 = {
 	.has_both_edge_trigger = false,
-	.needs_clk = false,
 };

 static const struct gpio_rcar_info gpio_rcar_info_gen2 = {
 	.has_both_edge_trigger = true,
-	.needs_clk = true,
 };

 static const struct of_device_id gpio_rcar_of_table[] = {

@@ -403,7 +395,6 @@ static int gpio_rcar_parse_dt(struct gpio_rcar_priv *p, unsigned int *npins)
 	ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &args);
 	*npins = ret == 0 ? args.args[2] : RCAR_MAX_GPIO_PER_BANK;
 	p->has_both_edge_trigger = info->has_both_edge_trigger;
-	p->needs_clk = info->needs_clk;

 	if (*npins == 0 || *npins > RCAR_MAX_GPIO_PER_BANK) {
 		dev_warn(&p->pdev->dev,

@@ -440,16 +431,6 @@ static int gpio_rcar_probe(struct platform_device *pdev)

 	platform_set_drvdata(pdev, p);

-	p->clk = devm_clk_get(dev, NULL);
-	if (IS_ERR(p->clk)) {
-		if (p->needs_clk) {
-			dev_err(dev, "unable to get clock\n");
-			ret = PTR_ERR(p->clk);
-			goto err0;
-		}
-		p->clk = NULL;
-	}
-
 	pm_runtime_enable(dev);

 	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);

@@ -531,11 +512,24 @@ static int gpio_rcar_remove(struct platform_device *pdev)
 	return 0;
 }

+static int __maybe_unused gpio_rcar_suspend(struct device *dev)
+{
+	struct gpio_rcar_priv *p = dev_get_drvdata(dev);
+
+	if (atomic_read(&p->wakeup_path))
+		device_set_wakeup_path(dev);
+
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(gpio_rcar_pm_ops, gpio_rcar_suspend, NULL);
+
 static struct platform_driver gpio_rcar_device_driver = {
 	.probe		= gpio_rcar_probe,
 	.remove		= gpio_rcar_remove,
 	.driver		= {
 		.name	= "gpio_rcar",
+		.pm	= &gpio_rcar_pm_ops,
 		.of_match_table = of_match_ptr(gpio_rcar_of_table),
 	}
 };

@@ -540,6 +540,9 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
 	size_t size;
 	u32 retry = 3;

+	if (amdgpu_acpi_pcie_notify_device_ready(adev))
+		return -EINVAL;
+
 	/* Get the device handle */
 	handle = ACPI_HANDLE(&adev->pdev->dev);
 	if (!handle)

@@ -69,25 +69,18 @@ void amdgpu_connector_hotplug(struct drm_connector *connector)
 		/* don't do anything if sink is not display port, i.e.,
 		 * passive dp->(dvi|hdmi) adaptor
 		 */
-		if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
-			int saved_dpms = connector->dpms;
-			/* Only turn off the display if it's physically disconnected */
-			if (!amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) {
-				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
-			} else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
-				/* Don't try to start link training before we
-				 * have the dpcd */
-				if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
-					return;
+		if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT &&
+		    amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd) &&
+		    amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
+			/* Don't start link training before we have the DPCD */
+			if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
+				return;

-				/* set it to OFF so that drm_helper_connector_dpms()
-				 * won't return immediately since the current state
-				 * is ON at this point.
-				 */
-				connector->dpms = DRM_MODE_DPMS_OFF;
-				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
-			}
-			connector->dpms = saved_dpms;
+			/* Turn the connector off and back on immediately, which
+			 * will trigger link training
+			 */
+			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
 		}
 	}
 }

@@ -36,8 +36,6 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj)
 	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

 	if (robj) {
-		if (robj->gem_base.import_attach)
-			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
 		amdgpu_mn_unregister(robj);
 		amdgpu_bo_unref(&robj);
 	}

@@ -352,6 +352,7 @@ struct amdgpu_mode_info {
 	u16 firmware_flags;
 	/* pointer to backlight encoder */
 	struct amdgpu_encoder *bl_encoder;
+	u8 bl_level; /* saved backlight level */
 	struct amdgpu_audio	audio; /* audio stuff */
 	int			num_crtc; /* number of crtcs */
 	int			num_hpd; /* number of hpd pins */

@@ -56,6 +56,8 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)

 	amdgpu_bo_kunmap(bo);

+	if (bo->gem_base.import_attach)
+		drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
 	drm_gem_object_release(&bo->gem_base);
 	amdgpu_bo_unref(&bo->parent);
 	if (!list_empty(&bo->shadow_list)) {

@@ -481,7 +481,7 @@ static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
 	result = 0;

 	if (*pos < 12) {
-		early[0] = amdgpu_ring_get_rptr(ring);
+		early[0] = amdgpu_ring_get_rptr(ring) & ring->buf_mask;
 		early[1] = amdgpu_ring_get_wptr(ring) & ring->buf_mask;
 		early[2] = ring->wptr & ring->buf_mask;
 		for (i = *pos / 4; i < 3 && size; i++) {

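Masking the read pointer with buf_mask before indexing is the standard power-of-two ring-buffer idiom; the hardware pointer is a free-running counter that can exceed the buffer size. A small sketch:

#include <stdio.h>
#include <stdint.h>

#define RING_SIZE 8			/* must be a power of two */
#define RING_MASK (RING_SIZE - 1)

int main(void)
{
	uint32_t ring[RING_SIZE] = { 0 };
	uint32_t wptr = 0;		/* free-running counter */

	for (uint32_t v = 100; v < 111; v++)	/* 11 writes wrap the ring */
		ring[wptr++ & RING_MASK] = v;

	/* wptr == 11; the masked value is the real slot index */
	printf("wptr=%u slot=%u newest=%u\n", wptr, wptr & RING_MASK,
	       ring[(wptr - 1) & RING_MASK]);
	return 0;
}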
@@ -299,12 +299,15 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)

 	cancel_delayed_work_sync(&adev->uvd.idle_work);

-	for (i = 0; i < adev->uvd.max_handles; ++i)
-		if (atomic_read(&adev->uvd.handles[i]))
-			break;
+	/* only valid for physical mode */
+	if (adev->asic_type < CHIP_POLARIS10) {
+		for (i = 0; i < adev->uvd.max_handles; ++i)
+			if (atomic_read(&adev->uvd.handles[i]))
+				break;

-	if (i == AMDGPU_MAX_UVD_HANDLES)
-		return 0;
+		if (i == adev->uvd.max_handles)
+			return 0;
+	}

 	size = amdgpu_bo_size(adev->uvd.vcpu_bo);
 	ptr = adev->uvd.cpu_addr;

@@ -34,7 +34,7 @@
 #include <linux/backlight.h>
 #include "bif/bif_4_1_d.h"

-static u8
+u8
 amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev)
 {
 	u8 backlight_level;

@@ -48,7 +48,7 @@ amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev)
 	return backlight_level;
 }

-static void
+void
 amdgpu_atombios_encoder_set_backlight_level_to_reg(struct amdgpu_device *adev,
 						   u8 backlight_level)
 {

@@ -24,6 +24,11 @@
 #ifndef __ATOMBIOS_ENCODER_H__
 #define __ATOMBIOS_ENCODER_H__

+u8
+amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev);
+void
+amdgpu_atombios_encoder_set_backlight_level_to_reg(struct amdgpu_device *adev,
+						   u8 backlight_level);
 u8
 amdgpu_atombios_encoder_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder);
 void

@@ -2921,6 +2921,11 @@ static int dce_v10_0_hw_fini(void *handle)

 static int dce_v10_0_suspend(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->mode_info.bl_level =
+		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
+
 	return dce_v10_0_hw_fini(handle);
 }

@@ -2929,6 +2934,9 @@ static int dce_v10_0_resume(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int ret;

+	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
+							   adev->mode_info.bl_level);
+
 	ret = dce_v10_0_hw_init(handle);

 	/* turn on the BL */

@@ -3047,6 +3047,11 @@ static int dce_v11_0_hw_fini(void *handle)

 static int dce_v11_0_suspend(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->mode_info.bl_level =
+		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
+
 	return dce_v11_0_hw_fini(handle);
 }

@@ -3055,6 +3060,9 @@ static int dce_v11_0_resume(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int ret;

+	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
+							   adev->mode_info.bl_level);
+
 	ret = dce_v11_0_hw_init(handle);

 	/* turn on the BL */

@@ -2787,6 +2787,11 @@ static int dce_v6_0_hw_fini(void *handle)

 static int dce_v6_0_suspend(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->mode_info.bl_level =
+		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
+
 	return dce_v6_0_hw_fini(handle);
 }

@@ -2795,6 +2800,9 @@ static int dce_v6_0_resume(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int ret;

+	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
+							   adev->mode_info.bl_level);
+
 	ret = dce_v6_0_hw_init(handle);

 	/* turn on the BL */

@@ -3093,7 +3101,7 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
 			tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
 			WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
 			schedule_work(&adev->hotplug_work);
-			DRM_INFO("IH: HPD%d\n", hpd + 1);
+			DRM_DEBUG("IH: HPD%d\n", hpd + 1);
 		}

 	return 0;

@@ -2819,6 +2819,11 @@ static int dce_v8_0_hw_fini(void *handle)

 static int dce_v8_0_suspend(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->mode_info.bl_level =
+		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
+
 	return dce_v8_0_hw_fini(handle);
 }

@@ -2827,6 +2832,9 @@ static int dce_v8_0_resume(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int ret;

+	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
+							   adev->mode_info.bl_level);
+
 	ret = dce_v8_0_hw_init(handle);

 	/* turn on the BL */

@@ -4384,34 +4384,8 @@ static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev)
 	case CHIP_KAVERI:
 		adev->gfx.config.max_shader_engines = 1;
 		adev->gfx.config.max_tile_pipes = 4;
-		if ((adev->pdev->device == 0x1304) ||
-		    (adev->pdev->device == 0x1305) ||
-		    (adev->pdev->device == 0x130C) ||
-		    (adev->pdev->device == 0x130F) ||
-		    (adev->pdev->device == 0x1310) ||
-		    (adev->pdev->device == 0x1311) ||
-		    (adev->pdev->device == 0x131C)) {
-			adev->gfx.config.max_cu_per_sh = 8;
-			adev->gfx.config.max_backends_per_se = 2;
-		} else if ((adev->pdev->device == 0x1309) ||
-			   (adev->pdev->device == 0x130A) ||
-			   (adev->pdev->device == 0x130D) ||
-			   (adev->pdev->device == 0x1313) ||
-			   (adev->pdev->device == 0x131D)) {
-			adev->gfx.config.max_cu_per_sh = 6;
-			adev->gfx.config.max_backends_per_se = 2;
-		} else if ((adev->pdev->device == 0x1306) ||
-			   (adev->pdev->device == 0x1307) ||
-			   (adev->pdev->device == 0x130B) ||
-			   (adev->pdev->device == 0x130E) ||
-			   (adev->pdev->device == 0x1315) ||
-			   (adev->pdev->device == 0x131B)) {
-			adev->gfx.config.max_cu_per_sh = 4;
-			adev->gfx.config.max_backends_per_se = 1;
-		} else {
-			adev->gfx.config.max_cu_per_sh = 3;
-			adev->gfx.config.max_backends_per_se = 1;
-		}
+		adev->gfx.config.max_cu_per_sh = 8;
+		adev->gfx.config.max_backends_per_se = 2;
 		adev->gfx.config.max_sh_per_se = 1;
 		adev->gfx.config.max_texture_channel_caches = 4;
 		adev->gfx.config.max_gprs = 256;
@@ -31,6 +31,7 @@
 #include "amdgpu_uvd.h"
 #include "amdgpu_vce.h"
 #include "atom.h"
+#include "amd_pcie.h"
 #include "amdgpu_powerplay.h"
 #include "sid.h"
 #include "si_ih.h"
@@ -1461,8 +1462,8 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
 {
 	struct pci_dev *root = adev->pdev->bus->self;
 	int bridge_pos, gpu_pos;
-	u32 speed_cntl, mask, current_data_rate;
-	int ret, i;
+	u32 speed_cntl, current_data_rate;
+	int i;
 	u16 tmp16;
 
 	if (pci_is_root_bus(adev->pdev->bus))
@@ -1474,23 +1475,20 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
 	if (adev->flags & AMD_IS_APU)
 		return;
 
-	ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
-	if (ret != 0)
-		return;
-
-	if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
+	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
 		return;
 
 	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
 	current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
 		LC_CURRENT_DATA_RATE_SHIFT;
-	if (mask & DRM_PCIE_SPEED_80) {
+	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
 		if (current_data_rate == 2) {
 			DRM_INFO("PCIE gen 3 link speeds already enabled\n");
 			return;
 		}
 		DRM_INFO("enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n");
-	} else if (mask & DRM_PCIE_SPEED_50) {
+	} else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) {
 		if (current_data_rate == 1) {
 			DRM_INFO("PCIE gen 2 link speeds already enabled\n");
 			return;
@@ -1506,7 +1504,7 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
 	if (!gpu_pos)
 		return;
 
-	if (mask & DRM_PCIE_SPEED_80) {
+	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
 		if (current_data_rate != 2) {
 			u16 bridge_cfg, gpu_cfg;
 			u16 bridge_cfg2, gpu_cfg2;
@@ -1589,9 +1587,9 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
 
 	pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
 	tmp16 &= ~0xf;
-	if (mask & DRM_PCIE_SPEED_80)
+	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
 		tmp16 |= 3;
-	else if (mask & DRM_PCIE_SPEED_50)
+	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
 		tmp16 |= 2;
 	else
 		tmp16 |= 1;
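All of the hunks above make the same substitution: the mask formerly fetched via drm_pcie_get_speed_cap_mask() is replaced by the CAIL bits amdgpu already caches in adev->pm.pcie_gen_mask. A runnable model of the final LNKCTL2 target-speed selection at the end of si_pcie_gen3_enable(); the CAIL_* values are quoted from amdgpu's amd_pcie.h and should be treated as an assumption here:

    #include <stdint.h>
    #include <stdio.h>

    /* assumed values, per drivers/gpu/drm/amd/include/amd_pcie.h */
    #define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 0x00020000
    #define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 0x00040000

    /* mirrors the tail of si_pcie_gen3_enable(): pick the PCI_EXP_LNKCTL2
     * target link speed (3 = 8.0 GT/s, 2 = 5.0 GT/s, 1 = 2.5 GT/s) */
    static uint16_t target_link_speed(uint32_t pcie_gen_mask, uint16_t tmp16)
    {
        tmp16 &= ~0xf; /* clear the current target-speed field */
        if (pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
            tmp16 |= 3;
        else if (pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
            tmp16 |= 2;
        else
            tmp16 |= 1;
        return tmp16;
    }

    int main(void)
    {
        printf("%u\n", target_link_speed(CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3, 0)); /* 3 */
        printf("%u\n", target_link_speed(CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2, 0)); /* 2 */
        printf("%u\n", target_link_speed(0, 0));                                 /* 1 */
        return 0;
    }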
@@ -26,6 +26,7 @@
 #include "amdgpu_pm.h"
 #include "amdgpu_dpm.h"
 #include "amdgpu_atombios.h"
+#include "amd_pcie.h"
 #include "sid.h"
 #include "r600_dpm.h"
 #include "si_dpm.h"
@@ -3331,29 +3332,6 @@ static void btc_apply_voltage_delta_rules(struct amdgpu_device *adev,
 	}
 }
 
-static enum amdgpu_pcie_gen r600_get_pcie_gen_support(struct amdgpu_device *adev,
-						      u32 sys_mask,
-						      enum amdgpu_pcie_gen asic_gen,
-						      enum amdgpu_pcie_gen default_gen)
-{
-	switch (asic_gen) {
-	case AMDGPU_PCIE_GEN1:
-		return AMDGPU_PCIE_GEN1;
-	case AMDGPU_PCIE_GEN2:
-		return AMDGPU_PCIE_GEN2;
-	case AMDGPU_PCIE_GEN3:
-		return AMDGPU_PCIE_GEN3;
-	default:
-		if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3))
-			return AMDGPU_PCIE_GEN3;
-		else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2))
-			return AMDGPU_PCIE_GEN2;
-		else
-			return AMDGPU_PCIE_GEN1;
-	}
-	return AMDGPU_PCIE_GEN1;
-}
-
 static void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
 				   u32 *p, u32 *u)
 {
@@ -5028,10 +5006,11 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
 						    table->ACPIState.levels[0].vddc.index,
 						    &table->ACPIState.levels[0].std_vddc);
 	}
-	table->ACPIState.levels[0].gen2PCIE = (u8)r600_get_pcie_gen_support(adev,
-									    si_pi->sys_pcie_mask,
-									    si_pi->boot_pcie_gen,
-									    AMDGPU_PCIE_GEN1);
+	table->ACPIState.levels[0].gen2PCIE =
+		(u8)amdgpu_get_pcie_gen_support(adev,
+						si_pi->sys_pcie_mask,
+						si_pi->boot_pcie_gen,
+						AMDGPU_PCIE_GEN1);
 
 	if (si_pi->vddc_phase_shed_control)
 		si_populate_phase_shedding_value(adev,
@@ -7168,10 +7147,10 @@ static void si_parse_pplib_clock_info(struct amdgpu_device *adev,
 	pl->vddc = le16_to_cpu(clock_info->si.usVDDC);
 	pl->vddci = le16_to_cpu(clock_info->si.usVDDCI);
 	pl->flags = le32_to_cpu(clock_info->si.ulFlags);
-	pl->pcie_gen = r600_get_pcie_gen_support(adev,
-						 si_pi->sys_pcie_mask,
-						 si_pi->boot_pcie_gen,
-						 clock_info->si.ucPCIEGen);
+	pl->pcie_gen = amdgpu_get_pcie_gen_support(adev,
+						   si_pi->sys_pcie_mask,
+						   si_pi->boot_pcie_gen,
+						   clock_info->si.ucPCIEGen);
 
 	/* patch up vddc if necessary */
 	ret = si_get_leakage_voltage_from_leakage_index(adev, pl->vddc,
@@ -7326,7 +7305,6 @@ static int si_dpm_init(struct amdgpu_device *adev)
 	struct si_power_info *si_pi;
 	struct atom_clock_dividers dividers;
 	int ret;
-	u32 mask;
 
 	si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL);
 	if (si_pi == NULL)
@@ -7336,11 +7314,9 @@ static int si_dpm_init(struct amdgpu_device *adev)
 	eg_pi = &ni_pi->eg;
 	pi = &eg_pi->rv7xx;
 
-	ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
-	if (ret)
-		si_pi->sys_pcie_mask = 0;
-	else
-		si_pi->sys_pcie_mask = mask;
+	si_pi->sys_pcie_mask =
+		(adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
+		CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;
 	si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
 	si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev);
 
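si_dpm_init() now derives sys_pcie_mask with a mask-and-shift on the cached adev->pm.pcie_gen_mask instead of the removed drm_pcie_get_speed_cap_mask() call, so the error fallback to 0 disappears as well. A runnable sketch of that derivation (CAIL_* values again quoted from amd_pcie.h as an assumption):

    #include <stdint.h>
    #include <stdio.h>

    /* assumed values, per drivers/gpu/drm/amd/include/amd_pcie.h */
    #define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1  0x00010000
    #define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2  0x00020000
    #define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3  0x00040000
    #define CAIL_PCIE_LINK_SPEED_SUPPORT_MASK  0xFFFF0000
    #define CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT 16

    int main(void)
    {
        /* a Gen3-capable platform advertises Gen1|Gen2|Gen3 */
        uint32_t pcie_gen_mask = CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
                                 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
                                 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;

        /* the expression that replaces drm_pcie_get_speed_cap_mask() */
        uint32_t sys_pcie_mask =
            (pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
            CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;

        printf("sys_pcie_mask = 0x%x\n", (unsigned)sys_pcie_mask); /* 0x7 */
        return 0;
    }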
@@ -1037,6 +1037,10 @@ static void handle_hpd_rx_irq(void *param)
 			!is_mst_root_connector) {
 		/* Downstream Port status changed. */
 		if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
+
+			if (aconnector->fake_enable)
+				aconnector->fake_enable = false;
+
 			amdgpu_dm_update_connector_after_detect(aconnector);
 
 
@@ -2012,30 +2016,32 @@ static void update_stream_scaling_settings(const struct drm_display_mode *mode,
 	dst.width = stream->timing.h_addressable;
 	dst.height = stream->timing.v_addressable;
 
-	rmx_type = dm_state->scaling;
-	if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
-		if (src.width * dst.height <
-				src.height * dst.width) {
-			/* height needs less upscaling/more downscaling */
-			dst.width = src.width *
-					dst.height / src.height;
-		} else {
-			/* width needs less upscaling/more downscaling */
-			dst.height = src.height *
-					dst.width / src.width;
+	if (dm_state) {
+		rmx_type = dm_state->scaling;
+		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
+			if (src.width * dst.height <
+					src.height * dst.width) {
+				/* height needs less upscaling/more downscaling */
+				dst.width = src.width *
+						dst.height / src.height;
+			} else {
+				/* width needs less upscaling/more downscaling */
+				dst.height = src.height *
+						dst.width / src.width;
+			}
+		} else if (rmx_type == RMX_CENTER) {
+			dst = src;
 		}
-	} else if (rmx_type == RMX_CENTER) {
-		dst = src;
-	}
 
-	dst.x = (stream->timing.h_addressable - dst.width) / 2;
-	dst.y = (stream->timing.v_addressable - dst.height) / 2;
+		dst.x = (stream->timing.h_addressable - dst.width) / 2;
+		dst.y = (stream->timing.v_addressable - dst.height) / 2;
 
-	if (dm_state->underscan_enable) {
-		dst.x += dm_state->underscan_hborder / 2;
-		dst.y += dm_state->underscan_vborder / 2;
-		dst.width -= dm_state->underscan_hborder;
-		dst.height -= dm_state->underscan_vborder;
+		if (dm_state->underscan_enable) {
+			dst.x += dm_state->underscan_hborder / 2;
+			dst.y += dm_state->underscan_vborder / 2;
+			dst.width -= dm_state->underscan_hborder;
+			dst.height -= dm_state->underscan_vborder;
+		}
 	}
 
 	stream->src = src;
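The scaling block is unchanged except for the new "if (dm_state)" guard, so a NULL dm_state (now passed by amdgpu_dm_connector_mode_valid(), see the create_stream_for_sink() hunks below) simply skips aspect fitting and underscan. The arithmetic itself is the usual aspect-preserving fit: shrink whichever destination axis would otherwise need more upscaling. A standalone model of just that math, with illustrative names:

    #include <stdio.h>

    struct rect { int width, height; };

    /* fit src into dst preserving aspect, as in the RMX_ASPECT/RMX_OFF branch */
    static struct rect aspect_fit(struct rect src, struct rect dst)
    {
        if (src.width * dst.height < src.height * dst.width)
            dst.width = src.width * dst.height / src.height;  /* pillarbox */
        else
            dst.height = src.height * dst.width / src.width;  /* letterbox */
        return dst;
    }

    int main(void)
    {
        struct rect src = { 1280, 720 }, dst = { 1920, 1200 };
        struct rect fit = aspect_fit(src, dst);
        printf("%dx%d\n", fit.width, fit.height); /* 1920x1080 */
        return 0;
    }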
@@ -2360,12 +2366,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 
 	if (aconnector == NULL) {
 		DRM_ERROR("aconnector is NULL!\n");
-		goto drm_connector_null;
-	}
-
-	if (dm_state == NULL) {
-		DRM_ERROR("dm_state is NULL!\n");
-		goto dm_state_null;
+		return stream;
 	}
 
 	drm_connector = &aconnector->base;
@@ -2377,18 +2378,18 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 	 */
 		if (aconnector->mst_port) {
 			dm_dp_mst_dc_sink_create(drm_connector);
-			goto mst_dc_sink_create_done;
+			return stream;
 		}
 
 		if (create_fake_sink(aconnector))
-			goto stream_create_fail;
+			return stream;
 	}
 
 	stream = dc_create_stream_for_sink(aconnector->dc_sink);
 
 	if (stream == NULL) {
 		DRM_ERROR("Failed to create stream for sink!\n");
-		goto stream_create_fail;
+		return stream;
 	}
 
 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
@@ -2414,9 +2415,12 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 	} else {
 		decide_crtc_timing_for_drm_display_mode(
 				&mode, preferred_mode,
-				dm_state->scaling != RMX_OFF);
+				dm_state ? (dm_state->scaling != RMX_OFF) : false);
 	}
 
+	if (!dm_state)
+		drm_mode_set_crtcinfo(&mode, 0);
+
 	fill_stream_properties_from_drm_display_mode(stream,
 			&mode, &aconnector->base);
 	update_stream_scaling_settings(&mode, dm_state, stream);
@@ -2426,10 +2430,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 			drm_connector,
 			aconnector->dc_sink);
 
-stream_create_fail:
-dm_state_null:
-drm_connector_null:
-mst_dc_sink_create_done:
+	update_stream_signal(stream);
+
 	return stream;
 }
 
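With the error labels gone, every failure path in create_stream_for_sink() returns immediately; since "stream" is still NULL at each of those points, "return stream" is just "return NULL" spelled so the function keeps a single return expression. A compact sketch of the before/after shape (types and names are illustrative):

    #include <stddef.h>

    struct stream;

    /* old shape: error paths jump to no-op labels at the end */
    static struct stream *create_old(int fail)
    {
        struct stream *stream = NULL;

        if (fail)
            goto stream_create_fail;
        /* ... */
    stream_create_fail:
        return stream;
    }

    /* new shape: bail out where the failure happens; stream is still
     * NULL here, so the observable result is identical */
    static struct stream *create_new(int fail)
    {
        struct stream *stream = NULL;

        if (fail)
            return stream;
        /* ... */
        return stream;
    }

    int main(void)
    {
        return (create_old(1) == NULL && create_new(1) == NULL) ? 0 : 1;
    }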
@@ -2497,6 +2499,27 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
 	return &state->base;
 }
 
+
+static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
+{
+	enum dc_irq_source irq_source;
+	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+	struct amdgpu_device *adev = crtc->dev->dev_private;
+
+	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
+	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
+}
+
+static int dm_enable_vblank(struct drm_crtc *crtc)
+{
+	return dm_set_vblank(crtc, true);
+}
+
+static void dm_disable_vblank(struct drm_crtc *crtc)
+{
+	dm_set_vblank(crtc, false);
+}
+
 /* Implemented only the options currently availible for the driver */
 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
 	.reset = dm_crtc_reset_state,
@@ -2506,6 +2529,8 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
 	.page_flip = drm_atomic_helper_page_flip,
 	.atomic_duplicate_state = dm_crtc_duplicate_state,
 	.atomic_destroy_state = dm_crtc_destroy_state,
+	.enable_vblank = dm_enable_vblank,
+	.disable_vblank = dm_disable_vblank,
 };
 
 static enum drm_connector_status
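These hooks let the DRM core gate vblank interrupts per CRTC. Note how the bool that dc_interrupt_set() now returns (see the dc.c hunk near the end of this diff) is folded into the 0/-EBUSY convention the drm_crtc_funcs callbacks expect. A user-space model of just that mapping; the "irq service" stand-in is an assumption, not the real dal implementation:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* stand-in for dal_irq_service_set(): false = source not toggled */
    static bool irq_service_set(int src, bool enable)
    {
        (void)enable;
        return src >= 0;
    }

    /* mirrors dm_set_vblank(): bool result -> 0 or -EBUSY */
    static int set_vblank(int irq_source, bool enable)
    {
        return irq_service_set(irq_source, enable) ? 0 : -EBUSY;
    }

    int main(void)
    {
        printf("%d %d\n", set_vblank(3, true),    /* 0 */
                          set_vblank(-1, true));  /* -EBUSY */
        return 0;
    }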
@@ -2800,7 +2825,7 @@ int amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
 		goto fail;
 	}
 
-	stream = dc_create_stream_for_sink(dc_sink);
+	stream = create_stream_for_sink(aconnector, mode, NULL);
 	if (stream == NULL) {
 		DRM_ERROR("Failed to create stream for sink!\n");
 		goto fail;
@@ -3060,6 +3085,9 @@ static int dm_plane_atomic_check(struct drm_plane *plane,
 	if (!dm_plane_state->dc_state)
 		return 0;
 
+	if (!fill_rects_from_plane_state(state, dm_plane_state->dc_state))
+		return -EINVAL;
+
 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
 		return 0;
 
@@ -4632,8 +4660,6 @@ static int dm_update_planes_state(struct dc *dc,
 	bool pflip_needed = !state->allow_modeset;
 	int ret = 0;
 
-	if (pflip_needed)
-		return ret;
 
 	/* Add new planes */
 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
@@ -4648,6 +4674,8 @@
 
 		/* Remove any changed/removed planes */
 		if (!enable) {
+			if (pflip_needed)
+				continue;
 
 			if (!old_plane_crtc)
 				continue;
@@ -4679,6 +4707,7 @@
 			*lock_and_validation_needed = true;
 
 		} else { /* Add new planes */
+			struct dc_plane_state *dc_new_plane_state;
 
 			if (drm_atomic_plane_disabling(plane->state, new_plane_state))
 				continue;
@@ -4692,38 +4721,50 @@
 			if (!dm_new_crtc_state->stream)
 				continue;
 
+			if (pflip_needed)
+				continue;
+
 			WARN_ON(dm_new_plane_state->dc_state);
 
-			dm_new_plane_state->dc_state = dc_create_plane_state(dc);
+			dc_new_plane_state = dc_create_plane_state(dc);
+			if (!dc_new_plane_state) {
+				ret = -EINVAL;
+				return ret;
+			}
 
 			DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
 					plane->base.id, new_plane_crtc->base.id);
 
-			if (!dm_new_plane_state->dc_state) {
-				ret = -EINVAL;
-				return ret;
-			}
-
-			ret = fill_plane_attributes(
-				new_plane_crtc->dev->dev_private,
-				dm_new_plane_state->dc_state,
-				new_plane_state,
-				new_crtc_state);
-			if (ret)
-				return ret;
+			ret = fill_plane_attributes(
+				new_plane_crtc->dev->dev_private,
+				dc_new_plane_state,
+				new_plane_state,
+				new_crtc_state);
+			if (ret) {
+				dc_plane_state_release(dc_new_plane_state);
+				return ret;
+			}
 
+			/*
+			 * Any atomic check errors that occur after this will
+			 * not need a release. The plane state will be attached
+			 * to the stream, and therefore part of the atomic
+			 * state. It'll be released when the atomic state is
+			 * cleaned.
+			 */
 			if (!dc_add_plane_to_context(
 					dc,
 					dm_new_crtc_state->stream,
-					dm_new_plane_state->dc_state,
+					dc_new_plane_state,
 					dm_state->context)) {
 
+				dc_plane_state_release(dc_new_plane_state);
 				ret = -EINVAL;
 				return ret;
 			}
 
+			dm_new_plane_state->dc_state = dc_new_plane_state;
+
 			/* Tell DC to do a full surface update every time there
 			 * is a plane change. Inefficient, but works for now.
 			 */
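The ownership rule in the rework above: build the new plane state in a local, release it yourself on any failure, and only publish it to dm_new_plane_state->dc_state once dc_add_plane_to_context() has taken its own reference; after that the atomic state owns the release, as the new comment in the hunk spells out. A runnable model of that create/publish-or-release pattern with an explicit refcount (all names illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct plane_state { int refcount; };

    static struct plane_state *plane_state_create(void)
    {
        struct plane_state *ps = calloc(1, sizeof(*ps));
        if (ps)
            ps->refcount = 1;
        return ps;
    }

    static void plane_state_release(struct plane_state *ps)
    {
        if (ps && --ps->refcount == 0)
            free(ps);
    }

    /* stand-in for dc_add_plane_to_context(): takes a reference on success */
    static int add_to_context(struct plane_state *ps, int ok)
    {
        if (ok)
            ps->refcount++;
        return ok;
    }

    static int update_plane(struct plane_state **out, int attrs_ok, int ctx_ok)
    {
        struct plane_state *ps = plane_state_create();
        if (!ps)
            return -1;

        if (!attrs_ok) {                   /* fill_plane_attributes() failed */
            plane_state_release(ps);
            return -1;
        }

        if (!add_to_context(ps, ctx_ok)) { /* dc_add_plane_to_context() failed */
            plane_state_release(ps);
            return -1;
        }

        *out = ps;  /* publish only after the context holds it */
        return 0;
    }

    int main(void)
    {
        struct plane_state *ps = NULL;
        printf("ok=%d\n", update_plane(&ps, 1, 1)); /* ok=0 */
        plane_state_release(ps);  /* context's reference */
        plane_state_release(ps);  /* published reference */
        return 0;
    }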
@@ -4737,6 +4778,30 @@
 	return ret;
 }
 
+static int dm_atomic_check_plane_state_fb(struct drm_atomic_state *state,
+					  struct drm_crtc *crtc)
+{
+	struct drm_plane *plane;
+	struct drm_crtc_state *crtc_state;
+
+	WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));
+
+	drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
+		struct drm_plane_state *plane_state =
+			drm_atomic_get_plane_state(state, plane);
+
+		if (IS_ERR(plane_state))
+			return -EDEADLK;
+
+		crtc_state = drm_atomic_get_crtc_state(plane_state->state, crtc);
+		if (crtc->primary == plane && crtc_state->active) {
+			if (!plane_state->fb)
+				return -EINVAL;
+		}
+	}
+	return 0;
+}
+
 static int amdgpu_dm_atomic_check(struct drm_device *dev,
 				  struct drm_atomic_state *state)
 {
@@ -4760,6 +4825,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 		goto fail;
 
 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+		ret = dm_atomic_check_plane_state_fb(state, crtc);
+		if (ret)
+			goto fail;
+
 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
 		    !new_crtc_state->color_mgmt_changed)
 			continue;
@@ -683,10 +683,8 @@ static const struct amdgpu_irq_src_funcs dm_hpd_irq_funcs = {
 
 void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
 {
-	if (adev->mode_info.num_crtc > 0)
-		adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
-	else
-		adev->crtc_irq.num_types = 0;
-
+	adev->crtc_irq.num_types = adev->mode_info.num_crtc;
 	adev->crtc_irq.funcs = &dm_crtc_irq_funcs;
 
 	adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
@@ -189,6 +189,12 @@ void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
 		.link = aconnector->dc_link,
 		.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
 
+	/*
+	 * TODO: Need to further figure out why ddc.algo is NULL while MST port exists
+	 */
+	if (!aconnector->port || !aconnector->port->aux.ddc.algo)
+		return;
+
 	edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
 
 	if (!edid) {
@@ -1358,13 +1358,13 @@ enum dc_irq_source dc_interrupt_to_irq_source(
 	return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
 }
 
-void dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
+bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
 {
 
 	if (dc == NULL)
-		return;
+		return false;
 
-	dal_irq_service_set(dc->res_pool->irqs, src, enable);
+	return dal_irq_service_set(dc->res_pool->irqs, src, enable);
 }
 
 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
@@ -1749,8 +1749,7 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
 			link->link_enc,
 			pipe_ctx->clock_source->id,
 			display_color_depth,
-			pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A,
-			pipe_ctx->stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK,
+			pipe_ctx->stream->signal,
 			stream->phy_pix_clk);
 
 	if (pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
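The HDMI enable call now hands the encoder the raw signal type and lets it derive the "is HDMI" / "is dual-link DVI" flags itself, instead of forcing every caller to precompute two booleans. A sketch of what that callee-side derivation presumably looks like; the enum members are the real dc names shown in the hunk, but the values and the helper are hypothetical:

    #include <stdbool.h>
    #include <stdio.h>

    /* member names as in dc; values illustrative */
    enum signal_type {
        SIGNAL_TYPE_NONE,
        SIGNAL_TYPE_HDMI_TYPE_A,
        SIGNAL_TYPE_DVI_SINGLE_LINK,
        SIGNAL_TYPE_DVI_DUAL_LINK,
    };

    /* hypothetical: derives the two flags the old prototype took as parameters */
    static void derive_flags(enum signal_type signal, bool *is_hdmi, bool *is_dual_link)
    {
        *is_hdmi = (signal == SIGNAL_TYPE_HDMI_TYPE_A);
        *is_dual_link = (signal == SIGNAL_TYPE_DVI_DUAL_LINK);
    }

    int main(void)
    {
        bool hdmi, dual;
        derive_flags(SIGNAL_TYPE_HDMI_TYPE_A, &hdmi, &dual);
        printf("hdmi=%d dual=%d\n", hdmi, dual); /* hdmi=1 dual=0 */
        return 0;
    }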