Merge branch 'for-4.5/block-dax' into for-4.5/libnvdimm
commit 8b63b6bfc1
@@ -11,6 +11,10 @@ Required properties:
      0 = active high
      1 = active low

+Optional properties:
+- little-endian : GPIO registers are used as little endian. If not
+                  present registers are used as big endian by default.
+
Example:

gpio0: gpio@1100 {
@@ -2975,6 +2975,7 @@ F:	kernel/cpuset.c

CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG)
M:	Johannes Weiner <hannes@cmpxchg.org>
M:	Michal Hocko <mhocko@kernel.org>
+M:	Vladimir Davydov <vdavydov@virtuozzo.com>
L:	cgroups@vger.kernel.org
L:	linux-mm@kvack.org
S:	Maintained

@@ -8286,7 +8287,7 @@ F:	include/linux/delayacct.h
F:	kernel/delayacct.c

PERFORMANCE EVENTS SUBSYSTEM
-M:	Peter Zijlstra <a.p.zijlstra@chello.nl>
+M:	Peter Zijlstra <peterz@infradead.org>
M:	Ingo Molnar <mingo@redhat.com>
M:	Arnaldo Carvalho de Melo <acme@kernel.org>
L:	linux-kernel@vger.kernel.org
Makefile
@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 4
SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
NAME = Blurry Fish Butt

# *DOCUMENTATION*
@@ -2,6 +2,7 @@ config ARM
	bool
	default y
	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
+	select ARCH_HAS_DEVMEM_IS_ALLOWED
	select ARCH_HAS_ELF_RANDOMIZE
	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
	select ARCH_HAVE_CUSTOM_GPIO_H
@@ -15,20 +15,6 @@ config ARM_PTDUMP
	  kernel.
	  If in doubt, say "N"

-config STRICT_DEVMEM
-	bool "Filter access to /dev/mem"
-	depends on MMU
-	---help---
-	  If this option is disabled, you allow userspace (root) access to all
-	  of memory, including kernel and userspace memory. Accidental
-	  access to this is obviously disastrous, but specific access can
-	  be used by people debugging the kernel.
-
-	  If this option is switched on, the /dev/mem file only allows
-	  userspace access to memory mapped peripherals.
-
-	  If in doubt, say Y.
-
# RMK wants arm kernels compiled with frame pointers or stack unwinding.
# If you know what you are doing and are willing to live without stack
# traces, you can get a slightly smaller kernel by setting this option to
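This removal (and the matching ones for arm64, powerpc, s390, tile, unicore32, and x86 below) replaces the per-arch STRICT_DEVMEM options with a shared definition keyed off the new ARCH_HAS_DEVMEM_IS_ALLOWED selects; each architecture keeps only a devmem_is_allowed() hook. A minimal sketch of such a hook, assuming ARM-like page_is_ram()/iomem_is_exclusive() semantics — illustrative, not part of this diff:

#include <linux/ioport.h>
#include <linux/mm.h>

/*
 * Sketch of the per-arch hook behind ARCH_HAS_DEVMEM_IS_ALLOWED: with
 * STRICT_DEVMEM enabled, /dev/mem may only reach pages the architecture
 * reports as safe.  Returns nonzero to allow access to @pfn.
 */
int devmem_is_allowed(unsigned long pfn)
{
	/* Never expose regions a driver has claimed exclusively. */
	if (iomem_is_exclusive(pfn << PAGE_SHIFT))
		return 0;
	/* Memory-mapped peripherals (non-RAM) stay accessible. */
	if (!page_is_ram(pfn))
		return 1;
	/* System RAM (kernel and user memory) is filtered out. */
	return 0;
}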
@@ -74,7 +74,7 @@
	reg = <0x48240200 0x100>;
	interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
	interrupt-parent = <&gic>;
-	clocks = <&dpll_mpu_m2_ck>;
+	clocks = <&mpu_periphclk>;
};

local_timer: timer@48240600 {
@@ -82,7 +82,7 @@
	reg = <0x48240600 0x100>;
	interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
	interrupt-parent = <&gic>;
-	clocks = <&dpll_mpu_m2_ck>;
+	clocks = <&mpu_periphclk>;
};

l2-cache-controller@48242000 {
@@ -259,6 +259,14 @@
	ti,invert-autoidle-bit;
};

+mpu_periphclk: mpu_periphclk {
+	#clock-cells = <0>;
+	compatible = "fixed-factor-clock";
+	clocks = <&dpll_mpu_m2_ck>;
+	clock-mult = <1>;
+	clock-div = <2>;
+};
+
dpll_ddr_ck: dpll_ddr_ck {
	#clock-cells = <0>;
	compatible = "ti,am3-dpll-clock";
@@ -184,6 +184,7 @@
	regulator-name = "VDD_SDHC_1V8";
	regulator-min-microvolt = <1800000>;
	regulator-max-microvolt = <1800000>;
+	regulator-always-on;
};
};
};
@@ -118,7 +118,8 @@
sdhci0: sdhci@ab0000 {
	compatible = "mrvl,pxav3-mmc";
	reg = <0xab0000 0x200>;
-	clocks = <&chip_clk CLKID_SDIO1XIN>;
+	clocks = <&chip_clk CLKID_SDIO1XIN>, <&chip_clk CLKID_SDIO>;
+	clock-names = "io", "core";
	interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
	status = "disabled";
};
@@ -126,7 +127,8 @@
sdhci1: sdhci@ab0800 {
	compatible = "mrvl,pxav3-mmc";
	reg = <0xab0800 0x200>;
-	clocks = <&chip_clk CLKID_SDIO1XIN>;
+	clocks = <&chip_clk CLKID_SDIO1XIN>, <&chip_clk CLKID_SDIO>;
+	clock-names = "io", "core";
	interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
	status = "disabled";
};
@@ -135,7 +137,7 @@
	compatible = "mrvl,pxav3-mmc";
	reg = <0xab1000 0x200>;
	interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
-	clocks = <&chip_clk CLKID_NFC_ECC>, <&chip_clk CLKID_NFC>;
+	clocks = <&chip_clk CLKID_NFC_ECC>, <&chip_clk CLKID_SDIO>;
	clock-names = "io", "core";
	status = "disabled";
};
@@ -218,6 +218,7 @@
	reg = <0x480c8000 0x2000>;
	interrupts = <77>;
	ti,hwmods = "mailbox";
+	#mbox-cells = <1>;
	ti,mbox-num-users = <4>;
	ti,mbox-num-fifos = <12>;
	mbox_dsp: mbox_dsp {
@@ -279,8 +280,11 @@
	ti,spi-num-cs = <4>;
	ti,hwmods = "mcspi1";
	dmas = <&edma 16 &edma 17
-		&edma 18 &edma 19>;
-	dma-names = "tx0", "rx0", "tx1", "rx1";
+		&edma 18 &edma 19
+		&edma 20 &edma 21
+		&edma 22 &edma 23>;
+	dma-names = "tx0", "rx0", "tx1", "rx1",
+		    "tx2", "rx2", "tx3", "rx3";
};

mmc1: mmc@48060000 {
@@ -18,8 +18,3 @@
	reg = <0x80000000 0x10000000>;
};
};
-
-&L2 {
-	arm,data-latency = <2 1 2>;
-	arm,tag-latency = <3 2 3>;
-};
@@ -19,7 +19,7 @@
	reg = <0x40006000 0x1000>;
	cache-unified;
	cache-level = <2>;
-	arm,data-latency = <1 1 1>;
+	arm,data-latency = <3 3 3>;
	arm,tag-latency = <2 2 2>;
};
};
@@ -178,8 +178,10 @@
	compatible = "fsl,vf610-sai";
	reg = <0x40031000 0x1000>;
	interrupts = <86 IRQ_TYPE_LEVEL_HIGH>;
-	clocks = <&clks VF610_CLK_SAI2>;
-	clock-names = "sai";
+	clocks = <&clks VF610_CLK_SAI2>,
+		 <&clks VF610_CLK_SAI2_DIV>,
+		 <&clks 0>, <&clks 0>;
+	clock-names = "bus", "mclk1", "mclk2", "mclk3";
	dma-names = "tx", "rx";
	dmas = <&edma0 0 21>,
	       <&edma0 0 20>;
@@ -21,6 +21,7 @@
#ifndef __ASSEMBLY__

#include <linux/io.h>
#include <asm/barrier.h>

#define __ACCESS_CP15(CRn, Op1, CRm, Op2)	p15, Op1, %0, CRn, CRm, Op2
#define __ACCESS_CP15_64(Op1, CRm)		p15, Op1, %Q0, %R0, CRm
@@ -4,7 +4,6 @@ menuconfig ARCH_AT91
	select ARCH_REQUIRE_GPIOLIB
	select COMMON_CLK_AT91
	select PINCTRL
-	select PINCTRL_AT91
	select SOC_BUS

if ARCH_AT91
@@ -17,6 +16,7 @@ config SOC_SAMA5D2
	select HAVE_AT91_USB_CLK
	select HAVE_AT91_H32MX
	select HAVE_AT91_GENERATED_CLK
+	select PINCTRL_AT91PIO4
	help
	  Select this if you are using one of Atmel's SAMA5D2 family SoC.

@@ -27,6 +27,7 @@ config SOC_SAMA5D3
	select HAVE_AT91_UTMI
	select HAVE_AT91_SMD
	select HAVE_AT91_USB_CLK
+	select PINCTRL_AT91
	help
	  Select this if you are using one of Atmel's SAMA5D3 family SoC.
	  This support covers SAMA5D31, SAMA5D33, SAMA5D34, SAMA5D35, SAMA5D36.
@@ -40,6 +41,7 @@ config SOC_SAMA5D4
	select HAVE_AT91_SMD
	select HAVE_AT91_USB_CLK
	select HAVE_AT91_H32MX
+	select PINCTRL_AT91
	help
	  Select this if you are using one of Atmel's SAMA5D4 family SoC.

@@ -50,6 +52,7 @@ config SOC_AT91RM9200
	select CPU_ARM920T
	select HAVE_AT91_USB_CLK
	select MIGHT_HAVE_PCI
+	select PINCTRL_AT91
	select SOC_SAM_V4_V5
	select SRAM if PM
	help
@@ -65,6 +68,7 @@ config SOC_AT91SAM9
	select HAVE_AT91_UTMI
	select HAVE_FB_ATMEL
	select MEMORY
+	select PINCTRL_AT91
	select SOC_SAM_V4_V5
	select SRAM if PM
	help
@@ -41,8 +41,10 @@
 * implementation should be moved down into the pinctrl driver and get
 * called as part of the generic suspend/resume path.
 */
+#ifdef CONFIG_PINCTRL_AT91
extern void at91_pinctrl_gpio_suspend(void);
extern void at91_pinctrl_gpio_resume(void);
+#endif

static struct {
	unsigned long uhp_udp_mask;
@@ -151,8 +153,9 @@ static void at91_pm_suspend(suspend_state_t state)

static int at91_pm_enter(suspend_state_t state)
{
#ifdef CONFIG_PINCTRL_AT91
	at91_pinctrl_gpio_suspend();
#endif
	switch (state) {
	/*
	 * Suspend-to-RAM is like STANDBY plus slow clock mode, so
@@ -192,7 +195,9 @@ static int at91_pm_enter(suspend_state_t state)
error:
	target_state = PM_SUSPEND_ON;

+#ifdef CONFIG_PINCTRL_AT91
	at91_pinctrl_gpio_resume();
+#endif
	return 0;
}
@@ -748,8 +748,12 @@ static void exynos5_powerdown_conf(enum sys_powerdown mode)
void exynos_sys_powerdown_conf(enum sys_powerdown mode)
{
	unsigned int i;
+	const struct exynos_pmu_data *pmu_data;

-	const struct exynos_pmu_data *pmu_data = pmu_context->pmu_data;
+	if (!pmu_context)
+		return;
+
+	pmu_data = pmu_context->pmu_data;

	if (pmu_data->powerdown_conf)
		pmu_data->powerdown_conf(mode);
@@ -143,7 +143,7 @@ static inline void __indirect_writesl(volatile void __iomem *bus_addr,
		writel(*vaddr++, bus_addr);
}

-static inline unsigned char __indirect_readb(const volatile void __iomem *p)
+static inline u8 __indirect_readb(const volatile void __iomem *p)
{
	u32 addr = (u32)p;
	u32 n, byte_enables, data;
@@ -166,7 +166,7 @@ static inline void __indirect_readsb(const volatile void __iomem *bus_addr,
		*vaddr++ = readb(bus_addr);
}

-static inline unsigned short __indirect_readw(const volatile void __iomem *p)
+static inline u16 __indirect_readw(const volatile void __iomem *p)
{
	u32 addr = (u32)p;
	u32 n, byte_enables, data;
@@ -189,7 +189,7 @@ static inline void __indirect_readsw(const volatile void __iomem *bus_addr,
		*vaddr++ = readw(bus_addr);
}

-static inline unsigned long __indirect_readl(const volatile void __iomem *p)
+static inline u32 __indirect_readl(const volatile void __iomem *p)
{
	u32 addr = (__force u32)p;
	u32 data;
@@ -350,7 +350,7 @@ static inline void insl(u32 io_addr, void *p, u32 count)
	 ((unsigned long)p <= (PIO_MASK + PIO_OFFSET)))

#define ioread8(p)			ioread8(p)
-static inline unsigned int ioread8(const void __iomem *addr)
+static inline u8 ioread8(const void __iomem *addr)
{
	unsigned long port = (unsigned long __force)addr;
	if (__is_io_address(port))
@@ -378,7 +378,7 @@ static inline void ioread8_rep(const void __iomem *addr, void *vaddr, u32 count)
}

#define ioread16(p)			ioread16(p)
-static inline unsigned int ioread16(const void __iomem *addr)
+static inline u16 ioread16(const void __iomem *addr)
{
	unsigned long port = (unsigned long __force)addr;
	if (__is_io_address(port))
@@ -407,7 +407,7 @@ static inline void ioread16_rep(const void __iomem *addr, void *vaddr,
}

#define ioread32(p)			ioread32(p)
-static inline unsigned int ioread32(const void __iomem *addr)
+static inline u32 ioread32(const void __iomem *addr)
{
	unsigned long port = (unsigned long __force)addr;
	if (__is_io_address(port))
@@ -121,6 +121,7 @@ config ARCH_OMAP2PLUS_TYPICAL
	select NEON if CPU_V7
	select PM
	select REGULATOR
+	select REGULATOR_FIXED_VOLTAGE
	select TWL4030_CORE if ARCH_OMAP3 || ARCH_OMAP4
	select TWL4030_POWER if ARCH_OMAP3 || ARCH_OMAP4
	select VFP
@@ -201,7 +202,6 @@ config MACH_OMAP3_PANDORA
	depends on ARCH_OMAP3
	default y
	select OMAP_PACKAGE_CBB
-	select REGULATOR_FIXED_VOLTAGE if REGULATOR

config MACH_NOKIA_N810
	bool
@@ -889,6 +889,7 @@ static void __init e680_init(void)

	pxa_set_keypad_info(&e680_keypad_platform_data);

+	pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
	platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
	platform_add_devices(ARRAY_AND_SIZE(e680_devices));
}
@@ -956,6 +957,7 @@ static void __init a1200_init(void)

	pxa_set_keypad_info(&a1200_keypad_platform_data);

+	pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
	platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
	platform_add_devices(ARRAY_AND_SIZE(a1200_devices));
}
@@ -1148,6 +1150,7 @@ static void __init a910_init(void)
		platform_device_register(&a910_camera);
	}

+	pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
	platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
	platform_add_devices(ARRAY_AND_SIZE(a910_devices));
}
@@ -1215,6 +1218,7 @@ static void __init e6_init(void)

	pxa_set_keypad_info(&e6_keypad_platform_data);

+	pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
	platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
	platform_add_devices(ARRAY_AND_SIZE(e6_devices));
}
@@ -1256,6 +1260,7 @@ static void __init e2_init(void)

	pxa_set_keypad_info(&e2_keypad_platform_data);

+	pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
	platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
	platform_add_devices(ARRAY_AND_SIZE(e2_devices));
}
@@ -20,7 +20,7 @@
#include <plat/cpu.h>
#include <plat/cpu-freq-core.h>

-static struct cpufreq_frequency_table s3c2440_plls_12[] __initdata = {
+static struct cpufreq_frequency_table s3c2440_plls_12[] = {
	{ .frequency = 75000000,	.driver_data = PLLVAL(0x75, 3, 3),  },	/* FVco 600.000000 */
	{ .frequency = 80000000,	.driver_data = PLLVAL(0x98, 4, 3),  },	/* FVco 640.000000 */
	{ .frequency = 90000000,	.driver_data = PLLVAL(0x70, 2, 3),  },	/* FVco 720.000000 */
@@ -20,7 +20,7 @@
#include <plat/cpu.h>
#include <plat/cpu-freq-core.h>

-static struct cpufreq_frequency_table s3c2440_plls_169344[] __initdata = {
+static struct cpufreq_frequency_table s3c2440_plls_169344[] = {
	{ .frequency = 78019200,	.driver_data = PLLVAL(121, 5, 3),  },	/* FVco 624.153600 */
	{ .frequency = 84067200,	.driver_data = PLLVAL(131, 5, 3),  },	/* FVco 672.537600 */
	{ .frequency = 90115200,	.driver_data = PLLVAL(141, 5, 3),  },	/* FVco 720.921600 */
@@ -3,6 +3,7 @@ config ARM64
	select ACPI_CCA_REQUIRED if ACPI
	select ACPI_GENERIC_GSI if ACPI
	select ACPI_REDUCED_HARDWARE_ONLY if ACPI
+	select ARCH_HAS_DEVMEM_IS_ALLOWED
	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
	select ARCH_HAS_ELF_RANDOMIZE
	select ARCH_HAS_GCOV_PROFILE_ALL
@@ -14,20 +14,6 @@ config ARM64_PTDUMP
	  kernel.
	  If in doubt, say "N"

-config STRICT_DEVMEM
-	bool "Filter access to /dev/mem"
-	depends on MMU
-	help
-	  If this option is disabled, you allow userspace (root) access to all
-	  of memory, including kernel and userspace memory. Accidental
-	  access to this is obviously disastrous, but specific access can
-	  be used by people debugging the kernel.
-
-	  If this option is switched on, the /dev/mem file only allows
-	  userspace access to memory mapped peripherals.
-
-	  If in doubt, say Y.
-
config PID_IN_CONTEXTIDR
	bool "Write the current PID to the CONTEXTIDR register"
	help
@@ -269,6 +269,7 @@
	clock-frequency = <0>;	/* Updated by bootloader */
	voltage-ranges = <1800 1800 3300 3300>;
	sdhci,auto-cmd12;
+	little-endian;
	bus-width = <4>;
};

@@ -277,6 +278,7 @@
	reg = <0x0 0x2300000 0x0 0x10000>;
	interrupts = <0 36 0x4>; /* Level high type */
	gpio-controller;
+	little-endian;
	#gpio-cells = <2>;
	interrupt-controller;
	#interrupt-cells = <2>;
@@ -287,6 +289,7 @@
	reg = <0x0 0x2310000 0x0 0x10000>;
	interrupts = <0 36 0x4>; /* Level high type */
	gpio-controller;
+	little-endian;
	#gpio-cells = <2>;
	interrupt-controller;
	#interrupt-cells = <2>;
@@ -297,6 +300,7 @@
	reg = <0x0 0x2320000 0x0 0x10000>;
	interrupts = <0 37 0x4>; /* Level high type */
	gpio-controller;
+	little-endian;
	#gpio-cells = <2>;
	interrupt-controller;
	#interrupt-cells = <2>;
@@ -307,6 +311,7 @@
	reg = <0x0 0x2330000 0x0 0x10000>;
	interrupts = <0 37 0x4>; /* Level high type */
	gpio-controller;
+	little-endian;
	#gpio-cells = <2>;
	interrupt-controller;
	#interrupt-cells = <2>;
@@ -77,6 +77,7 @@
#ifndef __ASSEMBLY__

#include <linux/stringify.h>
#include <asm/barrier.h>

/*
 * Low-level accessors
@@ -276,10 +276,14 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
	 * hardware updates of the pte (ptep_set_access_flags safely changes
	 * valid ptes without going through an invalid entry).
	 */
-	if (IS_ENABLED(CONFIG_DEBUG_VM) && IS_ENABLED(CONFIG_ARM64_HW_AFDBM) &&
-	    pte_valid(*ptep)) {
-		BUG_ON(!pte_young(pte));
-		BUG_ON(pte_write(*ptep) && !pte_dirty(pte));
+	if (IS_ENABLED(CONFIG_ARM64_HW_AFDBM) &&
+	    pte_valid(*ptep) && pte_valid(pte)) {
+		VM_WARN_ONCE(!pte_young(pte),
+			     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
+			     __func__, pte_val(*ptep), pte_val(pte));
+		VM_WARN_ONCE(pte_write(*ptep) && !pte_dirty(pte),
+			     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
+			     __func__, pte_val(*ptep), pte_val(pte));
	}

	set_pte(ptep, pte);
@@ -5,6 +5,7 @@
 */

#include <asm-generic/vmlinux.lds.h>
+#include <asm/cache.h>
#include <asm/kernel-pgtable.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
@@ -140,7 +141,7 @@ SECTIONS
		ARM_EXIT_KEEP(EXIT_DATA)
	}

-	PERCPU_SECTION(64)
+	PERCPU_SECTION(L1_CACHE_BYTES)

	. = ALIGN(PAGE_SIZE);
	__init_end = .;
@@ -158,7 +159,7 @@ SECTIONS
	. = ALIGN(PAGE_SIZE);
	_data = .;
	_sdata = .;
-	RW_DATA_SECTION(64, PAGE_SIZE, THREAD_SIZE)
+	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
	PECOFF_EDATA_PADDING
	_edata = .;
@@ -14,7 +14,7 @@
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *
 * ppc:
@@ -10,6 +10,7 @@ config FRV
	select HAVE_DEBUG_BUGVERBOSE
	select ARCH_HAVE_NMI_SAFE_CMPXCHG
	select GENERIC_CPU_DEVICES
+	select ARCH_HAS_DEVMEM_IS_ALLOWED
	select ARCH_WANT_IPC_PARSE_VERSION
	select OLD_SIGSUSPEND3
	select OLD_SIGACTION
@@ -13,6 +13,7 @@ config M32R
	select GENERIC_IRQ_PROBE
	select GENERIC_IRQ_SHOW
	select GENERIC_ATOMIC64
+	select ARCH_HAS_DEVMEM_IS_ALLOWED
	select ARCH_USES_GETTIMEOFFSET
	select MODULES_USE_ELF_RELA
	select HAVE_DEBUG_STACKOVERFLOW
@@ -145,7 +145,7 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size,

	gfp = massage_gfp_flags(dev, gfp);

-	if (IS_ENABLED(CONFIG_DMA_CMA) && !(gfp & GFP_ATOMIC))
+	if (IS_ENABLED(CONFIG_DMA_CMA) && gfpflags_allow_blocking(gfp))
		page = dma_alloc_from_contiguous(dev,
					count, get_order(size));
	if (!page)
@@ -372,7 +372,8 @@ static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }
 */
#ifdef CONFIG_HUGETLB_PAGE
#define pte_huge(pte)           (pte_val(pte) & _PAGE_HUGE)
-#define pte_mkhuge(pte)         (__pte(pte_val(pte) | _PAGE_HUGE))
+#define pte_mkhuge(pte)         (__pte(pte_val(pte) | \
+				 (parisc_requires_coherency() ? 0 : _PAGE_HUGE)))
#else
#define pte_huge(pte)           (0)
#define pte_mkhuge(pte)         (pte)
@@ -360,8 +360,9 @@
#define __NR_execveat		(__NR_Linux + 342)
#define __NR_membarrier		(__NR_Linux + 343)
#define __NR_userfaultfd	(__NR_Linux + 344)
+#define __NR_mlock2		(__NR_Linux + 345)

-#define __NR_Linux_syscalls	(__NR_userfaultfd + 1)
+#define __NR_Linux_syscalls	(__NR_mlock2 + 1)


#define __IGNORE_select		/* newselect */
@@ -171,24 +171,6 @@ void pcibios_set_master(struct pci_dev *dev)
}


-void __init pcibios_init_bus(struct pci_bus *bus)
-{
-	struct pci_dev *dev = bus->self;
-	unsigned short bridge_ctl;
-
-	/* We deal only with pci controllers and pci-pci bridges. */
-	if (!dev || (dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
-		return;
-
-	/* PCI-PCI bridge - set the cache line and default latency
-	   (32) for primary and secondary buses. */
-	pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, 32);
-
-	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bridge_ctl);
-	bridge_ctl |= PCI_BRIDGE_CTL_PARITY | PCI_BRIDGE_CTL_SERR;
-	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bridge_ctl);
-}
-
/*
 * pcibios align resources() is called every time generic PCI code
 * wants to generate a new address. The process of looking for
@@ -440,6 +440,7 @@
	ENTRY_COMP(execveat)
	ENTRY_SAME(membarrier)
	ENTRY_SAME(userfaultfd)
+	ENTRY_SAME(mlock2)	/* 345 */

	.ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))
@@ -159,6 +159,7 @@ config PPC
	select EDAC_SUPPORT
	select EDAC_ATOMIC_SCRUB
	select ARCH_HAS_DMA_SET_COHERENT_MASK
+	select ARCH_HAS_DEVMEM_IS_ALLOWED
	select HAVE_ARCH_SECCOMP_FILTER

config GENERIC_CSUM
@@ -335,18 +335,6 @@ config PPC_EARLY_DEBUG_CPM_ADDR
	  platform probing is done, all platforms selected must
	  share the same address.

-config STRICT_DEVMEM
-	def_bool y
-	prompt "Filter access to /dev/mem"
-	help
-	  This option restricts access to /dev/mem.  If this option is
-	  disabled, you allow userspace access to all memory, including
-	  kernel and userspace memory. Accidental memory access is likely
-	  to be disastrous.
-	  Memory access is required for experts who want to debug the kernel.
-
-	  If you are unsure, say Y.
-
config FAIL_IOMMU
	bool "Fault-injection capability for IOMMU"
	depends on FAULT_INJECTION
@@ -227,23 +227,15 @@
	reg = <0x520 0x20>;

	phy0: ethernet-phy@1f {
-		interrupt-parent = <&mpic>;
-		interrupts = <10 1>;
		reg = <0x1f>;
	};
	phy1: ethernet-phy@0 {
-		interrupt-parent = <&mpic>;
-		interrupts = <10 1>;
		reg = <0>;
	};
	phy2: ethernet-phy@1 {
-		interrupt-parent = <&mpic>;
-		interrupts = <10 1>;
		reg = <1>;
	};
	phy3: ethernet-phy@2 {
-		interrupt-parent = <&mpic>;
-		interrupts = <10 1>;
		reg = <2>;
	};
	tbi0: tbi-phy@11 {
@@ -590,16 +590,10 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
	eeh_ops->configure_bridge(pe);
	eeh_pe_restore_bars(pe);

-	/*
-	 * If it's PHB PE, the frozen state on all available PEs should have
-	 * been cleared by the PHB reset. Otherwise, we unfreeze the PE and its
-	 * child PEs because they might be in frozen state.
-	 */
-	if (!(pe->type & EEH_PE_PHB)) {
-		rc = eeh_clear_pe_frozen_state(pe, false);
-		if (rc)
-			return rc;
-	}
+	/* Clear frozen state */
+	rc = eeh_clear_pe_frozen_state(pe, false);
+	if (rc)
+		return rc;

	/* Give the system 5 seconds to finish running the user-space
	 * hotplug shutdown scripts, e.g. ifdown for ethernet.  Yes,
@@ -43,11 +43,34 @@ static unsigned int opal_irq_count;
static unsigned int *opal_irqs;

static void opal_handle_irq_work(struct irq_work *work);
-static __be64 last_outstanding_events;
+static u64 last_outstanding_events;
static struct irq_work opal_event_irq_work = {
	.func = opal_handle_irq_work,
};

+void opal_handle_events(uint64_t events)
+{
+	int virq, hwirq = 0;
+	u64 mask = opal_event_irqchip.mask;
+
+	if (!in_irq() && (events & mask)) {
+		last_outstanding_events = events;
+		irq_work_queue(&opal_event_irq_work);
+		return;
+	}
+
+	while (events & mask) {
+		hwirq = fls64(events) - 1;
+		if (BIT_ULL(hwirq) & mask) {
+			virq = irq_find_mapping(opal_event_irqchip.domain,
+						hwirq);
+			if (virq)
+				generic_handle_irq(virq);
+		}
+		events &= ~BIT_ULL(hwirq);
+	}
+}
+
static void opal_event_mask(struct irq_data *d)
{
	clear_bit(d->hwirq, &opal_event_irqchip.mask);
@@ -55,12 +78,12 @@ static void opal_event_mask(struct irq_data *d)

static void opal_event_unmask(struct irq_data *d)
{
+	__be64 events;
+
	set_bit(d->hwirq, &opal_event_irqchip.mask);

-	opal_poll_events(&last_outstanding_events);
-	if (last_outstanding_events & opal_event_irqchip.mask)
-		/* Need to retrigger the interrupt */
-		irq_work_queue(&opal_event_irq_work);
+	opal_poll_events(&events);
+	opal_handle_events(be64_to_cpu(events));
}

static int opal_event_set_type(struct irq_data *d, unsigned int flow_type)
@@ -96,29 +119,6 @@ static int opal_event_map(struct irq_domain *d, unsigned int irq,
	return 0;
}

-void opal_handle_events(uint64_t events)
-{
-	int virq, hwirq = 0;
-	u64 mask = opal_event_irqchip.mask;
-
-	if (!in_irq() && (events & mask)) {
-		last_outstanding_events = events;
-		irq_work_queue(&opal_event_irq_work);
-		return;
-	}
-
-	while (events & mask) {
-		hwirq = fls64(events) - 1;
-		if (BIT_ULL(hwirq) & mask) {
-			virq = irq_find_mapping(opal_event_irqchip.domain,
-						hwirq);
-			if (virq)
-				generic_handle_irq(virq);
-		}
-		events &= ~BIT_ULL(hwirq);
-	}
-}
-
static irqreturn_t opal_interrupt(int irq, void *data)
{
	__be64 events;
@@ -131,7 +131,7 @@ static irqreturn_t opal_interrupt(int irq, void *data)

static void opal_handle_irq_work(struct irq_work *work)
{
-	opal_handle_events(be64_to_cpu(last_outstanding_events));
+	opal_handle_events(last_outstanding_events);
}

static int opal_event_match(struct irq_domain *h, struct device_node *node,
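The relocated opal_handle_events() walks the event word from the most significant set bit downward, skipping bits outside the current mask. The same dispatch pattern in isolation — a standalone sketch, not OPAL-specific; dispatch_events() and handle() are illustrative names:

#include <linux/bitops.h>

/*
 * Highest-bit-first event dispatch: fls64() finds the top set bit,
 * BIT_ULL() clears it, and bits outside @mask are dropped without
 * being handled.  Terminates once no masked-in events remain.
 */
static void dispatch_events(u64 events, u64 mask,
			    void (*handle)(unsigned int hwirq))
{
	while (events & mask) {
		unsigned int hwirq = fls64(events) - 1;

		if (BIT_ULL(hwirq) & mask)
			handle(hwirq);
		events &= ~BIT_ULL(hwirq);
	}
}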
@@ -66,6 +66,7 @@ config S390
	def_bool y
	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
	select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
+	select ARCH_HAS_DEVMEM_IS_ALLOWED
	select ARCH_HAS_ELF_RANDOMIZE
	select ARCH_HAS_GCOV_PROFILE_ALL
	select ARCH_HAS_SG_CHAIN
@@ -5,18 +5,6 @@ config TRACE_IRQFLAGS_SUPPORT

source "lib/Kconfig.debug"

-config STRICT_DEVMEM
-	def_bool y
-	prompt "Filter access to /dev/mem"
-	---help---
-	  This option restricts access to /dev/mem.  If this option is
-	  disabled, you allow userspace access to all memory, including
-	  kernel and userspace memory. Accidental memory access is likely
-	  to be disastrous.
-	  Memory access is required for experts who want to debug the kernel.
-
-	  If you are unsure, say Y.
-
config S390_PTDUMP
	bool "Export kernel pagetable layout to userspace via debugfs"
	depends on DEBUG_KERNEL
@@ -278,7 +278,7 @@
#define __NR_fsetxattr		256
#define __NR_getxattr		257
#define __NR_lgetxattr		258
-#define __NR_fgetxattr		269
+#define __NR_fgetxattr		259
#define __NR_listxattr		260
#define __NR_llistxattr		261
#define __NR_flistxattr		262
@@ -10,7 +10,7 @@
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *
 * ppc:
@@ -9,7 +9,7 @@
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 */

#include <linux/perf_event.h>
@@ -19,6 +19,7 @@ config TILE
	select VIRT_TO_BUS
	select SYS_HYPERVISOR
	select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
+	select ARCH_HAS_DEVMEM_IS_ALLOWED
	select ARCH_HAVE_NMI_SAFE_CMPXCHG
	select GENERIC_CLOCKEVENTS
	select MODULES_USE_ELF_RELA
@@ -116,9 +117,6 @@ config ARCH_DISCONTIGMEM_DEFAULT
config TRACE_IRQFLAGS_SUPPORT
	def_bool y

-config STRICT_DEVMEM
-	def_bool y
-
# SMP is required for Tilera Linux.
config SMP
	def_bool y
@@ -21,7 +21,7 @@
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *  Copyright (C) 2009 Google, Inc., Stephane Eranian
 */
@@ -131,7 +131,7 @@ export LDS_ELF_FORMAT := $(ELF_FORMAT)
# The wrappers will select whether using "malloc" or the kernel allocator.
LINK_WRAPS = -Wl,--wrap,malloc -Wl,--wrap,free -Wl,--wrap,calloc

-LD_FLAGS_CMDLINE = $(foreach opt,$(LDFLAGS),-Wl,$(opt)) -lrt
+LD_FLAGS_CMDLINE = $(foreach opt,$(LDFLAGS),-Wl,$(opt))

# Used by link-vmlinux.sh which has special support for um link
export CFLAGS_vmlinux := $(LINK-y) $(LINK_WRAPS) $(LD_FLAGS_CMDLINE)
@@ -249,21 +249,23 @@ void close_addr(unsigned char *addr, unsigned char *netmask, void *arg)

char *split_if_spec(char *str, ...)
{
-	char **arg, *end;
+	char **arg, *end, *ret = NULL;
	va_list ap;

	va_start(ap, str);
	while ((arg = va_arg(ap, char **)) != NULL) {
		if (*str == '\0')
-			return NULL;
+			goto out;
		end = strchr(str, ',');
		if (end != str)
			*arg = str;
		if (end == NULL)
-			return NULL;
+			goto out;
		*end++ = '\0';
		str = end;
	}
+	ret = str;
+out:
	va_end(ap);
-	return str;
+	return ret;
}
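For context, a hypothetical caller of split_if_spec() under the fixed contract — field pointers are filled from the comma-separated spec, the NULL sentinel ends the argument list, and the return value is the unconsumed remainder (NULL if the spec runs out early or has no trailing field). The names here are illustrative:

/* Sketch of a caller: parse "mac,dev,rest" into two fields. */
static int parse_spec_example(char *spec)
{
	char *mac = NULL, *dev = NULL, *rest;

	rest = split_if_spec(spec, &mac, &dev, NULL);
	if (rest == NULL)
		return -1;	/* malformed spec or nothing left after dev */
	return 0;
}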
@@ -69,7 +69,7 @@ void do_signal(struct pt_regs *regs)
	struct ksignal ksig;
	int handled_sig = 0;

-	while (get_signal(&ksig)) {
+	if (get_signal(&ksig)) {
		handled_sig = 1;
		/* Whee!  Actually deliver the signal.  */
		handle_signal(&ksig, regs);
@@ -1,5 +1,6 @@
config UNICORE32
	def_bool y
+	select ARCH_HAS_DEVMEM_IS_ALLOWED
	select ARCH_MIGHT_HAVE_PC_PARPORT
	select ARCH_MIGHT_HAVE_PC_SERIO
	select HAVE_MEMBLOCK
@@ -2,20 +2,6 @@ menu "Kernel hacking"

source "lib/Kconfig.debug"

-config STRICT_DEVMEM
-	bool "Filter access to /dev/mem"
-	depends on MMU
-	---help---
-	  If this option is disabled, you allow userspace (root) access to all
-	  of memory, including kernel and userspace memory. Accidental
-	  access to this is obviously disastrous, but specific access can
-	  be used by people debugging the kernel.
-
-	  If this option is switched on, the /dev/mem file only allows
-	  userspace access to memory mapped peripherals.
-
-	  If in doubt, say Y.
-
config EARLY_PRINTK
	def_bool DEBUG_OCD
	help
@@ -24,6 +24,7 @@ config X86
	select ARCH_DISCARD_MEMBLOCK
	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
	select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
+	select ARCH_HAS_DEVMEM_IS_ALLOWED
	select ARCH_HAS_ELF_RANDOMIZE
	select ARCH_HAS_FAST_MULTIPLIER
	select ARCH_HAS_GCOV_PROFILE_ALL
@@ -5,23 +5,6 @@ config TRACE_IRQFLAGS_SUPPORT

source "lib/Kconfig.debug"

-config STRICT_DEVMEM
-	bool "Filter access to /dev/mem"
-	---help---
-	  If this option is disabled, you allow userspace (root) access to all
-	  of memory, including kernel and userspace memory. Accidental
-	  access to this is obviously disastrous, but specific access can
-	  be used by people debugging the kernel. Note that with PAT support
-	  enabled, even in this case there are restrictions on /dev/mem
-	  use due to the cache aliasing requirements.
-
-	  If this option is switched on, the /dev/mem file only allows
-	  userspace access to PCI space and the BIOS code and data regions.
-	  This is sufficient for dosemu and X and all common users of
-	  /dev/mem.
-
-	  If in doubt, say Y.
-
config X86_VERBOSE_BOOTUP
	bool "Enable verbose x86 bootup info messages"
	default y
@@ -5,7 +5,7 @@
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *  Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
@@ -5,7 +5,7 @@
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *  Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
@@ -387,7 +387,7 @@ struct cpu_hw_events {
/* Check flags and event code/umask, and set the HSW N/A flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
	__EVENT_CONSTRAINT(code, n, \
-			  INTEL_ARCH_EVENT_MASK|INTEL_ARCH_EVENT_MASK, \
+			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)

@@ -627,6 +627,7 @@ struct x86_perf_task_context {
	u64 lbr_from[MAX_LBR_ENTRIES];
	u64 lbr_to[MAX_LBR_ENTRIES];
	u64 lbr_info[MAX_LBR_ENTRIES];
+	int tos;
	int lbr_callstack_users;
	int lbr_stack_state;
};
@@ -232,7 +232,7 @@ static struct event_constraint intel_hsw_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
-	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.* */
+	INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	/* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
@@ -298,7 +298,7 @@ static bool __match_event(struct perf_event *a, struct perf_event *b)
static inline struct perf_cgroup *event_to_cgroup(struct perf_event *event)
{
	if (event->attach_state & PERF_ATTACH_TASK)
-		return perf_cgroup_from_task(event->hw.target);
+		return perf_cgroup_from_task(event->hw.target, event->ctx);

	return event->cgrp;
}
@@ -239,7 +239,7 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
	}

	mask = x86_pmu.lbr_nr - 1;
-	tos = intel_pmu_lbr_tos();
+	tos = task_ctx->tos;
	for (i = 0; i < tos; i++) {
		lbr_idx = (tos - i) & mask;
		wrmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
@@ -247,6 +247,7 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
			wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
	}
+	wrmsrl(x86_pmu.lbr_tos, tos);
	task_ctx->lbr_stack_state = LBR_NONE;
}

@@ -270,6 +271,7 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
			rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
	}
+	task_ctx->tos = tos;
	task_ctx->lbr_stack_state = LBR_VALID;
}
@@ -1,7 +1,7 @@
/*
 * x86 specific code for irq_work
 *
- * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
 */

#include <linux/kernel.h>
@@ -211,7 +211,7 @@ static int copy_sc_from_user(struct pt_regs *regs,
		if (err)
			return 1;

-		err = convert_fxsr_from_user(&fpx, sc.fpstate);
+		err = convert_fxsr_from_user(&fpx, (void *)sc.fpstate);
		if (err)
			return 1;

@@ -227,7 +227,7 @@ static int copy_sc_from_user(struct pt_regs *regs,
	{
		struct user_i387_struct fp;

-		err = copy_from_user(&fp, sc.fpstate,
+		err = copy_from_user(&fp, (void *)sc.fpstate,
				     sizeof(struct user_i387_struct));
		if (err)
			return 1;
@@ -291,7 +291,7 @@ static int copy_sc_to_user(struct sigcontext __user *to,
#endif
#undef PUTREG
	sc.oldmask = mask;
-	sc.fpstate = to_fp;
+	sc.fpstate = (unsigned long)to_fp;

	err = copy_to_user(to, &sc, sizeof(struct sigcontext));
	if (err)
@@ -468,12 +468,10 @@ long sys_sigreturn(void)
	struct sigframe __user *frame = (struct sigframe __user *)(sp - 8);
	sigset_t set;
	struct sigcontext __user *sc = &frame->sc;
-	unsigned long __user *oldmask = &sc->oldmask;
-	unsigned long __user *extramask = frame->extramask;
	int sig_size = (_NSIG_WORDS - 1) * sizeof(unsigned long);

-	if (copy_from_user(&set.sig[0], oldmask, sizeof(set.sig[0])) ||
-	    copy_from_user(&set.sig[1], extramask, sig_size))
+	if (copy_from_user(&set.sig[0], (void *)sc->oldmask, sizeof(set.sig[0])) ||
+	    copy_from_user(&set.sig[1], frame->extramask, sig_size))
		goto segfault;

	set_current_blocked(&set);
@@ -505,6 +503,7 @@ int setup_signal_stack_si(unsigned long stack_top, struct ksignal *ksig,
{
	struct rt_sigframe __user *frame;
	int err = 0, sig = ksig->sig;
+	unsigned long fp_to;

	frame = (struct rt_sigframe __user *)
		round_down(stack_top - sizeof(struct rt_sigframe), 16);
@@ -526,7 +525,10 @@ int setup_signal_stack_si(unsigned long stack_top, struct ksignal *ksig,
	err |= __save_altstack(&frame->uc.uc_stack, PT_REGS_SP(regs));
	err |= copy_sc_to_user(&frame->uc.uc_mcontext, &frame->fpstate, regs,
			       set->sig[0]);
-	err |= __put_user(&frame->fpstate, &frame->uc.uc_mcontext.fpstate);
+
+	fp_to = (unsigned long)&frame->fpstate;
+
+	err |= __put_user(fp_to, &frame->uc.uc_mcontext.fpstate);
	if (sizeof(*set) == 16) {
		err |= __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]);
		err |= __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]);
@@ -8,7 +8,7 @@ obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-tag.o blk-sysfs.o \
			blk-iopoll.o blk-lib.o blk-mq.o blk-mq-tag.o \
			blk-mq-sysfs.o blk-mq-cpu.o blk-mq-cpumap.o ioctl.o \
			genhd.o scsi_ioctl.o partition-generic.o ioprio.o \
-			partitions/
+			badblocks.o partitions/

obj-$(CONFIG_BOUNCE)	+= bounce.o
obj-$(CONFIG_BLK_DEV_BSG)	+= bsg.o
@@ -0,0 +1,585 @@
/*
 * Bad block management
 *
 * - Heavily based on MD badblocks code from Neil Brown
 *
 * Copyright (c) 2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/badblocks.h>
#include <linux/seqlock.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/slab.h>

/**
 * badblocks_check() - check a given range for bad sectors
 * @bb:		the badblocks structure that holds all badblock information
 * @s:		sector (start) at which to check for badblocks
 * @sectors:	number of sectors to check for badblocks
 * @first_bad:	pointer to store location of the first badblock
 * @bad_sectors: pointer to store number of badblocks after @first_bad
 *
 * We can record which blocks on each device are 'bad' and so just
 * fail those blocks, or that stripe, rather than the whole device.
 * Entries in the bad-block table are 64bits wide.  This comprises:
 * Length of bad-range, in sectors: 0-511 for lengths 1-512
 * Start of bad-range, sector offset, 54 bits (allows 8 exbibytes)
 *  A 'shift' can be set so that larger blocks are tracked and
 *  consequently larger devices can be covered.
 * 'Acknowledged' flag - 1 bit. - the most significant bit.
 *
 * Locking of the bad-block table uses a seqlock so badblocks_check
 * might need to retry if it is very unlucky.
 * We will sometimes want to check for bad blocks in a bi_end_io function,
 * so we use the write_seqlock_irq variant.
 *
 * When looking for a bad block we specify a range and want to
 * know if any block in the range is bad.  So we binary-search
 * to the last range that starts at-or-before the given endpoint,
 * (or "before the sector after the target range")
 * then see if it ends after the given start.
 *
 * Return:
 *  0: there are no known bad blocks in the range
 *  1: there are known bad blocks which are all acknowledged
 * -1: there are bad blocks which have not yet been acknowledged in metadata.
 * plus the start/length of the first bad section we overlap.
 */
int badblocks_check(struct badblocks *bb, sector_t s, int sectors,
			sector_t *first_bad, int *bad_sectors)
{
	int hi;
	int lo;
	u64 *p = bb->page;
	int rv;
	sector_t target = s + sectors;
	unsigned seq;

	if (bb->shift > 0) {
		/* round the start down, and the end up */
		s >>= bb->shift;
		target += (1<<bb->shift) - 1;
		target >>= bb->shift;
		sectors = target - s;
	}
	/* 'target' is now the first block after the bad range */

retry:
	seq = read_seqbegin(&bb->lock);
	lo = 0;
	rv = 0;
	hi = bb->count;

	/* Binary search between lo and hi for 'target'
	 * i.e. for the last range that starts before 'target'
	 */
	/* INVARIANT: ranges before 'lo' and at-or-after 'hi'
	 * are known not to be the last range before target.
	 * VARIANT: hi-lo is the number of possible
	 * ranges, and decreases until it reaches 1
	 */
	while (hi - lo > 1) {
		int mid = (lo + hi) / 2;
		sector_t a = BB_OFFSET(p[mid]);

		if (a < target)
			/* This could still be the one, earlier ranges
			 * could not.
			 */
			lo = mid;
		else
			/* This and later ranges are definitely out. */
			hi = mid;
	}
	/* 'lo' might be the last that started before target, but 'hi' isn't */
	if (hi > lo) {
		/* need to check all range that end after 's' to see if
		 * any are unacknowledged.
		 */
		while (lo >= 0 &&
		       BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
			if (BB_OFFSET(p[lo]) < target) {
				/* starts before the end, and finishes after
				 * the start, so they must overlap
				 */
				if (rv != -1 && BB_ACK(p[lo]))
					rv = 1;
				else
					rv = -1;
				*first_bad = BB_OFFSET(p[lo]);
				*bad_sectors = BB_LEN(p[lo]);
			}
			lo--;
		}
	}

	if (read_seqretry(&bb->lock, seq))
		goto retry;

	return rv;
}
EXPORT_SYMBOL_GPL(badblocks_check);
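The entry format badblocks_check() describes is implemented by bit-packing helpers in <linux/badblocks.h>; a sketch of that packing, assuming exactly the 9-bit length / 54-bit offset / 1-bit ack split from the comment above:

/* Sketch of the bad-block entry packing assumed by the code above:
 * each u64 entry stores a 1-512 sector length in bits 0-8 (as
 * length - 1), the start sector in bits 9-62, and the "acknowledged"
 * flag in bit 63.
 */
#define BB_LEN_MASK	(0x00000000000001FFULL)	/* bits 0-8: length - 1 */
#define BB_OFFSET_MASK	(0x7FFFFFFFFFFFFE00ULL)	/* bits 9-62: start sector */
#define BB_ACK_MASK	(0x8000000000000000ULL)	/* bit 63: acknowledged */
#define BB_MAX_LEN	512
#define BB_OFFSET(x)	(((x) & BB_OFFSET_MASK) >> 9)
#define BB_LEN(x)	(((x) & BB_LEN_MASK) + 1)
#define BB_ACK(x)	(!!((x) & BB_ACK_MASK))
#define BB_MAKE(a, l, ack) (((a)<<9) | ((l)-1) | ((u64)(!!(ack)) << 63))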
||||
|
||||
/**
|
||||
* badblocks_set() - Add a range of bad blocks to the table.
|
||||
* @bb: the badblocks structure that holds all badblock information
|
||||
* @s: first sector to mark as bad
|
||||
* @sectors: number of sectors to mark as bad
|
||||
* @acknowledged: weather to mark the bad sectors as acknowledged
|
||||
*
|
||||
* This might extend the table, or might contract it if two adjacent ranges
|
||||
* can be merged. We binary-search to find the 'insertion' point, then
|
||||
* decide how best to handle it.
|
||||
*
|
||||
* Return:
|
||||
* 0: success
|
||||
* 1: failed to set badblocks (out of space)
|
||||
*/
|
||||
int badblocks_set(struct badblocks *bb, sector_t s, int sectors,
|
||||
int acknowledged)
|
||||
{
|
||||
u64 *p;
|
||||
int lo, hi;
|
||||
int rv = 0;
|
||||
unsigned long flags;
|
||||
|
||||
if (bb->shift < 0)
|
||||
/* badblocks are disabled */
|
||||
return 0;
|
||||
|
||||
if (bb->shift) {
|
||||
/* round the start down, and the end up */
|
||||
sector_t next = s + sectors;
|
||||
|
||||
s >>= bb->shift;
|
||||
next += (1<<bb->shift) - 1;
|
||||
next >>= bb->shift;
|
||||
sectors = next - s;
|
||||
}
|
||||
|
||||
write_seqlock_irqsave(&bb->lock, flags);
|
||||
|
||||
p = bb->page;
|
||||
lo = 0;
|
||||
hi = bb->count;
|
||||
/* Find the last range that starts at-or-before 's' */
|
||||
while (hi - lo > 1) {
|
||||
int mid = (lo + hi) / 2;
|
||||
sector_t a = BB_OFFSET(p[mid]);
|
||||
|
||||
if (a <= s)
|
||||
lo = mid;
|
||||
else
|
||||
hi = mid;
|
||||
}
|
||||
if (hi > lo && BB_OFFSET(p[lo]) > s)
|
||||
hi = lo;
|
||||
|
||||
if (hi > lo) {
|
||||
/* we found a range that might merge with the start
|
||||
* of our new range
|
||||
*/
|
||||
sector_t a = BB_OFFSET(p[lo]);
|
||||
sector_t e = a + BB_LEN(p[lo]);
|
||||
int ack = BB_ACK(p[lo]);
|
||||
|
||||
if (e >= s) {
|
||||
/* Yes, we can merge with a previous range */
|
||||
if (s == a && s + sectors >= e)
|
||||
/* new range covers old */
|
||||
ack = acknowledged;
|
||||
else
|
||||
ack = ack && acknowledged;
|
||||
|
||||
if (e < s + sectors)
|
||||
e = s + sectors;
|
||||
if (e - a <= BB_MAX_LEN) {
|
||||
p[lo] = BB_MAKE(a, e-a, ack);
|
||||
s = e;
|
||||
} else {
|
||||
/* does not all fit in one range,
|
||||
* make p[lo] maximal
|
||||
*/
|
||||
if (BB_LEN(p[lo]) != BB_MAX_LEN)
|
||||
p[lo] = BB_MAKE(a, BB_MAX_LEN, ack);
|
||||
s = a + BB_MAX_LEN;
|
||||
}
|
||||
sectors = e - s;
|
||||
}
|
||||
}
|
||||
if (sectors && hi < bb->count) {
|
||||
/* 'hi' points to the first range that starts after 's'.
|
||||
* Maybe we can merge with the start of that range
|
||||
*/
|
||||
sector_t a = BB_OFFSET(p[hi]);
|
||||
sector_t e = a + BB_LEN(p[hi]);
|
||||
int ack = BB_ACK(p[hi]);
|
||||
|
||||
if (a <= s + sectors) {
|
||||
/* merging is possible */
|
||||
if (e <= s + sectors) {
|
||||
/* full overlap */
|
||||
e = s + sectors;
|
||||
ack = acknowledged;
|
||||
} else
|
||||
ack = ack && acknowledged;
|
||||
|
||||
a = s;
|
||||
if (e - a <= BB_MAX_LEN) {
|
||||
p[hi] = BB_MAKE(a, e-a, ack);
|
||||
s = e;
|
||||
} else {
|
||||
p[hi] = BB_MAKE(a, BB_MAX_LEN, ack);
|
||||
s = a + BB_MAX_LEN;
|
||||
}
|
||||
sectors = e - s;
|
||||
lo = hi;
|
||||
hi++;
|
||||
}
|
||||
}
|
||||
if (sectors == 0 && hi < bb->count) {
|
||||
/* we might be able to combine lo and hi */
|
||||
/* Note: 's' is at the end of 'lo' */
|
||||
sector_t a = BB_OFFSET(p[hi]);
|
||||
int lolen = BB_LEN(p[lo]);
|
||||
int hilen = BB_LEN(p[hi]);
|
||||
int newlen = lolen + hilen - (s - a);
|
||||
|
||||
if (s >= a && newlen < BB_MAX_LEN) {
|
||||
/* yes, we can combine them */
|
||||
int ack = BB_ACK(p[lo]) && BB_ACK(p[hi]);
|
||||
|
||||
p[lo] = BB_MAKE(BB_OFFSET(p[lo]), newlen, ack);
|
||||
memmove(p + hi, p + hi + 1,
|
||||
(bb->count - hi - 1) * 8);
|
||||
bb->count--;
|
||||
}
|
||||
}
|
||||
while (sectors) {
|
||||
/* didn't merge (it all).
|
||||
* Need to add a range just before 'hi'
|
||||
*/
|
||||
if (bb->count >= MAX_BADBLOCKS) {
|
||||
/* No room for more */
|
||||
rv = 1;
|
||||
break;
|
||||
} else {
|
||||
int this_sectors = sectors;
|
||||
|
||||
memmove(p + hi + 1, p + hi,
|
||||
(bb->count - hi) * 8);
|
||||
bb->count++;
|
||||
|
||||
if (this_sectors > BB_MAX_LEN)
|
||||
this_sectors = BB_MAX_LEN;
|
||||
p[hi] = BB_MAKE(s, this_sectors, acknowledged);
|
||||
sectors -= this_sectors;
|
||||
s += this_sectors;
|
||||
}
|
||||
}
|
||||
|
||||
bb->changed = 1;
|
||||
if (!acknowledged)
|
||||
bb->unacked_exist = 1;
|
||||
write_sequnlock_irqrestore(&bb->lock, flags);
|
||||
|
||||
return rv;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(badblocks_set);
|
||||
|
||||
/**
|
||||
* badblocks_clear() - Remove a range of bad blocks to the table.
|
||||
* @bb: the badblocks structure that holds all badblock information
|
||||
* @s: first sector to mark as bad
|
||||
* @sectors: number of sectors to mark as bad
|
||||
*
|
||||
* This may involve extending the table if we spilt a region,
|
||||
* but it must not fail. So if the table becomes full, we just
|
||||
* drop the remove request.
|
||||
*
|
||||
* Return:
|
||||
* 0: success
|
||||
* 1: failed to clear badblocks
|
||||
*/
|
||||
int badblocks_clear(struct badblocks *bb, sector_t s, int sectors)
|
||||
{
|
||||
u64 *p;
|
||||
int lo, hi;
|
||||
sector_t target = s + sectors;
|
||||
int rv = 0;
|
||||
|
||||
if (bb->shift > 0) {
|
||||
/* When clearing we round the start up and the end down.
|
||||
* This should not matter as the shift should align with
|
||||
* the block size and no rounding should ever be needed.
|
||||
* However it is better the think a block is bad when it
|
||||
* isn't than to think a block is not bad when it is.
|
||||
*/
|
||||
s += (1<<bb->shift) - 1;
|
||||
s >>= bb->shift;
|
||||
target >>= bb->shift;
|
||||
sectors = target - s;
|
||||
}
|
||||
|
||||
write_seqlock_irq(&bb->lock);
|
||||
|
||||
p = bb->page;
|
||||
lo = 0;
|
||||
hi = bb->count;
|
||||
/* Find the last range that starts before 'target' */
|
||||
while (hi - lo > 1) {
|
||||
int mid = (lo + hi) / 2;
|
||||
sector_t a = BB_OFFSET(p[mid]);
|
||||
|
||||
if (a < target)
|
||||
lo = mid;
|
||||
else
|
||||
hi = mid;
|
||||
}
|
||||
if (hi > lo) {
|
||||
/* p[lo] is the last range that could overlap the
|
||||
* current range. Earlier ranges could also overlap,
|
||||
* but only this one can overlap the end of the range.
|
||||
*/
|
||||
if (BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > target) {
|
||||
/* Partial overlap, leave the tail of this range */
|
||||
int ack = BB_ACK(p[lo]);
|
||||
sector_t a = BB_OFFSET(p[lo]);
|
||||
sector_t end = a + BB_LEN(p[lo]);
|
||||
|
||||
if (a < s) {
|
||||
/* we need to split this range */
|
||||
if (bb->count >= MAX_BADBLOCKS) {
|
||||
				rv = -ENOSPC;
				goto out;
			}
			memmove(p+lo+1, p+lo, (bb->count - lo) * 8);
			bb->count++;
			p[lo] = BB_MAKE(a, s-a, ack);
			lo++;
		}
		p[lo] = BB_MAKE(target, end - target, ack);
		/* there is no longer an overlap */
		hi = lo;
		lo--;
	}
	while (lo >= 0 &&
	       BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
		/* This range does overlap */
		if (BB_OFFSET(p[lo]) < s) {
			/* Keep the early parts of this range. */
			int ack = BB_ACK(p[lo]);
			sector_t start = BB_OFFSET(p[lo]);

			p[lo] = BB_MAKE(start, s - start, ack);
			/* now low doesn't overlap, so.. */
			break;
		}
		lo--;
	}
	/* 'lo' is strictly before, 'hi' is strictly after,
	 * anything between needs to be discarded
	 */
	if (hi - lo > 1) {
		memmove(p+lo+1, p+hi, (bb->count - hi) * 8);
		bb->count -= (hi - lo - 1);
	}

	bb->changed = 1;
out:
	write_sequnlock_irq(&bb->lock);
	return rv;
}
EXPORT_SYMBOL_GPL(badblocks_clear);

/**
 * ack_all_badblocks() - Acknowledge all bad blocks in a list.
 * @bb: the badblocks structure that holds all badblock information
 *
 * This only succeeds if ->changed is clear. It is used by
 * in-kernel metadata updates.
 */
void ack_all_badblocks(struct badblocks *bb)
{
	if (bb->page == NULL || bb->changed)
		/* no point even trying */
		return;
	write_seqlock_irq(&bb->lock);

	if (bb->changed == 0 && bb->unacked_exist) {
		u64 *p = bb->page;
		int i;

		for (i = 0; i < bb->count ; i++) {
			if (!BB_ACK(p[i])) {
				sector_t start = BB_OFFSET(p[i]);
				int len = BB_LEN(p[i]);

				p[i] = BB_MAKE(start, len, 1);
			}
		}
		bb->unacked_exist = 0;
	}
	write_sequnlock_irq(&bb->lock);
}
EXPORT_SYMBOL_GPL(ack_all_badblocks);

/**
 * badblocks_show() - sysfs access to bad-blocks list
 * @bb: the badblocks structure that holds all badblock information
 * @page: buffer received from sysfs
 * @unack: whether to show unacknowledged badblocks
 *
 * Return:
 *  Length of returned data
 */
ssize_t badblocks_show(struct badblocks *bb, char *page, int unack)
{
	size_t len;
	int i;
	u64 *p = bb->page;
	unsigned seq;

	if (bb->shift < 0)
		return 0;

retry:
	seq = read_seqbegin(&bb->lock);

	len = 0;
	i = 0;

	while (len < PAGE_SIZE && i < bb->count) {
		sector_t s = BB_OFFSET(p[i]);
		unsigned int length = BB_LEN(p[i]);
		int ack = BB_ACK(p[i]);

		i++;

		if (unack && ack)
			continue;

		len += snprintf(page+len, PAGE_SIZE-len, "%llu %u\n",
				(unsigned long long)s << bb->shift,
				length << bb->shift);
	}
	if (unack && len == 0)
		bb->unacked_exist = 0;

	if (read_seqretry(&bb->lock, seq))
		goto retry;

	return len;
}
EXPORT_SYMBOL_GPL(badblocks_show);

/**
 * badblocks_store() - sysfs access to bad-blocks list
 * @bb: the badblocks structure that holds all badblock information
 * @page: buffer received from sysfs
 * @len: length of data received from sysfs
 * @unack: whether to store unacknowledged badblocks
 *
 * Return:
 *  Length of the buffer processed or -ve error.
 */
ssize_t badblocks_store(struct badblocks *bb, const char *page, size_t len,
			int unack)
{
	unsigned long long sector;
	int length;
	char newline;

	switch (sscanf(page, "%llu %d%c", &sector, &length, &newline)) {
	case 3:
		if (newline != '\n')
			return -EINVAL;
		/* fall through */
	case 2:
		if (length <= 0)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (badblocks_set(bb, sector, length, !unack))
		return -ENOSPC;
	else
		return len;
}
EXPORT_SYMBOL_GPL(badblocks_store);

static int __badblocks_init(struct device *dev, struct badblocks *bb,
		int enable)
{
	bb->dev = dev;
	bb->count = 0;
	if (enable)
		bb->shift = 0;
	else
		bb->shift = -1;
	if (dev)
		bb->page = devm_kzalloc(dev, PAGE_SIZE, GFP_KERNEL);
	else
		bb->page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bb->page) {
		bb->shift = -1;
		return -ENOMEM;
	}
	seqlock_init(&bb->lock);

	return 0;
}

/**
 * badblocks_init() - initialize the badblocks structure
 * @bb: the badblocks structure that holds all badblock information
 * @enable: whether to enable badblocks accounting
 *
 * Return:
 *  0: success
 *  -ve errno: on error
 */
int badblocks_init(struct badblocks *bb, int enable)
{
	return __badblocks_init(NULL, bb, enable);
}
EXPORT_SYMBOL_GPL(badblocks_init);

int devm_init_badblocks(struct device *dev, struct badblocks *bb)
{
	if (!bb)
		return -EINVAL;
	return __badblocks_init(dev, bb, 1);
}
EXPORT_SYMBOL_GPL(devm_init_badblocks);

/**
 * badblocks_exit() - free the badblocks structure
 * @bb: the badblocks structure that holds all badblock information
 */
void badblocks_exit(struct badblocks *bb)
{
	if (!bb)
		return;
	if (bb->dev)
		devm_kfree(bb->dev, bb->page);
	else
		kfree(bb->page);
	bb->page = NULL;
}
EXPORT_SYMBOL_GPL(badblocks_exit);
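For orientation, here is a minimal usage sketch of the API exported above. Only devm_init_badblocks(), badblocks_set() and the disk->bb hookup (see the genhd.c hunks further down) come from this series; the demo_* names are hypothetical.

	#include <linux/badblocks.h>
	#include <linux/genhd.h>

	static struct badblocks demo_bb;	/* hypothetical driver state */

	static int demo_attach(struct device *dev, struct gendisk *disk)
	{
		int rc = devm_init_badblocks(dev, &demo_bb);

		if (rc)
			return rc;
		disk->bb = &demo_bb;	/* exposes /sys/block/<disk>/badblocks */
		/* record sectors 512..519 as bad and acknowledged */
		return badblocks_set(&demo_bb, 512, 8, 1) ? -ENOSPC : 0;
	}

From userspace the same list is then readable and writable in "<sector> <length>" form, e.g. echo "512 8" > /sys/block/pmem0/badblocks (assuming a pmem0 disk).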
@@ -1127,15 +1127,15 @@ void blkcg_exit_queue(struct request_queue *q)
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkcg_can_attach(struct cgroup_subsys_state *css,
			    struct cgroup_taskset *tset)
static int blkcg_can_attach(struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct cgroup_subsys_state *dst_css;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, tset) {
	cgroup_taskset_for_each(task, dst_css, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
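The hunk above tracks the 4.5 cgroup API change: ->can_attach() callbacks receive only the taskset, and the iterator now also hands back each task's destination css. A minimal sketch of the new iteration shape (the demo_ name is made up):

	static int demo_can_attach(struct cgroup_taskset *tset)
	{
		struct task_struct *task;
		struct cgroup_subsys_state *dst_css;

		cgroup_taskset_for_each(task, dst_css, tset) {
			/* 'task' is migrating; 'dst_css' is where it is headed */
		}
		return 0;
	}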
@@ -3405,6 +3405,9 @@ int blk_pre_runtime_suspend(struct request_queue *q)
{
	int ret = 0;

	if (!q->dev)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (q->nr_pending) {
		ret = -EBUSY;

@@ -3432,6 +3435,9 @@ EXPORT_SYMBOL(blk_pre_runtime_suspend);
 */
void blk_post_runtime_suspend(struct request_queue *q, int err)
{
	if (!q->dev)
		return;

	spin_lock_irq(q->queue_lock);
	if (!err) {
		q->rpm_status = RPM_SUSPENDED;

@@ -3456,6 +3462,9 @@ EXPORT_SYMBOL(blk_post_runtime_suspend);
 */
void blk_pre_runtime_resume(struct request_queue *q)
{
	if (!q->dev)
		return;

	spin_lock_irq(q->queue_lock);
	q->rpm_status = RPM_RESUMING;
	spin_unlock_irq(q->queue_lock);

@@ -3478,6 +3487,9 @@ EXPORT_SYMBOL(blk_pre_runtime_resume);
 */
void blk_post_runtime_resume(struct request_queue *q, int err)
{
	if (!q->dev)
		return;

	spin_lock_irq(q->queue_lock);
	if (!err) {
		q->rpm_status = RPM_ACTIVE;
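These four guards make the helpers safe to call on queues that never went through blk_pm_runtime_init() (q->dev == NULL). For context, a driver's runtime-suspend callback pairs them roughly like this (sketch; the my_* names and struct are hypothetical):

	struct my_dev { struct request_queue *queue; };		/* hypothetical */
	static int my_hw_suspend(struct my_dev *md) { return 0; }	/* stub */

	static int my_runtime_suspend(struct device *dev)
	{
		struct my_dev *md = dev_get_drvdata(dev);
		int err = blk_pre_runtime_suspend(md->queue);

		if (err)
			return err;	/* requests still pending: stay active */
		err = my_hw_suspend(md);
		blk_post_runtime_suspend(md->queue, err);
		return err;
	}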
@@ -20,6 +20,7 @@
#include <linux/idr.h>
#include <linux/log2.h>
#include <linux/pm_runtime.h>
#include <linux/badblocks.h>

#include "blk.h"

@@ -664,7 +665,6 @@ void del_gendisk(struct gendisk *disk)

	kobject_put(disk->part0.holder_dir);
	kobject_put(disk->slave_dir);
	disk->driverfs_dev = NULL;
	if (!sysfs_deprecated)
		sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
	pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);

@@ -672,6 +672,31 @@ void del_gendisk(struct gendisk *disk)
}
EXPORT_SYMBOL(del_gendisk);

/* sysfs access to bad-blocks list. */
static ssize_t disk_badblocks_show(struct device *dev,
					struct device_attribute *attr,
					char *page)
{
	struct gendisk *disk = dev_to_disk(dev);

	if (!disk->bb)
		return sprintf(page, "\n");

	return badblocks_show(disk->bb, page, 0);
}

static ssize_t disk_badblocks_store(struct device *dev,
					struct device_attribute *attr,
					const char *page, size_t len)
{
	struct gendisk *disk = dev_to_disk(dev);

	if (!disk->bb)
		return -ENXIO;

	return badblocks_store(disk->bb, page, len, 0);
}

/**
 * get_gendisk - get partitioning information for a given device
 * @devt: device to get partitioning information for

@@ -990,6 +1015,8 @@ static DEVICE_ATTR(discard_alignment, S_IRUGO, disk_discard_alignment_show,
static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL);
static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL);
static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL);
static DEVICE_ATTR(badblocks, S_IRUGO | S_IWUSR, disk_badblocks_show,
		disk_badblocks_store);
#ifdef CONFIG_FAIL_MAKE_REQUEST
static struct device_attribute dev_attr_fail =
	__ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store);

@@ -1011,6 +1038,7 @@ static struct attribute *disk_attrs[] = {
	&dev_attr_capability.attr,
	&dev_attr_stat.attr,
	&dev_attr_inflight.attr,
	&dev_attr_badblocks.attr,
#ifdef CONFIG_FAIL_MAKE_REQUEST
	&dev_attr_fail.attr,
#endif
@@ -4,6 +4,7 @@
#include <linux/gfp.h>
#include <linux/blkpg.h>
#include <linux/hdreg.h>
#include <linux/badblocks.h>
#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/blktrace_api.h>

@@ -406,6 +407,71 @@ static inline int is_unrecognized_ioctl(int ret)
		ret == -ENOIOCTLCMD;
}

#ifdef CONFIG_FS_DAX
bool blkdev_dax_capable(struct block_device *bdev)
{
	struct gendisk *disk = bdev->bd_disk;

	if (!disk->fops->direct_access)
		return false;

	/*
	 * If the partition is not aligned on a page boundary, we can't
	 * do dax I/O to it.
	 */
	if ((bdev->bd_part->start_sect % (PAGE_SIZE / 512))
			|| (bdev->bd_part->nr_sects % (PAGE_SIZE / 512)))
		return false;

	/*
	 * If the device has known bad blocks, force all I/O through the
	 * driver / page cache.
	 *
	 * TODO: support finer grained dax error handling
	 */
	if (disk->bb && disk->bb->count)
		return false;

	return true;
}

static int blkdev_daxset(struct block_device *bdev, unsigned long argp)
{
	unsigned long arg;
	int rc = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (get_user(arg, (int __user *)(argp)))
		return -EFAULT;
	arg = !!arg;
	if (arg == !!(bdev->bd_inode->i_flags & S_DAX))
		return 0;

	if (arg)
		arg = S_DAX;

	if (arg && !blkdev_dax_capable(bdev))
		return -ENOTTY;

	mutex_lock(&bdev->bd_inode->i_mutex);
	if (bdev->bd_map_count == 0)
		inode_set_flags(bdev->bd_inode, arg, S_DAX);
	else
		rc = -EBUSY;
	mutex_unlock(&bdev->bd_inode->i_mutex);
	return rc;
}
#else
static int blkdev_daxset(struct block_device *bdev, int arg)
{
	if (arg)
		return -ENOTTY;
	return 0;
}
#endif

static int blkdev_flushbuf(struct block_device *bdev, fmode_t mode,
		unsigned cmd, unsigned long arg)
{

@@ -568,6 +634,11 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
	case BLKTRACESETUP:
	case BLKTRACETEARDOWN:
		return blk_trace_ioctl(bdev, cmd, argp);
	case BLKDAXSET:
		return blkdev_daxset(bdev, arg);
	case BLKDAXGET:
		return put_int(arg, !!(bdev->bd_inode->i_flags & S_DAX));
		break;
	case IOC_PR_REGISTER:
		return blkdev_pr_register(bdev, argp);
	case IOC_PR_RESERVE:
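Userspace view of the two new ioctls (sketch; the ioctl numbers below are an assumption taken from the uapi header added in this series, and /dev/pmem0 is a placeholder device):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#ifndef BLKDAXSET			/* assumption: values as in <linux/fs.h> for 4.5-rc */
	#define BLKDAXSET	_IO(0x12, 116)
	#define BLKDAXGET	_IO(0x12, 129)
	#endif

	int main(void)
	{
		int on = 1, state = 0;
		int fd = open("/dev/pmem0", O_RDWR);

		if (fd < 0)
			return 1;
		if (ioctl(fd, BLKDAXSET, &on))	/* EBUSY while mapped, ENOTTY if not DAX-capable */
			perror("BLKDAXSET");
		if (ioctl(fd, BLKDAXGET, &state) == 0)
			printf("S_DAX: %d\n", state);
		close(fd);
		return 0;
	}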
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/sort.h>

@@ -1473,6 +1474,201 @@ static void acpi_nfit_blk_region_disable(struct nvdimm_bus *nvdimm_bus,
	/* devm will free nfit_blk */
}

static int ars_get_cap(struct nvdimm_bus_descriptor *nd_desc,
		struct nd_cmd_ars_cap *cmd, u64 addr, u64 length)
{
	cmd->address = addr;
	cmd->length = length;

	return nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd,
			sizeof(*cmd));
}

static int ars_do_start(struct nvdimm_bus_descriptor *nd_desc,
		struct nd_cmd_ars_start *cmd, u64 addr, u64 length)
{
	int rc;

	cmd->address = addr;
	cmd->length = length;
	cmd->type = ND_ARS_PERSISTENT;

	while (1) {
		rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, cmd,
				sizeof(*cmd));
		if (rc)
			return rc;
		switch (cmd->status) {
		case 0:
			return 0;
		case 1:
			/* ARS unsupported, but we should never get here */
			return 0;
		case 2:
			return -EINVAL;
		case 3:
			/* ARS is in progress */
			msleep(1000);
			break;
		default:
			return -ENXIO;
		}
	}
}

static int ars_get_status(struct nvdimm_bus_descriptor *nd_desc,
		struct nd_cmd_ars_status *cmd)
{
	int rc;

	while (1) {
		rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, cmd,
			sizeof(*cmd));
		if (rc || cmd->status & 0xffff)
			return -ENXIO;

		/* Check extended status (Upper two bytes) */
		switch (cmd->status >> 16) {
		case 0:
			return 0;
		case 1:
			/* ARS is in progress */
			msleep(1000);
			break;
		case 2:
			/* No ARS performed for the current boot */
			return 0;
		default:
			return -ENXIO;
		}
	}
}

static int ars_status_process_records(struct nvdimm_bus *nvdimm_bus,
		struct nd_cmd_ars_status *ars_status, u64 start)
{
	int rc;
	u32 i;

	/*
	 * The address field returned by ars_status should be either
	 * less than or equal to the address we last started ARS for.
	 * The (start, length) returned by ars_status should also have
	 * non-zero overlap with the range we started ARS for.
	 * If this is not the case, bail.
	 */
	if (ars_status->address > start ||
			(ars_status->address + ars_status->length < start))
		return -ENXIO;

	for (i = 0; i < ars_status->num_records; i++) {
		rc = nvdimm_bus_add_poison(nvdimm_bus,
				ars_status->records[i].err_address,
				ars_status->records[i].length);
		if (rc)
			return rc;
	}

	return 0;
}

static int acpi_nfit_find_poison(struct acpi_nfit_desc *acpi_desc,
		struct nd_region_desc *ndr_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct nvdimm_bus *nvdimm_bus = acpi_desc->nvdimm_bus;
	struct nd_cmd_ars_status *ars_status = NULL;
	struct nd_cmd_ars_start *ars_start = NULL;
	struct nd_cmd_ars_cap *ars_cap = NULL;
	u64 start, len, cur, remaining;
	int rc;

	ars_cap = kzalloc(sizeof(*ars_cap), GFP_KERNEL);
	if (!ars_cap)
		return -ENOMEM;

	start = ndr_desc->res->start;
	len = ndr_desc->res->end - ndr_desc->res->start + 1;

	rc = ars_get_cap(nd_desc, ars_cap, start, len);
	if (rc)
		goto out;

	/*
	 * If ARS is unsupported, or if the 'Persistent Memory Scrub' flag in
	 * extended status is not set, skip this but continue initialization
	 */
	if ((ars_cap->status & 0xffff) ||
		!(ars_cap->status >> 16 & ND_ARS_PERSISTENT)) {
		dev_warn(acpi_desc->dev,
			"ARS unsupported (status: 0x%x), won't create an error list\n",
			ars_cap->status);
		goto out;
	}

	/*
	 * Check if a full-range ARS has been run. If so, use those results
	 * without having to start a new ARS.
	 */
	ars_status = kzalloc(ars_cap->max_ars_out + sizeof(*ars_status),
			GFP_KERNEL);
	if (!ars_status) {
		rc = -ENOMEM;
		goto out;
	}

	rc = ars_get_status(nd_desc, ars_status);
	if (rc)
		goto out;

	if (ars_status->address <= start &&
		(ars_status->address + ars_status->length >= start + len)) {
		rc = ars_status_process_records(nvdimm_bus, ars_status, start);
		goto out;
	}

	/*
	 * ARS_STATUS can overflow if the number of poison entries found is
	 * greater than the maximum buffer size (ars_cap->max_ars_out).
	 * To detect overflow, check if the length field of ars_status
	 * is less than the length we supplied. If so, process the
	 * error entries we got, adjust the start point, and start again.
	 */
	ars_start = kzalloc(sizeof(*ars_start), GFP_KERNEL);
	if (!ars_start) {
		rc = -ENOMEM;
		goto out;
	}

	cur = start;
	remaining = len;
	do {
		u64 done, end;

		rc = ars_do_start(nd_desc, ars_start, cur, remaining);
		if (rc)
			goto out;

		rc = ars_get_status(nd_desc, ars_status);
		if (rc)
			goto out;

		rc = ars_status_process_records(nvdimm_bus, ars_status, cur);
		if (rc)
			goto out;

		end = min(cur + remaining,
			ars_status->address + ars_status->length);
		done = end - cur;
		cur += done;
		remaining -= done;
	} while (remaining);

 out:
	kfree(ars_cap);
	kfree(ars_start);
	kfree(ars_status);
	return rc;
}

static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
		struct nd_mapping *nd_mapping, struct nd_region_desc *ndr_desc,
		struct acpi_nfit_memory_map *memdev,

@@ -1585,6 +1781,13 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,

	nvdimm_bus = acpi_desc->nvdimm_bus;
	if (nfit_spa_type(spa) == NFIT_SPA_PM) {
		rc = acpi_nfit_find_poison(acpi_desc, ndr_desc);
		if (rc) {
			dev_err(acpi_desc->dev,
				"error while performing ARS to find poison: %d\n",
				rc);
			return rc;
		}
		if (!nvdimm_pmem_region_create(nvdimm_bus, ndr_desc))
			return -ENOMEM;
	} else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) {
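A note on the status convention used throughout the helpers above: the DSM result packs a command status in the low 16 bits and an extended status in the high 16. The same decoding, written out as helpers (sketch, for illustration only):

	static inline u16 ars_cmd_status(u32 status)
	{
		return status & 0xffff;		/* non-zero: command failed */
	}

	static inline u16 ars_ext_status(u32 status)
	{
		return status >> 16;		/* e.g. 1 == ARS in progress */
	}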
@@ -314,16 +314,6 @@ static const struct pci_device_id ahci_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, 0x1f37), board_ahci_avn }, /* Avoton RAID */
	{ PCI_VDEVICE(INTEL, 0x1f3e), board_ahci_avn }, /* Avoton RAID */
	{ PCI_VDEVICE(INTEL, 0x1f3f), board_ahci_avn }, /* Avoton RAID */
	{ PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
	{ PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/
	{ PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/
	{ PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/
	{ PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
	{ PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
	{ PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
	{ PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/
	{ PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/
	{ PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/
	{ PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg RAID */
	{ PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg RAID */
	{ PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */

@@ -350,10 +340,22 @@ static const struct pci_device_id ahci_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, 0x9d03), board_ahci }, /* Sunrise Point-LP AHCI */
	{ PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */
	{ PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */
	{ PCI_VDEVICE(INTEL, 0xa102), board_ahci }, /* Sunrise Point-H AHCI */
	{ PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */
	{ PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
	{ PCI_VDEVICE(INTEL, 0xa106), board_ahci }, /* Sunrise Point-H RAID */
	{ PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
	{ PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
	{ PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
	{ PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/
	{ PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
	{ PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/
	{ PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
	{ PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/
	{ PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/
	{ PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/
	{ PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
	{ PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/

	/* JMicron 360/1/3/5/6, match class to avoid IDE function */
	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -62,6 +62,7 @@ static void ahci_mvebu_regret_option(struct ahci_host_priv *hpriv)
	writel(0x80, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA);
}

#ifdef CONFIG_PM_SLEEP
static int ahci_mvebu_suspend(struct platform_device *pdev, pm_message_t state)
{
	return ahci_platform_suspend_host(&pdev->dev);

@@ -81,6 +82,10 @@ static int ahci_mvebu_resume(struct platform_device *pdev)

	return ahci_platform_resume_host(&pdev->dev);
}
#else
#define ahci_mvebu_suspend NULL
#define ahci_mvebu_resume NULL
#endif

static const struct ata_port_info ahci_mvebu_port_info = {
	.flags = AHCI_FLAG_COMMON,
@@ -1273,6 +1273,15 @@ static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
	ata_tf_to_fis(tf, pmp, is_cmd, fis);
	ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));

	/* set port value for softreset of Port Multiplier */
	if (pp->fbs_enabled && pp->fbs_last_dev != pmp) {
		tmp = readl(port_mmio + PORT_FBS);
		tmp &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC);
		tmp |= pmp << PORT_FBS_DEV_OFFSET;
		writel(tmp, port_mmio + PORT_FBS);
		pp->fbs_last_dev = pmp;
	}

	/* issue & wait */
	writel(1, port_mmio + PORT_CMD_ISSUE);
@@ -1505,12 +1505,20 @@ static const char *ata_err_string(unsigned int err_mask)
unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
			       u8 page, void *buf, unsigned int sectors)
{
	unsigned long ap_flags = dev->link->ap->flags;
	struct ata_taskfile tf;
	unsigned int err_mask;
	bool dma = false;

	DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page);

	/*
	 * Return error without actually issuing the command on controllers
	 * which e.g. lockup on a read log page.
	 */
	if (ap_flags & ATA_FLAG_NO_LOG_PAGE)
		return AC_ERR_DEV;

retry:
	ata_tf_init(dev, &tf);
	if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) &&
@@ -45,7 +45,8 @@ enum {
	SATA_FSL_MAX_PRD_DIRECT = 16,	/* Direct PRDT entries */

	SATA_FSL_HOST_FLAGS	= (ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
				   ATA_FLAG_PMP | ATA_FLAG_NCQ | ATA_FLAG_AN),
				   ATA_FLAG_PMP | ATA_FLAG_NCQ |
				   ATA_FLAG_AN | ATA_FLAG_NO_LOG_PAGE),

	SATA_FSL_MAX_CMDS	= SATA_FSL_QUEUE_DEPTH,
	SATA_FSL_CMD_HDR_SIZE	= 16,	/* 4 DWORDS */
@@ -630,6 +630,9 @@ static void sil_dev_config(struct ata_device *dev)
	unsigned int n, quirks = 0;
	unsigned char model_num[ATA_ID_PROD_LEN + 1];

	/* This controller doesn't support trim */
	dev->horkage |= ATA_HORKAGE_NOTRIM;

	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));

	for (n = 0; sil_blacklist[n].product; n++)
@@ -303,6 +303,10 @@ static int memory_subsys_offline(struct device *dev)
	if (mem->state == MEM_OFFLINE)
		return 0;

	/* Can't offline block with non-present sections */
	if (mem->section_count != sections_per_block)
		return -EINVAL;

	return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
}
@@ -444,8 +444,9 @@ static void null_lnvm_end_io(struct request *rq, int error)
	blk_put_request(rq);
}

static int null_lnvm_submit_io(struct request_queue *q, struct nvm_rq *rqd)
static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;
	struct request *rq;
	struct bio *bio = rqd->bio;

@@ -470,7 +471,7 @@ static int null_lnvm_submit_io(struct request_queue *q, struct nvm_rq *rqd)
	return 0;
}

static int null_lnvm_id(struct request_queue *q, struct nvm_id *id)
static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
{
	sector_t size = gb * 1024 * 1024 * 1024ULL;
	sector_t blksize;

@@ -523,7 +524,7 @@ static int null_lnvm_id(struct request_queue *q, struct nvm_id *id)
	return 0;
}

static void *null_lnvm_create_dma_pool(struct request_queue *q, char *name)
static void *null_lnvm_create_dma_pool(struct nvm_dev *dev, char *name)
{
	mempool_t *virtmem_pool;

@@ -541,7 +542,7 @@ static void null_lnvm_destroy_dma_pool(void *pool)
	mempool_destroy(pool);
}

static void *null_lnvm_dev_dma_alloc(struct request_queue *q, void *pool,
static void *null_lnvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
				gfp_t mem_flags, dma_addr_t *dma_handler)
{
	return mempool_alloc(pool, mem_flags);

@@ -765,7 +766,9 @@ out:

static int __init null_init(void)
{
	int ret = 0;
	unsigned int i;
	struct nullb *nullb;

	if (bs > PAGE_SIZE) {
		pr_warn("null_blk: invalid block size\n");

@@ -807,22 +810,29 @@ static int __init null_init(void)
						0, 0, NULL);
		if (!ppa_cache) {
			pr_err("null_blk: unable to create ppa cache\n");
			return -ENOMEM;
		}
	}

	for (i = 0; i < nr_devices; i++) {
		if (null_add_dev()) {
			unregister_blkdev(null_major, "nullb");
			ret = -ENOMEM;
			goto err_ppa;
		}
	}

	for (i = 0; i < nr_devices; i++) {
		ret = null_add_dev();
		if (ret)
			goto err_dev;
	}

	pr_info("null: module loaded\n");
	return 0;
err_ppa:

err_dev:
	while (!list_empty(&nullb_list)) {
		nullb = list_entry(nullb_list.next, struct nullb, list);
		null_del_dev(nullb);
	}
	kmem_cache_destroy(ppa_cache);
	return -EINVAL;
err_ppa:
	unregister_blkdev(null_major, "nullb");
	return ret;
}

static void __exit null_exit(void)
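The reworked null_init() above is the standard kernel unwind shape: each error label undoes the steps that already succeeded, in reverse order of setup. Stripped to its skeleton (stub functions, for illustration only):

	static int setup_a(void) { return 0; }
	static int setup_b(void) { return 0; }
	static void teardown_a(void) { }

	static int demo_init(void)
	{
		int ret = setup_a();

		if (ret)
			return ret;
		ret = setup_b();
		if (ret)
			goto err_a;	/* only undo what already succeeded */
		return 0;
	err_a:
		teardown_a();
		return ret;
	}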
@@ -1230,14 +1230,14 @@ static int smi_start_processing(void *send_info,

	new_smi->intf = intf;

	/* Try to claim any interrupts. */
	if (new_smi->irq_setup)
		new_smi->irq_setup(new_smi);

	/* Set up the timer that drives the interface. */
	setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
	smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);

	/* Try to claim any interrupts. */
	if (new_smi->irq_setup)
		new_smi->irq_setup(new_smi);

	/*
	 * Check if the user forcefully enabled the daemon.
	 */
@@ -209,6 +209,8 @@ EXPORT_SYMBOL_GPL(clk_register_gpio_mux);

struct clk_gpio_delayed_register_data {
	const char *gpio_name;
	int num_parents;
	const char **parent_names;
	struct device_node *node;
	struct mutex lock;
	struct clk *clk;

@@ -222,8 +224,6 @@ static struct clk *of_clk_gpio_delayed_register_get(
{
	struct clk_gpio_delayed_register_data *data = _data;
	struct clk *clk;
	const char **parent_names;
	int i, num_parents;
	int gpio;
	enum of_gpio_flags of_flags;

@@ -248,26 +248,14 @@ static struct clk *of_clk_gpio_delayed_register_get(
		return ERR_PTR(gpio);
	}

	num_parents = of_clk_get_parent_count(data->node);

	parent_names = kcalloc(num_parents, sizeof(char *), GFP_KERNEL);
	if (!parent_names) {
		clk = ERR_PTR(-ENOMEM);
		goto out;
	}

	for (i = 0; i < num_parents; i++)
		parent_names[i] = of_clk_get_parent_name(data->node, i);

	clk = data->clk_register_get(data->node->name, parent_names,
			num_parents, gpio, of_flags & OF_GPIO_ACTIVE_LOW);
	clk = data->clk_register_get(data->node->name, data->parent_names,
			data->num_parents, gpio, of_flags & OF_GPIO_ACTIVE_LOW);
	if (IS_ERR(clk))
		goto out;

	data->clk = clk;
out:
	mutex_unlock(&data->lock);
	kfree(parent_names);

	return clk;
}

@@ -296,11 +284,24 @@ static void __init of_gpio_clk_setup(struct device_node *node,
				unsigned gpio, bool active_low))
{
	struct clk_gpio_delayed_register_data *data;
	const char **parent_names;
	int i, num_parents;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return;

	num_parents = of_clk_get_parent_count(node);

	parent_names = kcalloc(num_parents, sizeof(char *), GFP_KERNEL);
	if (!parent_names) {
		kfree(data);
		return;
	}

	for (i = 0; i < num_parents; i++)
		parent_names[i] = of_clk_get_parent_name(node, i);

	data->num_parents = num_parents;
	data->parent_names = parent_names;
	data->node = node;
	data->gpio_name = gpio_name;
	data->clk_register_get = clk_register_get;
@@ -778,8 +778,10 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
	 */
	clksel = (cg_in(cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT;
	div = get_pll_div(cg, hwc, clksel);
	if (!div)
	if (!div) {
		kfree(hwc);
		return NULL;
	}

	pct80_rate = clk_get_rate(div->clk);
	pct80_rate *= 8;
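The scpi fix that follows is an instance of a general rule: for_each_available_child_of_node() holds a reference on the current child, and any early return out of the loop must drop it by hand. Skeleton (do_one() is a stand-in for per-child setup):

	#include <linux/of.h>

	static int do_one(struct device_node *child) { return 0; }	/* stand-in */

	static int walk_children(struct device_node *np)
	{
		struct device_node *child;

		for_each_available_child_of_node(np, child) {
			if (do_one(child)) {
				of_node_put(child);	/* the loop only does this when advancing */
				return -EINVAL;
			}
		}
		return 0;
	}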
@@ -292,6 +292,7 @@ static int scpi_clocks_probe(struct platform_device *pdev)
		ret = scpi_clk_add(dev, child, match);
		if (ret) {
			scpi_clocks_remove(pdev);
			of_node_put(child);
			return ret;
		}
	}
@@ -52,7 +52,7 @@ static unsigned long clk_pllv1_recalc_rate(struct clk_hw *hw,
		unsigned long parent_rate)
{
	struct clk_pllv1 *pll = to_clk_pllv1(hw);
	long long ll;
	unsigned long long ull;
	int mfn_abs;
	unsigned int mfi, mfn, mfd, pd;
	u32 reg;

@@ -94,16 +94,16 @@ static unsigned long clk_pllv1_recalc_rate(struct clk_hw *hw,
	rate = parent_rate * 2;
	rate /= pd + 1;

	ll = (unsigned long long)rate * mfn_abs;
	ull = (unsigned long long)rate * mfn_abs;

	do_div(ll, mfd + 1);
	do_div(ull, mfd + 1);

	if (mfn_is_negative(pll, mfn))
		ll = -ll;
		ull = (rate * mfi) - ull;
	else
		ull = (rate * mfi) + ull;

	ll = (rate * mfi) + ll;

	return ll;
	return ull;
}

static struct clk_ops clk_pllv1_ops = {
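The long long to unsigned long long conversions in this and the following clk hunks all stem from the same constraint: do_div() is only defined for unsigned 64-bit dividends, so any sign handling has to happen around the division rather than inside it. As a sketch:

	#include <asm/div64.h>
	#include <linux/types.h>

	static u64 scale_rate(u64 rate, u32 mult, u32 div)
	{
		u64 tmp = rate * mult;

		do_div(tmp, div);	/* divides tmp in place, returns the remainder */
		return tmp;
	}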
@@ -79,7 +79,7 @@ static unsigned long __clk_pllv2_recalc_rate(unsigned long parent_rate,
{
	long mfi, mfn, mfd, pdf, ref_clk;
	unsigned long dbl;
	s64 temp;
	u64 temp;

	dbl = dp_ctl & MXC_PLL_DP_CTL_DPDCK0_2_EN;

@@ -98,7 +98,8 @@ static unsigned long __clk_pllv2_recalc_rate(unsigned long parent_rate,
	temp = (u64) ref_clk * abs(mfn);
	do_div(temp, mfd + 1);
	if (mfn < 0)
		temp = -temp;
		temp = (ref_clk * mfi) - temp;
	else
		temp = (ref_clk * mfi) + temp;

	return temp;

@@ -126,7 +127,7 @@ static int __clk_pllv2_set_rate(unsigned long rate, unsigned long parent_rate,
{
	u32 reg;
	long mfi, pdf, mfn, mfd = 999999;
	s64 temp64;
	u64 temp64;
	unsigned long quad_parent_rate;

	quad_parent_rate = 4 * parent_rate;
@@ -335,22 +335,22 @@ static void __init vf610_clocks_init(struct device_node *ccm_node)
	clk[VF610_CLK_SAI0_SEL] = imx_clk_mux("sai0_sel", CCM_CSCMR1, 0, 2, sai_sels, 4);
	clk[VF610_CLK_SAI0_EN] = imx_clk_gate("sai0_en", "sai0_sel", CCM_CSCDR1, 16);
	clk[VF610_CLK_SAI0_DIV] = imx_clk_divider("sai0_div", "sai0_en", CCM_CSCDR1, 0, 4);
	clk[VF610_CLK_SAI0] = imx_clk_gate2("sai0", "sai0_div", CCM_CCGR0, CCM_CCGRx_CGn(15));
	clk[VF610_CLK_SAI0] = imx_clk_gate2("sai0", "ipg_bus", CCM_CCGR0, CCM_CCGRx_CGn(15));

	clk[VF610_CLK_SAI1_SEL] = imx_clk_mux("sai1_sel", CCM_CSCMR1, 2, 2, sai_sels, 4);
	clk[VF610_CLK_SAI1_EN] = imx_clk_gate("sai1_en", "sai1_sel", CCM_CSCDR1, 17);
	clk[VF610_CLK_SAI1_DIV] = imx_clk_divider("sai1_div", "sai1_en", CCM_CSCDR1, 4, 4);
	clk[VF610_CLK_SAI1] = imx_clk_gate2("sai1", "sai1_div", CCM_CCGR1, CCM_CCGRx_CGn(0));
	clk[VF610_CLK_SAI1] = imx_clk_gate2("sai1", "ipg_bus", CCM_CCGR1, CCM_CCGRx_CGn(0));

	clk[VF610_CLK_SAI2_SEL] = imx_clk_mux("sai2_sel", CCM_CSCMR1, 4, 2, sai_sels, 4);
	clk[VF610_CLK_SAI2_EN] = imx_clk_gate("sai2_en", "sai2_sel", CCM_CSCDR1, 18);
	clk[VF610_CLK_SAI2_DIV] = imx_clk_divider("sai2_div", "sai2_en", CCM_CSCDR1, 8, 4);
	clk[VF610_CLK_SAI2] = imx_clk_gate2("sai2", "sai2_div", CCM_CCGR1, CCM_CCGRx_CGn(1));
	clk[VF610_CLK_SAI2] = imx_clk_gate2("sai2", "ipg_bus", CCM_CCGR1, CCM_CCGRx_CGn(1));

	clk[VF610_CLK_SAI3_SEL] = imx_clk_mux("sai3_sel", CCM_CSCMR1, 6, 2, sai_sels, 4);
	clk[VF610_CLK_SAI3_EN] = imx_clk_gate("sai3_en", "sai3_sel", CCM_CSCDR1, 19);
	clk[VF610_CLK_SAI3_DIV] = imx_clk_divider("sai3_div", "sai3_en", CCM_CSCDR1, 12, 4);
	clk[VF610_CLK_SAI3] = imx_clk_gate2("sai3", "sai3_div", CCM_CCGR1, CCM_CCGRx_CGn(2));
	clk[VF610_CLK_SAI3] = imx_clk_gate2("sai3", "ipg_bus", CCM_CCGR1, CCM_CCGRx_CGn(2));

	clk[VF610_CLK_NFC_SEL] = imx_clk_mux("nfc_sel", CCM_CSCMR1, 12, 2, nfc_sels, 4);
	clk[VF610_CLK_NFC_EN] = imx_clk_gate("nfc_en", "nfc_sel", CCM_CSCDR2, 9);
@@ -9,6 +9,7 @@
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>

@@ -9,6 +9,7 @@
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>

@@ -9,6 +9,7 @@
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
@@ -41,15 +41,10 @@

#define SUN4I_PLL2_OUTPUTS		4

struct sun4i_pll2_data {
	u32 post_div_offset;
	u32 pre_div_flags;
};

static DEFINE_SPINLOCK(sun4i_a10_pll2_lock);

static void __init sun4i_pll2_setup(struct device_node *node,
				    struct sun4i_pll2_data *data)
				    int post_div_offset)
{
	const char *clk_name = node->name, *parent;
	struct clk **clks, *base_clk, *prediv_clk;

@@ -76,7 +71,7 @@ static void __init sun4i_pll2_setup(struct device_node *node,
					  parent, 0, reg,
					  SUN4I_PLL2_PRE_DIV_SHIFT,
					  SUN4I_PLL2_PRE_DIV_WIDTH,
					  data->pre_div_flags,
					  CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
					  &sun4i_a10_pll2_lock);
	if (!prediv_clk) {
		pr_err("Couldn't register the prediv clock\n");

@@ -127,7 +122,7 @@ static void __init sun4i_pll2_setup(struct device_node *node,
	 */
	val = readl(reg);
	val &= ~(SUN4I_PLL2_POST_DIV_MASK << SUN4I_PLL2_POST_DIV_SHIFT);
	val |= (SUN4I_PLL2_POST_DIV_VALUE - data->post_div_offset) << SUN4I_PLL2_POST_DIV_SHIFT;
	val |= (SUN4I_PLL2_POST_DIV_VALUE - post_div_offset) << SUN4I_PLL2_POST_DIV_SHIFT;
	writel(val, reg);

	of_property_read_string_index(node, "clock-output-names",

@@ -191,25 +186,17 @@ err_unmap:
	iounmap(reg);
}

static struct sun4i_pll2_data sun4i_a10_pll2_data = {
	.pre_div_flags	= CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
};

static void __init sun4i_a10_pll2_setup(struct device_node *node)
{
	sun4i_pll2_setup(node, &sun4i_a10_pll2_data);
	sun4i_pll2_setup(node, 0);
}

CLK_OF_DECLARE(sun4i_a10_pll2, "allwinner,sun4i-a10-pll2-clk",
	       sun4i_a10_pll2_setup);

static struct sun4i_pll2_data sun5i_a13_pll2_data = {
	.post_div_offset	= 1,
};

static void __init sun5i_a13_pll2_setup(struct device_node *node)
{
	sun4i_pll2_setup(node, &sun5i_a13_pll2_data);
	sun4i_pll2_setup(node, 1);
}

CLK_OF_DECLARE(sun5i_a13_pll2, "allwinner,sun5i-a13-pll2-clk",
@@ -20,6 +20,8 @@ static struct ti_dt_clk dm816x_clks[] = {
	DT_CLK(NULL, "sys_clkin", "sys_clkin_ck"),
	DT_CLK(NULL, "timer_sys_ck", "sys_clkin_ck"),
	DT_CLK(NULL, "sys_32k_ck", "sys_32k_ck"),
	DT_CLK(NULL, "timer_32k_ck", "sysclk18_ck"),
	DT_CLK(NULL, "timer_ext_ck", "tclkin_ck"),
	DT_CLK(NULL, "mpu_ck", "mpu_ck"),
	DT_CLK(NULL, "timer1_fck", "timer1_fck"),
	DT_CLK(NULL, "timer2_fck", "timer2_fck"),
@@ -240,7 +240,7 @@ u8 omap2_init_dpll_parent(struct clk_hw *hw)
 */
unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk)
{
	long long dpll_clk;
	u64 dpll_clk;
	u32 dpll_mult, dpll_div, v;
	struct dpll_data *dd;

@@ -262,7 +262,7 @@ unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk)
	dpll_div = v & dd->div1_mask;
	dpll_div >>= __ffs(dd->div1_mask);

	dpll_clk = (long long)clk_get_rate(dd->clk_ref) * dpll_mult;
	dpll_clk = (u64)clk_get_rate(dd->clk_ref) * dpll_mult;
	do_div(dpll_clk, dpll_div + 1);

	return dpll_clk;
@@ -214,7 +214,6 @@ static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
{
	struct clk_divider *divider;
	unsigned int div, value;
	unsigned long flags = 0;
	u32 val;

	if (!hw || !rate)

@@ -228,9 +227,6 @@ static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
	if (value > div_mask(divider))
		value = div_mask(divider);

	if (divider->lock)
		spin_lock_irqsave(divider->lock, flags);

	if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
		val = div_mask(divider) << (divider->shift + 16);
	} else {

@@ -240,9 +236,6 @@ static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
	val |= value << divider->shift;
	ti_clk_ll_ops->clk_writel(val, divider->reg);

	if (divider->lock)
		spin_unlock_irqrestore(divider->lock, flags);

	return 0;
}

@@ -256,8 +249,7 @@ static struct clk *_register_divider(struct device *dev, const char *name,
				     const char *parent_name,
				     unsigned long flags, void __iomem *reg,
				     u8 shift, u8 width, u8 clk_divider_flags,
				     const struct clk_div_table *table,
				     spinlock_t *lock)
				     const struct clk_div_table *table)
{
	struct clk_divider *div;
	struct clk *clk;

@@ -288,7 +280,6 @@ static struct clk *_register_divider(struct device *dev, const char *name,
	div->shift = shift;
	div->width = width;
	div->flags = clk_divider_flags;
	div->lock = lock;
	div->hw.init = &init;
	div->table = table;

@@ -421,7 +412,7 @@ struct clk *ti_clk_register_divider(struct ti_clk *setup)

	clk = _register_divider(NULL, setup->name, div->parent,
				flags, (void __iomem *)reg, div->bit_shift,
				width, div_flags, table, NULL);
				width, div_flags, table);

	if (IS_ERR(clk))
		kfree(table);

@@ -584,8 +575,7 @@ static void __init of_ti_divider_clk_setup(struct device_node *node)
		goto cleanup;

	clk = _register_divider(NULL, node->name, parent_name, flags, reg,
				shift, width, clk_divider_flags, table,
				NULL);
				shift, width, clk_divider_flags, table);

	if (!IS_ERR(clk)) {
		of_clk_add_provider(node, of_clk_src_simple_get, clk);
@@ -168,7 +168,7 @@ static unsigned long ti_fapll_recalc_rate(struct clk_hw *hw,
{
	struct fapll_data *fd = to_fapll(hw);
	u32 fapll_n, fapll_p, v;
	long long rate;
	u64 rate;

	if (ti_fapll_clock_is_bypass(fd))
		return parent_rate;

@@ -314,7 +314,7 @@ static unsigned long ti_fapll_synth_recalc_rate(struct clk_hw *hw,
{
	struct fapll_synth *synth = to_synth(hw);
	u32 synth_div_m;
	long long rate;
	u64 rate;

	/* The audio_pll_clk1 is hardwired to produce a 32.768 kHz clock */
	if (!synth->div)
@@ -69,7 +69,6 @@ static int ti_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_mux *mux = to_clk_mux(hw);
	u32 val;
	unsigned long flags = 0;

	if (mux->table) {
		index = mux->table[index];

@@ -81,9 +80,6 @@ static int ti_clk_mux_set_parent(struct clk_hw *hw, u8 index)
		index++;
	}

	if (mux->lock)
		spin_lock_irqsave(mux->lock, flags);

	if (mux->flags & CLK_MUX_HIWORD_MASK) {
		val = mux->mask << (mux->shift + 16);
	} else {

@@ -93,9 +89,6 @@ static int ti_clk_mux_set_parent(struct clk_hw *hw, u8 index)
	val |= index << mux->shift;
	ti_clk_ll_ops->clk_writel(val, mux->reg);

	if (mux->lock)
		spin_unlock_irqrestore(mux->lock, flags);

	return 0;
}

@@ -109,7 +102,7 @@ static struct clk *_register_mux(struct device *dev, const char *name,
				 const char **parent_names, u8 num_parents,
				 unsigned long flags, void __iomem *reg,
				 u8 shift, u32 mask, u8 clk_mux_flags,
				 u32 *table, spinlock_t *lock)
				 u32 *table)
{
	struct clk_mux *mux;
	struct clk *clk;

@@ -133,7 +126,6 @@ static struct clk *_register_mux(struct device *dev, const char *name,
	mux->shift = shift;
	mux->mask = mask;
	mux->flags = clk_mux_flags;
	mux->lock = lock;
	mux->table = table;
	mux->hw.init = &init;

@@ -175,7 +167,7 @@ struct clk *ti_clk_register_mux(struct ti_clk *setup)

	return _register_mux(NULL, setup->name, mux->parents, mux->num_parents,
			     flags, (void __iomem *)reg, mux->bit_shift, mask,
			     mux_flags, NULL, NULL);
			     mux_flags, NULL);
}

/**

@@ -227,8 +219,7 @@ static void of_mux_clk_setup(struct device_node *node)
	mask = (1 << fls(mask)) - 1;

	clk = _register_mux(NULL, node->name, parent_names, num_parents,
			    flags, reg, shift, mask, clk_mux_flags, NULL,
			    NULL);
			    flags, reg, shift, mask, clk_mux_flags, NULL);

	if (!IS_ERR(clk))
		of_clk_add_provider(node, of_clk_src_simple_get, clk);
@@ -55,7 +55,7 @@ int __init clocksource_mmio_init(void __iomem *base, const char *name,
{
	struct clocksource_mmio *cs;

	if (bits > 32 || bits < 16)
	if (bits > 64 || bits < 16)
		return -EINVAL;

	cs = kzalloc(sizeof(struct clocksource_mmio), GFP_KERNEL);
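With the width check relaxed to 64 bits, a driver with a 64-bit up-counter can register it directly, supplying its own read accessor (sketch; the demo_ base pointer, name and rate are placeholders, and readq_relaxed() assumes an architecture or io-64-nonatomic header that provides it):

	#include <linux/clocksource.h>
	#include <linux/io.h>

	static void __iomem *demo_base;		/* hypothetical, mapped elsewhere */

	static cycle_t demo_read64(struct clocksource *cs)
	{
		return readq_relaxed(demo_base);
	}

	static int __init demo_cs_init(void)
	{
		/* 100 MHz counter, rating 300, full 64 valid bits */
		return clocksource_mmio_init(demo_base, "demo-counter",
					     100000000, 300, 64, demo_read64);
	}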
@@ -648,7 +648,7 @@ late_initcall(s3c_cpufreq_initcall);
 *
 * Register the given set of PLLs with the system.
 */
int __init s3c_plltab_register(struct cpufreq_frequency_table *plls,
int s3c_plltab_register(struct cpufreq_frequency_table *plls,
			unsigned int plls_no)
{
	struct cpufreq_frequency_table *vals;
@@ -122,12 +122,10 @@ int fpga_mgr_firmware_load(struct fpga_manager *mgr, u32 flags,
	}

	ret = fpga_mgr_buf_load(mgr, flags, fw->data, fw->size);
	if (ret)
		return ret;

	release_firmware(fw);

	return 0;
	return ret;
}
EXPORT_SYMBOL_GPL(fpga_mgr_firmware_load);

@@ -256,7 +254,6 @@ int fpga_mgr_register(struct device *dev, const char *name,
			void *priv)
{
	struct fpga_manager *mgr;
	const char *dt_label;
	int id, ret;

	if (!mops || !mops->write_init || !mops->write ||

@@ -300,11 +297,9 @@ int fpga_mgr_register(struct device *dev, const char *name,
	mgr->dev.id = id;
	dev_set_drvdata(dev, mgr);

	dt_label = of_get_property(mgr->dev.of_node, "label", NULL);
	if (dt_label)
		ret = dev_set_name(&mgr->dev, "%s", dt_label);
	else
		ret = dev_set_name(&mgr->dev, "fpga%d", id);
	if (ret)
		goto error_device;

	ret = device_add(&mgr->dev);
	if (ret)
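Consumer-side effect of the first hunk: fpga_mgr_firmware_load() now drops the firmware reference on both the success and failure paths, so callers only check the return value. Sketch (node and image names are placeholders):

	#include <linux/fpga/fpga-mgr.h>
	#include <linux/of.h>

	static int demo_program(struct device_node *mgr_node)
	{
		struct fpga_manager *mgr = of_fpga_mgr_get(mgr_node);
		int ret;

		if (IS_ERR(mgr))
			return PTR_ERR(mgr);
		ret = fpga_mgr_firmware_load(mgr, 0, "demo_image.rbf");
		fpga_mgr_put(mgr);
		return ret;
	}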
@@ -477,6 +477,14 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
		if (domain == AMDGPU_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}
	list_for_each_entry(entry, &duplicates, head) {
		domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
		/* if anything is swapped out don't swap it in here,
		   just abort and wait for the next CS */
		if (domain == AMDGPU_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
	if (r)
		goto error_unreserve;
@@ -159,7 +159,6 @@ struct nvkm_device_func {
struct nvkm_device_quirk {
	u8 tv_pin_mask;
	u8 tv_gpio;
	bool War00C800_0;
};

struct nvkm_device_chip {
@@ -258,12 +258,6 @@ nvkm_device_pci_10de_0df4[] = {
	{}
};

static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_0fcd[] = {
	{ 0x17aa, 0x3801, NULL, { .War00C800_0 = true } }, /* Lenovo Y510P */
	{}
};

static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_0fd2[] = {
	{ 0x1028, 0x0595, "GeForce GT 640M LE" },

@@ -278,12 +272,6 @@ nvkm_device_pci_10de_0fe3[] = {
	{}
};

static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_0fe4[] = {
	{ 0x144d, 0xc740, NULL, { .War00C800_0 = true } },
	{}
};

static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_104b[] = {
	{ 0x1043, 0x844c, "GeForce GT 625" },

@@ -690,13 +678,6 @@ nvkm_device_pci_10de_1189[] = {
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_1199[] = {
	{ 0x1458, 0xd001, "GeForce GTX 760" },
	{ 0x1462, 0x1106, "GeForce GTX 780M", { .War00C800_0 = true } }, /* Medion Erazer X7827 */
	{}
};

static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_11e0[] = {
	{ 0x1558, 0x5106, NULL, { .War00C800_0 = true } },
	{}
};

@@ -706,14 +687,6 @@ nvkm_device_pci_10de_11e3[] = {
	{}
};

static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_11fc[] = {
	{ 0x1179, 0x0001, NULL, { .War00C800_0 = true } }, /* Toshiba Tecra W50 */
	{ 0x17aa, 0x2211, NULL, { .War00C800_0 = true } }, /* Lenovo W541 */
	{ 0x17aa, 0x221e, NULL, { .War00C800_0 = true } }, /* Lenovo W541 */
	{}
};

static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_1247[] = {
	{ 0x1043, 0x212a, "GeForce GT 635M" },

@@ -1368,7 +1341,7 @@ nvkm_device_pci_10de[] = {
	{ 0x0fc6, "GeForce GTX 650" },
	{ 0x0fc8, "GeForce GT 740" },
	{ 0x0fc9, "GeForce GT 730" },
	{ 0x0fcd, "GeForce GT 755M", nvkm_device_pci_10de_0fcd },
	{ 0x0fcd, "GeForce GT 755M" },
	{ 0x0fce, "GeForce GT 640M LE" },
	{ 0x0fd1, "GeForce GT 650M" },
	{ 0x0fd2, "GeForce GT 640M", nvkm_device_pci_10de_0fd2 },

@@ -1382,7 +1355,7 @@ nvkm_device_pci_10de[] = {
	{ 0x0fe1, "GeForce GT 730M" },
	{ 0x0fe2, "GeForce GT 745M" },
	{ 0x0fe3, "GeForce GT 745M", nvkm_device_pci_10de_0fe3 },
	{ 0x0fe4, "GeForce GT 750M", nvkm_device_pci_10de_0fe4 },
	{ 0x0fe4, "GeForce GT 750M" },
	{ 0x0fe9, "GeForce GT 750M" },
	{ 0x0fea, "GeForce GT 755M" },
	{ 0x0fec, "GeForce 710A" },

@@ -1497,12 +1470,12 @@ nvkm_device_pci_10de[] = {
	{ 0x11c6, "GeForce GTX 650 Ti" },
	{ 0x11c8, "GeForce GTX 650" },
	{ 0x11cb, "GeForce GT 740" },
	{ 0x11e0, "GeForce GTX 770M", nvkm_device_pci_10de_11e0 },
	{ 0x11e0, "GeForce GTX 770M" },
	{ 0x11e1, "GeForce GTX 765M" },
	{ 0x11e2, "GeForce GTX 765M" },
	{ 0x11e3, "GeForce GTX 760M", nvkm_device_pci_10de_11e3 },
	{ 0x11fa, "Quadro K4000" },
	{ 0x11fc, "Quadro K2100M", nvkm_device_pci_10de_11fc },
	{ 0x11fc, "Quadro K2100M" },
	{ 0x1200, "GeForce GTX 560 Ti" },
	{ 0x1201, "GeForce GTX 560" },
	{ 0x1203, "GeForce GTX 460 SE v2" },
@@ -81,9 +81,7 @@ gk104_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
	nvkm_mask(device, 0x000200, 0x00001000, 0x00001000);
	nvkm_rd32(device, 0x000200);

	if ( nvkm_boolopt(device->cfgopt, "War00C800_0",
	    device->quirk ? device->quirk->War00C800_0 : false)) {
		nvkm_info(&pmu->subdev, "hw bug workaround enabled\n");
	if (nvkm_boolopt(device->cfgopt, "War00C800_0", true)) {
		switch (device->chipset) {
		case 0xe4:
			magic(device, 0x04000000);