Merge branch 'perf/urgent' into perf/core, before applying dependent patches

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Author: Ingo Molnar <mingo@kernel.org>
Date:   2015-04-02 17:17:46 +02:00
commit c2b078e78a
37 changed files with 269 additions and 162 deletions

@@ -1362,6 +1362,7 @@ F:	drivers/i2c/busses/i2c-rk3x.c
 F:	drivers/*/*rockchip*
 F:	drivers/*/*/*rockchip*
 F:	sound/soc/rockchip/
+N:	rockchip
 
 ARM/SAMSUNG EXYNOS ARM ARCHITECTURES
 M:	Kukjin Kim <kgene@kernel.org>

@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 0
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Hurr durr I'ma sheep
 
 # *DOCUMENTATION*

@@ -67,7 +67,7 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
 	       sigset_t *set)
 {
 	int err;
-	err = __copy_to_user(&(sf->uc.uc_mcontext.regs), regs,
+	err = __copy_to_user(&(sf->uc.uc_mcontext.regs.scratch), regs,
 			     sizeof(sf->uc.uc_mcontext.regs.scratch));
 	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
@@ -83,7 +83,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
 	if (!err)
 		set_current_blocked(&set);
-	err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs),
+	err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs.scratch),
 				sizeof(sf->uc.uc_mcontext.regs.scratch));
 	return err;
@@ -131,6 +131,15 @@ SYSCALL_DEFINE0(rt_sigreturn)
 	/* Don't restart from sigreturn */
 	syscall_wont_restart(regs);
+	/*
+	 * Ensure that sigreturn always returns to user mode (in case the
+	 * regs saved on user stack got fudged between save and sigreturn)
+	 * Otherwise it is easy to panic the kernel with a custom
+	 * signal handler and/or restorer which clobbers the status32/ret
+	 * to return to a bogus location in kernel mode.
+	 */
+	regs->status32 |= STATUS_U_MASK;
 	return regs->r0;
 
 badframe:
@@ -229,8 +238,11 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
 	/*
 	 * handler returns using sigreturn stub provided already by userspace
+	 * If not, nuke the process right away
 	 */
-	BUG_ON(!(ksig->ka.sa.sa_flags & SA_RESTORER));
+	if(!(ksig->ka.sa.sa_flags & SA_RESTORER))
+		return 1;
 
 	regs->blink = (unsigned long)ksig->ka.sa.sa_restorer;
 
 	/* User Stack for signal handler will be above the frame just carved */
@@ -296,12 +308,12 @@ static void
 handle_signal(struct ksignal *ksig, struct pt_regs *regs)
 {
 	sigset_t *oldset = sigmask_to_save();
-	int ret;
+	int failed;
 
 	/* Set up the stack frame */
-	ret = setup_rt_frame(ksig, oldset, regs);
+	failed = setup_rt_frame(ksig, oldset, regs);
 
-	signal_setup_done(ret, ksig, 0);
+	signal_setup_done(failed, ksig, 0);
 }
 
 void do_signal(struct pt_regs *regs)
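
The setup_rt_frame() change swaps a kernel BUG_ON() for an error return: the generic signal_setup_done() helper turns a nonzero "failed" into a forced SIGSEGV for the task, killing the process instead of panicking the kernel. A minimal sketch of that contract, with demo_* names standing in for the ARC internals (signal_setup_done(), sigmask_to_save() and SA_RESTORER are the real API):

#include <linux/sched.h>
#include <linux/signal.h>

/* Illustrative frame-setup helper: report failure, don't crash. */
static int demo_setup_rt_frame(struct ksignal *ksig, sigset_t *set,
			       struct pt_regs *regs)
{
	if (!(ksig->ka.sa.sa_flags & SA_RESTORER))
		return 1;	/* no restorer: cannot build a usable frame */
	/* ... carve the frame on the user stack, copy the context ... */
	return 0;
}

static void demo_handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	int failed = demo_setup_rt_frame(ksig, sigmask_to_save(), regs);

	/* nonzero "failed" makes the core force SIGSEGV on the task */
	signal_setup_done(failed, ksig, 0);
}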

@@ -619,6 +619,7 @@ config ARCH_PXA
 	select GENERIC_CLOCKEVENTS
 	select GPIO_PXA
 	select HAVE_IDE
+	select IRQ_DOMAIN
 	select MULTI_IRQ_HANDLER
 	select PLAT_PXA
 	select SPARSE_IRQ

@@ -36,6 +36,20 @@
 		>;
 	};
 
+	mmc_pins: pinmux_mmc_pins {
+		pinctrl-single,pins = <
+			DM816X_IOPAD(0x0a70, MUX_MODE0)	/* SD_POW */
+			DM816X_IOPAD(0x0a74, MUX_MODE0)	/* SD_CLK */
+			DM816X_IOPAD(0x0a78, MUX_MODE0)	/* SD_CMD */
+			DM816X_IOPAD(0x0a7C, MUX_MODE0)	/* SD_DAT0 */
+			DM816X_IOPAD(0x0a80, MUX_MODE0)	/* SD_DAT1 */
+			DM816X_IOPAD(0x0a84, MUX_MODE0)	/* SD_DAT2 */
+			DM816X_IOPAD(0x0a88, MUX_MODE0)	/* SD_DAT3 */
+			DM816X_IOPAD(0x0a8c, MUX_MODE2)	/* GP1[7] */
+			DM816X_IOPAD(0x0a90, MUX_MODE2)	/* GP1[8] */
+		>;
+	};
+
 	usb0_pins: pinmux_usb0_pins {
 		pinctrl-single,pins = <
 			DM816X_IOPAD(0x0d00, MUX_MODE0) /* USB0_DRVVBUS */
@@ -137,7 +151,12 @@
 };
 
 &mmc1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&mmc_pins>;
 	vmmc-supply = <&vmmcsd_fixed>;
+	bus-width = <4>;
+	cd-gpios = <&gpio2 7 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio2 8 GPIO_ACTIVE_LOW>;
 };
 
 /* At least dm8168-evm rev c won't support multipoint, later may */

@@ -150,17 +150,27 @@
 		};
 
 		gpio1: gpio@48032000 {
-			compatible = "ti,omap3-gpio";
+			compatible = "ti,omap4-gpio";
 			ti,hwmods = "gpio1";
+			ti,gpio-always-on;
 			reg = <0x48032000 0x1000>;
-			interrupts = <97>;
+			interrupts = <96>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
 		};
 
 		gpio2: gpio@4804c000 {
-			compatible = "ti,omap3-gpio";
+			compatible = "ti,omap4-gpio";
 			ti,hwmods = "gpio2";
+			ti,gpio-always-on;
 			reg = <0x4804c000 0x1000>;
-			interrupts = <99>;
+			interrupts = <98>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
 		};
 
 		gpmc: gpmc@50000000 {

@@ -1111,7 +1111,6 @@
 				"wkupclk", "refclk",
 				"div-clk", "phy-div";
 			#phy-cells = <0>;
-			ti,hwmods = "pcie1-phy";
 		};
 
 		pcie2_phy: pciephy@4a095000 {
@@ -1130,7 +1129,6 @@
 				"wkupclk", "refclk",
 				"div-clk", "phy-div";
 			#phy-cells = <0>;
-			ti,hwmods = "pcie2-phy";
 			status = "disabled";
 		};
 	};

@@ -92,6 +92,8 @@
 			ti,hwmods = "aes";
 			reg = <0x480c5000 0x50>;
 			interrupts = <0>;
+			dmas = <&sdma 65 &sdma 66>;
+			dma-names = "tx", "rx";
 		};
 
 		prm: prm@48306000 {
@@ -550,6 +552,8 @@
 			ti,hwmods = "sham";
 			reg = <0x480c3000 0x64>;
 			interrupts = <49>;
+			dmas = <&sdma 69>;
+			dma-names = "rx";
 		};
 
 		smartreflex_core: smartreflex@480cb000 {

@@ -411,6 +411,7 @@
 			     "mac_clk_rx", "mac_clk_tx",
 			     "clk_mac_ref", "clk_mac_refout",
 			     "aclk_mac", "pclk_mac";
+		status = "disabled";
 	};
 
 	usb_host0_ehci: usb@ff500000 {

@@ -660,7 +660,7 @@
 			#address-cells = <1>;
 			#size-cells = <0>;
 			reg = <0xfff01000 0x1000>;
-			interrupts = <0 156 4>;
+			interrupts = <0 155 4>;
 			num-cs = <4>;
 			clocks = <&spi_m_clk>;
 			status = "disabled";

@@ -56,6 +56,22 @@
 	model = "Olimex A10-OLinuXino-LIME";
 	compatible = "olimex,a10-olinuxino-lime", "allwinner,sun4i-a10";
 
+	cpus {
+		cpu0: cpu@0 {
+			/*
+			 * The A10-Lime is known to be unstable
+			 * when running at 1008 MHz
+			 */
+			operating-points = <
+				/* kHz	 uV */
+				912000	1350000
+				864000	1300000
+				624000	1250000
+				>;
+			cooling-max-level = <2>;
+		};
+	};
+
 	soc@01c00000 {
 		emac: ethernet@01c0b000 {
 			pinctrl-names = "default";

@@ -75,7 +75,6 @@
 			clock-latency = <244144>; /* 8 32k periods */
 			operating-points = <
 				/* kHz	 uV */
-				1056000	1500000
 				1008000	1400000
 				912000	1350000
 				864000	1300000
@@ -83,7 +82,7 @@
 				>;
 			#cooling-cells = <2>;
 			cooling-min-level = <0>;
-			cooling-max-level = <4>;
+			cooling-max-level = <3>;
 		};
 	};

@@ -47,7 +47,6 @@
 			clock-latency = <244144>; /* 8 32k periods */
 			operating-points = <
 				/* kHz	 uV */
-				1104000	1500000
 				1008000	1400000
 				912000	1350000
 				864000	1300000
@@ -57,7 +56,7 @@
 				>;
 			#cooling-cells = <2>;
 			cooling-min-level = <0>;
-			cooling-max-level = <6>;
+			cooling-max-level = <5>;
 		};
 	};

@@ -105,7 +105,6 @@
 			clock-latency = <244144>; /* 8 32k periods */
 			operating-points = <
 				/* kHz	 uV */
-				1008000	1450000
 				960000	1400000
 				912000	1400000
 				864000	1300000
@@ -116,7 +115,7 @@
 				>;
 			#cooling-cells = <2>;
 			cooling-min-level = <0>;
-			cooling-max-level = <7>;
+			cooling-max-level = <6>;
 		};
 
 		cpu@1 {

@@ -720,6 +720,8 @@ static const char * __init omap_get_family(void)
 		return kasprintf(GFP_KERNEL, "OMAP4");
 	else if (soc_is_omap54xx())
 		return kasprintf(GFP_KERNEL, "OMAP5");
+	else if (soc_is_am33xx() || soc_is_am335x())
+		return kasprintf(GFP_KERNEL, "AM33xx");
 	else if (soc_is_am43xx())
 		return kasprintf(GFP_KERNEL, "AM43xx");
 	else if (soc_is_dra7xx())

@@ -11,6 +11,7 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+#include <linux/bitops.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
@@ -40,7 +41,6 @@
 #define ICHP_VAL_IRQ		(1 << 31)
 #define ICHP_IRQ(i)		(((i) >> 16) & 0x7fff)
 #define IPR_VALID		(1 << 31)
-#define IRQ_BIT(n)		(((n) - PXA_IRQ(0)) & 0x1f)
 
 #define MAX_INTERNAL_IRQS	128
@@ -51,6 +51,7 @@
 static void __iomem *pxa_irq_base;
 static int pxa_internal_irq_nr;
 static bool cpu_has_ipr;
+static struct irq_domain *pxa_irq_domain;
 
 static inline void __iomem *irq_base(int i)
 {
@@ -66,18 +67,20 @@ static inline void __iomem *irq_base(int i)
 void pxa_mask_irq(struct irq_data *d)
 {
 	void __iomem *base = irq_data_get_irq_chip_data(d);
+	irq_hw_number_t irq = irqd_to_hwirq(d);
 	uint32_t icmr = __raw_readl(base + ICMR);
 
-	icmr &= ~(1 << IRQ_BIT(d->irq));
+	icmr &= ~BIT(irq & 0x1f);
 	__raw_writel(icmr, base + ICMR);
 }
 
 void pxa_unmask_irq(struct irq_data *d)
 {
 	void __iomem *base = irq_data_get_irq_chip_data(d);
+	irq_hw_number_t irq = irqd_to_hwirq(d);
 	uint32_t icmr = __raw_readl(base + ICMR);
 
-	icmr |= 1 << IRQ_BIT(d->irq);
+	icmr |= BIT(irq & 0x1f);
 	__raw_writel(icmr, base + ICMR);
 }
@@ -118,40 +121,63 @@ asmlinkage void __exception_irq_entry ichp_handle_irq(struct pt_regs *regs)
 	} while (1);
 }
 
-void __init pxa_init_irq(int irq_nr, int (*fn)(struct irq_data *, unsigned int))
+static int pxa_irq_map(struct irq_domain *h, unsigned int virq,
+		       irq_hw_number_t hw)
 {
-	int irq, i, n;
+	void __iomem *base = irq_base(hw / 32);
 
-	BUG_ON(irq_nr > MAX_INTERNAL_IRQS);
+	/* initialize interrupt priority */
+	if (cpu_has_ipr)
+		__raw_writel(hw | IPR_VALID, pxa_irq_base + IPR(hw));
+
+	irq_set_chip_and_handler(virq, &pxa_internal_irq_chip,
+				 handle_level_irq);
+	irq_set_chip_data(virq, base);
+	set_irq_flags(virq, IRQF_VALID);
+
+	return 0;
+}
+
+static struct irq_domain_ops pxa_irq_ops = {
+	.map    = pxa_irq_map,
+	.xlate  = irq_domain_xlate_onecell,
+};
+
+static __init void
+pxa_init_irq_common(struct device_node *node, int irq_nr,
+		    int (*fn)(struct irq_data *, unsigned int))
+{
+	int n;
 
 	pxa_internal_irq_nr = irq_nr;
-	cpu_has_ipr = !cpu_is_pxa25x();
-	pxa_irq_base = io_p2v(0x40d00000);
+	pxa_irq_domain = irq_domain_add_legacy(node, irq_nr,
+					       PXA_IRQ(0), 0,
+					       &pxa_irq_ops, NULL);
+	if (!pxa_irq_domain)
+		panic("Unable to add PXA IRQ domain\n");
+	irq_set_default_host(pxa_irq_domain);
 
 	for (n = 0; n < irq_nr; n += 32) {
 		void __iomem *base = irq_base(n >> 5);
 
 		__raw_writel(0, base + ICMR);	/* disable all IRQs */
 		__raw_writel(0, base + ICLR);	/* all IRQs are IRQ, not FIQ */
-		for (i = n; (i < (n + 32)) && (i < irq_nr); i++) {
-			/* initialize interrupt priority */
-			if (cpu_has_ipr)
-				__raw_writel(i | IPR_VALID, pxa_irq_base + IPR(i));
-
-			irq = PXA_IRQ(i);
-			irq_set_chip_and_handler(irq, &pxa_internal_irq_chip,
-						 handle_level_irq);
-			irq_set_chip_data(irq, base);
-			set_irq_flags(irq, IRQF_VALID);
-		}
 	}
 
 	/* only unmasked interrupts kick us out of idle */
 	__raw_writel(1, irq_base(0) + ICCR);
 
 	pxa_internal_irq_chip.irq_set_wake = fn;
 }
+
+void __init pxa_init_irq(int irq_nr, int (*fn)(struct irq_data *, unsigned int))
+{
+	BUG_ON(irq_nr > MAX_INTERNAL_IRQS);
+
+	pxa_irq_base = io_p2v(0x40d00000);
+	cpu_has_ipr = !cpu_is_pxa25x();
+	pxa_init_irq_common(NULL, irq_nr, fn);
+}
@@ -203,30 +229,6 @@ struct syscore_ops pxa_irq_syscore_ops = {
 };
 
 #ifdef CONFIG_OF
-static struct irq_domain *pxa_irq_domain;
-
-static int pxa_irq_map(struct irq_domain *h, unsigned int virq,
-		       irq_hw_number_t hw)
-{
-	void __iomem *base = irq_base(hw / 32);
-
-	/* initialize interrupt priority */
-	if (cpu_has_ipr)
-		__raw_writel(hw | IPR_VALID, pxa_irq_base + IPR(hw));
-
-	irq_set_chip_and_handler(hw, &pxa_internal_irq_chip,
-				 handle_level_irq);
-	irq_set_chip_data(hw, base);
-	set_irq_flags(hw, IRQF_VALID);
-
-	return 0;
-}
-
-static struct irq_domain_ops pxa_irq_ops = {
-	.map    = pxa_irq_map,
-	.xlate  = irq_domain_xlate_onecell,
-};
-
 static const struct of_device_id intc_ids[] __initconst = {
 	{ .compatible = "marvell,pxa-intc", },
 	{}
@@ -236,7 +238,7 @@ void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int))
 {
 	struct device_node *node;
 	struct resource res;
-	int n, ret;
+	int ret;
 
 	node = of_find_matching_node(NULL, intc_ids);
 	if (!node) {
@@ -267,23 +269,6 @@ void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int))
 		return;
 	}
 
-	pxa_irq_domain = irq_domain_add_legacy(node, pxa_internal_irq_nr, 0, 0,
-					       &pxa_irq_ops, NULL);
-	if (!pxa_irq_domain)
-		panic("Unable to add PXA IRQ domain\n");
-
-	irq_set_default_host(pxa_irq_domain);
-
-	for (n = 0; n < pxa_internal_irq_nr; n += 32) {
-		void __iomem *base = irq_base(n >> 5);
-
-		__raw_writel(0, base + ICMR);	/* disable all IRQs */
-		__raw_writel(0, base + ICLR);	/* all IRQs are IRQ, not FIQ */
-	}
-
-	/* only unmasked interrupts kick us out of idle */
-	__raw_writel(1, irq_base(0) + ICCR);
-
-	pxa_internal_irq_chip.irq_set_wake = fn;
+	pxa_init_irq_common(node, pxa_internal_irq_nr, fn);
 }
 
 #endif /* CONFIG_OF */
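
Both init paths above now funnel through a single irq_domain whose .map callback performs the per-interrupt setup, keyed by the hardware number (hw) rather than the Linux number (virq). A reduced sketch of that shape; the demo_* names and the 32-interrupts-per-bank layout are illustrative:

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/io.h>

static struct irq_chip demo_chip = {
	.name = "demo",
};

static void __iomem *demo_bank_base[4];	/* one register bank per 32 IRQs */

static int demo_irq_map(struct irq_domain *d, unsigned int virq,
			irq_hw_number_t hw)
{
	/* per-IRQ setup: flow handler plus the bank this hwirq lives in */
	irq_set_chip_and_handler(virq, &demo_chip, handle_level_irq);
	irq_set_chip_data(virq, demo_bank_base[hw / 32]);
	return 0;
}

static const struct irq_domain_ops demo_irq_ops = {
	.map	= demo_irq_map,
	.xlate	= irq_domain_xlate_onecell,
};

A legacy domain (irq_domain_add_legacy()) pre-maps a fixed virq range so that non-DT boards keep their historical interrupt numbers, which is why the common helper above can serve both the board-file and the device-tree path.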

@@ -412,7 +412,7 @@ static struct fixed_voltage_config can_regulator_pdata = {
 };
 
 static struct platform_device can_regulator_device = {
-	.name	= "reg-fixed-volage",
+	.name	= "reg-fixed-voltage",
 	.id	= 0,
 	.dev	= {
 		.platform_data	= &can_regulator_pdata,

@@ -1,10 +1,12 @@
 menuconfig ARCH_SUNXI
 	bool "Allwinner SoCs" if ARCH_MULTI_V7
 	select ARCH_REQUIRE_GPIOLIB
+	select ARCH_HAS_RESET_CONTROLLER
 	select CLKSRC_MMIO
 	select GENERIC_IRQ_CHIP
 	select PINCTRL
 	select SUN4I_TIMER
+	select RESET_CONTROLLER
 
 if ARCH_SUNXI
@@ -20,10 +22,8 @@ config MACH_SUN5I
 config MACH_SUN6I
 	bool "Allwinner A31 (sun6i) SoCs support"
 	default ARCH_SUNXI
-	select ARCH_HAS_RESET_CONTROLLER
 	select ARM_GIC
 	select MFD_SUN6I_PRCM
-	select RESET_CONTROLLER
 	select SUN5I_HSTIMER
 
 config MACH_SUN7I
@@ -37,16 +37,12 @@ config MACH_SUN7I
 config MACH_SUN8I
 	bool "Allwinner A23 (sun8i) SoCs support"
 	default ARCH_SUNXI
-	select ARCH_HAS_RESET_CONTROLLER
 	select ARM_GIC
 	select MFD_SUN6I_PRCM
-	select RESET_CONTROLLER
 
 config MACH_SUN9I
 	bool "Allwinner (sun9i) SoCs support"
 	default ARCH_SUNXI
-	select ARCH_HAS_RESET_CONTROLLER
 	select ARM_GIC
-	select RESET_CONTROLLER
 
 endif

@@ -799,6 +799,7 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	const struct of_device_id *match;
 	const struct dmtimer_platform_data *pdata;
+	int ret;
 
 	match = of_match_device(of_match_ptr(omap_timer_match), dev);
 	pdata = match ? match->data : dev->platform_data;
@@ -860,7 +861,12 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
 	}
 
 	if (!timer->reserved) {
-		pm_runtime_get_sync(dev);
+		ret = pm_runtime_get_sync(dev);
+		if (ret < 0) {
+			dev_err(dev, "%s: pm_runtime_get_sync failed!\n",
+				__func__);
+			goto err_get_sync;
+		}
 		__omap_dm_timer_init_regs(timer);
 		pm_runtime_put(dev);
 	}
@@ -873,6 +879,11 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
 	dev_dbg(dev, "Device Probed.\n");
 
 	return 0;
+
+err_get_sync:
+	pm_runtime_put_noidle(dev);
+	pm_runtime_disable(dev);
+	return ret;
 }
 
 /**
@@ -899,6 +910,8 @@ static int omap_dm_timer_remove(struct platform_device *pdev)
 	}
 	spin_unlock_irqrestore(&dm_timer_lock, flags);
 
+	pm_runtime_disable(&pdev->dev);
+
 	return ret;
 }
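
The added error path follows the usual runtime-PM rule: pm_runtime_get_sync() raises the device usage count even when it fails, so the unwind must drop that count with pm_runtime_put_noidle() before disabling runtime PM. A condensed sketch of the pattern (demo_probe_body() is illustrative):

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int demo_probe_body(struct device *dev)
{
	int ret;

	pm_runtime_enable(dev);

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* rebalance the usage count */
		pm_runtime_disable(dev);
		return ret;
	}

	/* ... the hardware is powered; touch registers here ... */

	pm_runtime_put(dev);
	return 0;
}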

@@ -8,7 +8,7 @@
  */
 
 /* SoC fixed clocks */
-soc_uartclk: refclk72738khz {
+soc_uartclk: refclk7273800hz {
 	compatible = "fixed-clock";
 	#clock-cells = <0>;
 	clock-frequency = <7273800>;

@@ -26,7 +26,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 	if (likely(pgd != NULL)) {
 		memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER);
-#ifdef CONFIG_64BIT
+#if PT_NLEVELS == 3
 		actual_pgd += PTRS_PER_PGD;
 		/* Populate first pmd with allocated memory.  We mark it
 		 * with PxD_FLAG_ATTACHED as a signal to the system that this
@@ -45,7 +45,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
-#ifdef CONFIG_64BIT
+#if PT_NLEVELS == 3
 	pgd -= PTRS_PER_PGD;
 #endif
 	free_pages((unsigned long)pgd, PGD_ALLOC_ORDER);
@@ -72,12 +72,15 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 {
-#ifdef CONFIG_64BIT
 	if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
-		/* This is the permanent pmd attached to the pgd;
-		 * cannot free it */
+		/*
+		 * This is the permanent pmd attached to the pgd;
+		 * cannot free it.
+		 * Increment the counter to compensate for the decrement
+		 * done by generic mm code.
+		 */
+		mm_inc_nr_pmds(mm);
 		return;
-#endif
 	free_pages((unsigned long)pmd, PMD_ORDER);
 }
@@ -99,7 +102,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 static inline void
 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
 {
-#ifdef CONFIG_64BIT
+#if PT_NLEVELS == 3
 	/* preserve the gateway marker if this is the beginning of
 	 * the permanent pmd */
 	if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)

@@ -55,8 +55,8 @@
 #define ENTRY_COMP(_name_) .word sys_##_name_
 #endif
 
-	ENTRY_SAME(restart_syscall)	/* 0 */
-	ENTRY_SAME(exit)
+90:	ENTRY_SAME(restart_syscall)	/* 0 */
+91:	ENTRY_SAME(exit)
 	ENTRY_SAME(fork_wrapper)
 	ENTRY_SAME(read)
 	ENTRY_SAME(write)
@@ -439,7 +439,10 @@
 	ENTRY_SAME(bpf)
 	ENTRY_COMP(execveat)
 
-	/* Nothing yet */
+
+	.ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))
+	.error "size of syscall table does not fit value of __NR_Linux_syscalls"
+	.endif
 
 #undef ENTRY_SAME
 #undef ENTRY_DIFF
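
The .ifne/.error block is an assemble-time assertion: it measures the table against __NR_Linux_syscalls, using the distance between the 90: and 91: labels as the per-entry size, and fails the build on a mismatch. A hedged C-side sketch of the same idea, with illustrative names:

#include <linux/bug.h>
#include <linux/kernel.h>

#define DEMO_NR_SYSCALLS 3

static long (*const demo_syscall_table[])(void) = {
	NULL,	/* restart_syscall */
	NULL,	/* exit */
	NULL,	/* fork */
};

static inline void demo_check_table(void)
{
	/* build fails if the table and its advertised length diverge */
	BUILD_BUG_ON(ARRAY_SIZE(demo_syscall_table) != DEMO_NR_SYSCALLS);
}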

@@ -636,7 +636,7 @@ static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu)
 	spin_lock(&vcpu->arch.vpa_update_lock);
 	lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr;
 	if (lppaca)
-		yield_count = lppaca->yield_count;
+		yield_count = be32_to_cpu(lppaca->yield_count);
 	spin_unlock(&vcpu->arch.vpa_update_lock);
 	return yield_count;
 }
@@ -942,20 +942,20 @@ static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
 static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
 		bool preserve_top32)
 {
+	struct kvm *kvm = vcpu->kvm;
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
 	u64 mask;
 
+	mutex_lock(&kvm->lock);
 	spin_lock(&vc->lock);
 	/*
 	 * If ILE (interrupt little-endian) has changed, update the
 	 * MSR_LE bit in the intr_msr for each vcpu in this vcore.
 	 */
 	if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) {
-		struct kvm *kvm = vcpu->kvm;
 		struct kvm_vcpu *vcpu;
 		int i;
 
-		mutex_lock(&kvm->lock);
 		kvm_for_each_vcpu(i, vcpu, kvm) {
 			if (vcpu->arch.vcore != vc)
 				continue;
@@ -964,7 +964,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
 			else
 				vcpu->arch.intr_msr &= ~MSR_LE;
 		}
-		mutex_unlock(&kvm->lock);
 	}
 
 	/*
@@ -981,6 +980,7 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
 		mask &= 0xFFFFFFFF;
 	vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
 	spin_unlock(&vc->lock);
+	mutex_unlock(&kvm->lock);
 }
 
 static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
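
The yield_count fix matters because the lppaca is shared with the hypervisor in big-endian byte order while the host kernel may run little-endian, so every access needs an explicit conversion. A small sketch of the accessor pattern (the struct here is illustrative, not the real lppaca):

#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_shared_area {
	__be32 yield_count;	/* stored big-endian by the hypervisor */
};

static u32 demo_read_yield_count(const struct demo_shared_area *sa)
{
	/* no-op on big-endian hosts, byte swap on little-endian ones */
	return be32_to_cpu(sa->yield_count);
}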

@@ -1005,6 +1005,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 	/* Save HEIR (HV emulation assist reg) in emul_inst
 	   if this is an HEI (HV emulation interrupt, e40) */
 	li	r3,KVM_INST_FETCH_FAILED
+	stw	r3,VCPU_LAST_INST(r9)
 	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
 	bne	11f
 	mfspr	r3,SPRN_HEIR

@@ -212,11 +212,11 @@ static struct event_constraint intel_hsw_event_constraints[] = {
 	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
 	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
 	/* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
-	INTEL_EVENT_CONSTRAINT(0x08a3, 0x4),
+	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
 	/* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
-	INTEL_EVENT_CONSTRAINT(0x0ca3, 0x4),
+	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
 	/* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
-	INTEL_EVENT_CONSTRAINT(0x04a3, 0xf),
+	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
 	EVENT_CONSTRAINT_END
 };
@@ -1852,11 +1852,11 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event
 	if (c)
 		return c;
 
-	c = intel_pebs_constraints(event);
+	c = intel_shared_regs_constraints(cpuc, event);
 	if (c)
 		return c;
 
-	c = intel_shared_regs_constraints(cpuc, event);
+	c = intel_pebs_constraints(event);
 	if (c)
 		return c;

@@ -364,12 +364,21 @@ system_call_fastpath:
  * Has incomplete stack frame and undefined top of stack.
  */
 ret_from_sys_call:
-	testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
-	jnz int_ret_from_sys_call_fixup	/* Go the slow path */
-
 	LOCKDEP_SYS_EXIT
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
+
+	/*
+	 * We must check ti flags with interrupts (or at least preemption)
+	 * off because we must *never* return to userspace without
+	 * processing exit work that is enqueued if we're preempted here.
+	 * In particular, returning to userspace with any of the one-shot
+	 * flags (TIF_NOTIFY_RESUME, TIF_USER_RETURN_NOTIFY, etc) set is
+	 * very bad.
+	 */
+	testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+	jnz int_ret_from_sys_call_fixup	/* Go the slow path */
+
 	CFI_REMEMBER_STATE
 	/*
 	 * sysretq will re-enable interrupts:
@@ -386,7 +395,7 @@ ret_from_sys_call:
 int_ret_from_sys_call_fixup:
 	FIXUP_TOP_OF_STACK %r11, -ARGOFFSET
-	jmp int_ret_from_sys_call
+	jmp int_ret_from_sys_call_irqs_off
 
 /* Do syscall tracing */
 tracesys:
@@ -432,6 +441,7 @@ tracesys_phase2:
 GLOBAL(int_ret_from_sys_call)
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
+int_ret_from_sys_call_irqs_off:
 	movl $_TIF_ALLWORK_MASK,%edi
 	/* edi: mask to check */
 GLOBAL(int_with_check)

@@ -192,6 +192,7 @@ config SYS_SUPPORTS_EM_STI
 config SH_TIMER_CMT
 	bool "Renesas CMT timer driver" if COMPILE_TEST
 	depends on GENERIC_CLOCKEVENTS
+	depends on HAS_IOMEM
 	default SYS_SUPPORTS_SH_CMT
 	help
 	  This enables build of a clocksource and clockevent driver for
@@ -201,6 +202,7 @@ config SH_TIMER_CMT
 config SH_TIMER_MTU2
 	bool "Renesas MTU2 timer driver" if COMPILE_TEST
 	depends on GENERIC_CLOCKEVENTS
+	depends on HAS_IOMEM
 	default SYS_SUPPORTS_SH_MTU2
 	help
 	  This enables build of a clockevent driver for the Multi-Function
@@ -210,6 +212,7 @@ config SH_TIMER_MTU2
 config SH_TIMER_TMU
 	bool "Renesas TMU timer driver" if COMPILE_TEST
 	depends on GENERIC_CLOCKEVENTS
+	depends on HAS_IOMEM
 	default SYS_SUPPORTS_SH_TMU
 	help
 	  This enables build of a clocksource and clockevent driver for

@@ -17,7 +17,6 @@
 #include <linux/irq.h>
 #include <linux/irqreturn.h>
 #include <linux/reset.h>
-#include <linux/sched_clock.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
@@ -137,11 +136,6 @@ static struct irqaction sun5i_timer_irq = {
 	.dev_id = &sun5i_clockevent,
 };
 
-static u64 sun5i_timer_sched_read(void)
-{
-	return ~readl(timer_base + TIMER_CNTVAL_LO_REG(1));
-}
-
 static void __init sun5i_timer_init(struct device_node *node)
 {
 	struct reset_control *rstc;
@@ -172,7 +166,6 @@ static void __init sun5i_timer_init(struct device_node *node)
 	writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD,
 	       timer_base + TIMER_CTL_REG(1));
 
-	sched_clock_register(sun5i_timer_sched_read, 32, rate);
 	clocksource_mmio_init(timer_base + TIMER_CNTVAL_LO_REG(1), node->name,
 			      rate, 340, 32, clocksource_mmio_readl_down);

@@ -42,10 +42,10 @@
 #define PDC_WDT_MIN_TIMEOUT		1
 #define PDC_WDT_DEF_TIMEOUT		64
 
-static int heartbeat;
+static int heartbeat = PDC_WDT_DEF_TIMEOUT;
 module_param(heartbeat, int, 0);
-MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds. "
-	"(default = " __MODULE_STRING(PDC_WDT_DEF_TIMEOUT) ")");
+MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds "
+	"(default=" __MODULE_STRING(PDC_WDT_DEF_TIMEOUT) ")");
 
 static bool nowayout = WATCHDOG_NOWAYOUT;
 module_param(nowayout, bool, 0);
@@ -191,6 +191,7 @@ static int pdc_wdt_probe(struct platform_device *pdev)
 	pdc_wdt->wdt_dev.ops = &pdc_wdt_ops;
 	pdc_wdt->wdt_dev.max_timeout = 1 << PDC_WDT_CONFIG_DELAY_MASK;
 	pdc_wdt->wdt_dev.parent = &pdev->dev;
+	watchdog_set_drvdata(&pdc_wdt->wdt_dev, pdc_wdt);
 
 	ret = watchdog_init_timeout(&pdc_wdt->wdt_dev, heartbeat, &pdev->dev);
 	if (ret < 0) {
@@ -232,7 +233,6 @@ static int pdc_wdt_probe(struct platform_device *pdev)
 	watchdog_set_nowayout(&pdc_wdt->wdt_dev, nowayout);
 
 	platform_set_drvdata(pdev, pdc_wdt);
-	watchdog_set_drvdata(&pdc_wdt->wdt_dev, pdc_wdt);
 
 	ret = watchdog_register_device(&pdc_wdt->wdt_dev);
 	if (ret)

@@ -133,7 +133,7 @@ static int mtk_wdt_start(struct watchdog_device *wdt_dev)
 	u32 reg;
 	struct mtk_wdt_dev *mtk_wdt = watchdog_get_drvdata(wdt_dev);
 	void __iomem *wdt_base = mtk_wdt->wdt_base;
-	u32 ret;
+	int ret;
 
 	ret = mtk_wdt_set_timeout(wdt_dev, wdt_dev->timeout);
 	if (ret < 0)
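
The u32 -> int change fixes a classic signedness bug: mtk_wdt_set_timeout() returns a negative errno, but an unsigned ret can never be less than zero, so the error check was dead code. A standalone demonstration (userspace C, with -22 standing in for -EINVAL):

#include <stdio.h>

static int demo_set_timeout(void)
{
	return -22;	/* fail with -EINVAL */
}

int main(void)
{
	unsigned int uret = demo_set_timeout();	/* the old "u32 ret" */
	int ret = demo_set_timeout();		/* the fixed "int ret" */

	printf("u32 ret < 0 -> %d (error silently ignored)\n", uret < 0);
	printf("int ret < 0 -> %d (error detected)\n", ret < 0);
	return 0;
}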

@@ -633,7 +633,7 @@ static int count_matching_names(struct lock_class *new_class)
 	if (!new_class->name)
 		return 0;
 
-	list_for_each_entry(class, &all_lock_classes, lock_entry) {
+	list_for_each_entry_rcu(class, &all_lock_classes, lock_entry) {
 		if (new_class->key - new_class->subclass == class->key)
 			return class->name_version;
 		if (class->name && !strcmp(class->name, new_class->name))
@@ -700,10 +700,12 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 	hash_head = classhashentry(key);
 
 	/*
-	 * We can walk the hash lockfree, because the hash only
-	 * grows, and we are careful when adding entries to the end:
+	 * We do an RCU walk of the hash, see lockdep_free_key_range().
 	 */
-	list_for_each_entry(class, hash_head, hash_entry) {
+	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+		return NULL;
+
+	list_for_each_entry_rcu(class, hash_head, hash_entry) {
 		if (class->key == key) {
 			/*
 			 * Huh! same key, different name? Did someone trample
@@ -728,7 +730,8 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	struct lockdep_subclass_key *key;
 	struct list_head *hash_head;
 	struct lock_class *class;
-	unsigned long flags;
+
+	DEBUG_LOCKS_WARN_ON(!irqs_disabled());
 
 	class = look_up_lock_class(lock, subclass);
 	if (likely(class))
@@ -750,28 +753,26 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	key = lock->key->subkeys + subclass;
 	hash_head = classhashentry(key);
 
-	raw_local_irq_save(flags);
 	if (!graph_lock()) {
-		raw_local_irq_restore(flags);
 		return NULL;
 	}
 	/*
 	 * We have to do the hash-walk again, to avoid races
 	 * with another CPU:
 	 */
-	list_for_each_entry(class, hash_head, hash_entry)
+	list_for_each_entry_rcu(class, hash_head, hash_entry) {
 		if (class->key == key)
 			goto out_unlock_set;
+	}
+
 	/*
 	 * Allocate a new key from the static array, and add it to
 	 * the hash:
 	 */
 	if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
 		if (!debug_locks_off_graph_unlock()) {
-			raw_local_irq_restore(flags);
 			return NULL;
 		}
-		raw_local_irq_restore(flags);
 
 		print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
 		dump_stack();
@@ -798,7 +799,6 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	if (verbose(class)) {
 		graph_unlock();
-		raw_local_irq_restore(flags);
 
 		printk("\nnew class %p: %s", class->key, class->name);
 		if (class->name_version > 1)
@@ -806,15 +806,12 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 		printk("\n");
 		dump_stack();
 
-		raw_local_irq_save(flags);
 		if (!graph_lock()) {
-			raw_local_irq_restore(flags);
 			return NULL;
 		}
 	}
 out_unlock_set:
 	graph_unlock();
-	raw_local_irq_restore(flags);
 
 out_set_class_cache:
 	if (!subclass || force)
@@ -870,11 +867,9 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
 	entry->distance = distance;
 	entry->trace = *trace;
 	/*
-	 * Since we never remove from the dependency list, the list can
-	 * be walked lockless by other CPUs, it's only allocation
-	 * that must be protected by the spinlock. But this also means
-	 * we must make new entries visible only once writes to the
-	 * entry become visible - hence the RCU op:
+	 * Both allocation and removal are done under the graph lock; but
+	 * iteration is under RCU-sched; see look_up_lock_class() and
+	 * lockdep_free_key_range().
 	 */
 	list_add_tail_rcu(&entry->entry, head);
@@ -1025,7 +1020,9 @@ static int __bfs(struct lock_list *source_entry,
 		else
 			head = &lock->class->locks_before;
 
-		list_for_each_entry(entry, head, entry) {
+		DEBUG_LOCKS_WARN_ON(!irqs_disabled());
+
+		list_for_each_entry_rcu(entry, head, entry) {
 			if (!lock_accessed(entry)) {
 				unsigned int cq_depth;
 				mark_lock_accessed(entry, lock);
@@ -2022,7 +2019,7 @@ static inline int lookup_chain_cache(struct task_struct *curr,
 	 * We can walk it lock-free, because entries only get added
 	 * to the hash:
 	 */
-	list_for_each_entry(chain, hash_head, entry) {
+	list_for_each_entry_rcu(chain, hash_head, entry) {
 		if (chain->chain_key == chain_key) {
 cache_hit:
 			debug_atomic_inc(chain_lookup_hits);
@@ -2996,8 +2993,18 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
 	if (unlikely(!debug_locks))
 		return;
 
-	if (subclass)
+	if (subclass) {
+		unsigned long flags;
+
+		if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion))
+			return;
+
+		raw_local_irq_save(flags);
+		current->lockdep_recursion = 1;
 		register_lock_class(lock, subclass, 1);
+		current->lockdep_recursion = 0;
+		raw_local_irq_restore(flags);
+	}
 }
 EXPORT_SYMBOL_GPL(lockdep_init_map);
@@ -3887,9 +3894,17 @@ static inline int within(const void *addr, void *start, unsigned long size)
 	return addr >= start && addr < start + size;
 }
 
+/*
+ * Used in module.c to remove lock classes from memory that is going to be
+ * freed; and possibly re-used by other modules.
+ *
+ * We will have had one sync_sched() before getting here, so we're guaranteed
+ * nobody will look up these exact classes -- they're properly dead but still
+ * allocated.
+ */
 void lockdep_free_key_range(void *start, unsigned long size)
 {
-	struct lock_class *class, *next;
+	struct lock_class *class;
 	struct list_head *head;
 	unsigned long flags;
 	int i;
@@ -3905,7 +3920,7 @@ void lockdep_free_key_range(void *start, unsigned long size)
 		head = classhash_table + i;
 		if (list_empty(head))
 			continue;
-		list_for_each_entry_safe(class, next, head, hash_entry) {
+		list_for_each_entry_rcu(class, head, hash_entry) {
 			if (within(class->key, start, size))
 				zap_class(class);
 			else if (within(class->name, start, size))
@@ -3916,11 +3931,25 @@ void lockdep_free_key_range(void *start, unsigned long size)
 	if (locked)
 		graph_unlock();
 	raw_local_irq_restore(flags);
+
+	/*
+	 * Wait for any possible iterators from look_up_lock_class() to pass
+	 * before continuing to free the memory they refer to.
+	 *
+	 * sync_sched() is sufficient because the read-side is IRQ disable.
+	 */
+	synchronize_sched();
+
+	/*
+	 * XXX at this point we could return the resources to the pool;
+	 * instead we leak them. We would need to change to bitmap allocators
+	 * instead of the linear allocators we have now.
+	 */
 }
 
 void lockdep_reset_lock(struct lockdep_map *lock)
 {
-	struct lock_class *class, *next;
+	struct lock_class *class;
 	struct list_head *head;
 	unsigned long flags;
 	int i, j;
@@ -3948,7 +3977,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
 		head = classhash_table + i;
 		if (list_empty(head))
 			continue;
-		list_for_each_entry_safe(class, next, head, hash_entry) {
+		list_for_each_entry_rcu(class, head, hash_entry) {
 			int match = 0;
 
 			for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
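
The conversions above are the standard RCU-protected list pattern: writers mutate under a lock and publish with the _rcu list ops, readers iterate locklessly inside a read-side section, and memory is freed only after a grace period. (Lockdep itself pairs IRQ-disabled read sections with synchronize_sched() rather than plain rcu_read_lock().) A generic sketch with illustrative demo_* names:

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_class {
	struct list_head hash_entry;
	int key;
};

static LIST_HEAD(demo_hash);
static DEFINE_SPINLOCK(demo_lock);

static void demo_add(struct demo_class *c)
{
	spin_lock(&demo_lock);
	list_add_tail_rcu(&c->hash_entry, &demo_hash);	/* publish */
	spin_unlock(&demo_lock);
}

static bool demo_contains(int key)
{
	struct demo_class *c;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(c, &demo_hash, hash_entry) {
		if (c->key == key) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}

static void demo_zap(struct demo_class *c)
{
	spin_lock(&demo_lock);
	list_del_rcu(&c->hash_entry);	/* unpublish */
	spin_unlock(&demo_lock);
	synchronize_rcu();		/* wait out all current readers */
	kfree(c);
}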

@@ -1865,7 +1865,7 @@ static void free_module(struct module *mod)
 	kfree(mod->args);
 	percpu_modfree(mod);
 
-	/* Free lock-classes: */
+	/* Free lock-classes; relies on the preceding sync_rcu(). */
 	lockdep_free_key_range(mod->module_core, mod->core_size);
 
 	/* Finally, free the core (containing the module structure) */
@@ -3349,9 +3349,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
 	module_bug_cleanup(mod);
 	mutex_unlock(&module_mutex);
 
-	/* Free lock-classes: */
-	lockdep_free_key_range(mod->module_core, mod->core_size);
-
 	/* we can't deallocate the module until we clear memory protection */
 	unset_module_init_ro_nx(mod);
 	unset_module_core_ro_nx(mod);
@@ -3375,6 +3372,9 @@ static int load_module(struct load_info *info, const char __user *uargs,
 	synchronize_rcu();
 	mutex_unlock(&module_mutex);
  free_module:
+	/* Free lock-classes; relies on the preceding sync_rcu() */
+	lockdep_free_key_range(mod->module_core, mod->core_size);
+
 	module_deallocate(mod, info);
  free_copy:
 	free_copy(info);

@@ -3034,6 +3034,8 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 	} else {
 		if (dl_prio(oldprio))
 			p->dl.dl_boosted = 0;
+		if (rt_prio(oldprio))
+			p->rt.timeout = 0;
 		p->sched_class = &fair_sched_class;
 	}

@@ -49,6 +49,7 @@ static void bc_set_mode(enum clock_event_mode mode,
  */
 static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
 {
+	int bc_moved;
 	/*
 	 * We try to cancel the timer first. If the callback is on
 	 * flight on some other cpu then we let it handle it. If we
@@ -60,9 +61,15 @@ static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
 	 * restart the timer because we are in the callback, but we
 	 * can set the expiry time and let the callback return
 	 * HRTIMER_RESTART.
+	 *
+	 * Since we are in the idle loop at this point and because
+	 * hrtimer_{start/cancel} functions call into tracing,
+	 * calls to these functions must be bound within RCU_NONIDLE.
 	 */
-	if (hrtimer_try_to_cancel(&bctimer) >= 0) {
-		hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED);
+	RCU_NONIDLE(bc_moved = (hrtimer_try_to_cancel(&bctimer) >= 0) ?
+		!hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED) :
+			0);
+	if (bc_moved) {
 		/* Bind the "device" to the cpu */
 		bc->bound_on = smp_processor_id();
 	} else if (bc->bound_on == smp_processor_id()) {
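
RCU_NONIDLE() is needed above because the idle loop runs with RCU "not watching", while the tracepoints inside hrtimer_try_to_cancel()/hrtimer_start() may use RCU; the macro makes RCU watch for the duration of the wrapped statement. A minimal sketch (demo_arm_timer() is an illustrative stub):

#include <linux/printk.h>
#include <linux/rcupdate.h>

static int demo_arm_timer(void)
{
	return 1;	/* stub: pretend the timer was armed */
}

static void demo_from_idle(void)
{
	int armed;

	/* re-enter RCU's view just for this statement */
	RCU_NONIDLE(armed = demo_arm_timer());
	if (!armed)
		pr_warn("demo: timer not armed\n");
}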

@@ -152,7 +152,7 @@ static ssize_t sel_write_enforce(struct file *file, const char __user *buf,
 		goto out;
 
 	/* No partial writes. */
-	length = EINVAL;
+	length = -EINVAL;
 	if (*ppos != 0)
 		goto out;

@@ -1989,7 +1989,7 @@ static const struct pci_device_id azx_ids[] = {
 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
 	/* Sunrise Point */
 	{ PCI_DEVICE(0x8086, 0xa170),
-	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
 	/* Sunrise Point-LP */
 	{ PCI_DEVICE(0x8086, 0x9d70),
 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },

@@ -396,7 +396,7 @@ static void alc_auto_setup_eapd(struct hda_codec *codec, bool on)
 {
 	/* We currently only handle front, HP */
 	static hda_nid_t pins[] = {
-		0x0f, 0x10, 0x14, 0x15, 0
+		0x0f, 0x10, 0x14, 0x15, 0x17, 0
 	};
 	hda_nid_t *p;
 	for (p = pins; *p; p++)
@@ -5036,6 +5036,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
 	SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK),
 	SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+	SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
 	SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
 	SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
 	SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),