Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux
Resolved conflicts:

fs/xfs/xfs_trans_priv.h:
  - deleted struct xfs_ail field xa_flags
  - kept field xa_log_flush in struct xfs_ail

fs/xfs/xfs_trans_ail.c:
  - in xfsaild_push(), in XFS_ITEM_PUSHBUF case, replaced
    "flush_log = 1" with "ailp->xa_log_flush++"

Signed-off-by: Alex Elder <aelder@sgi.com>
commit 9508534c5f
@@ -2706,10 +2706,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			functions are at fixed addresses, they make nice
 			targets for exploits that can control RIP.
 
-			emulate     [default] Vsyscalls turn into traps and are
-			            emulated reasonably safely.
+			emulate     Vsyscalls turn into traps and are emulated
+			            reasonably safely.
 
-			native      Vsyscalls are native syscall instructions.
+			native      [default] Vsyscalls are native syscall
+			            instructions.
 			            This is a little bit faster than trapping
 			            and makes a few dynamic recompilers work
 			            better than they would in emulation mode.
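The hunk above flips the documented default for the x86-64 vsyscall page to native. As a usage sketch (assumed from this documentation, not part of the patch itself), a system that prefers the trap-and-emulate behaviour would pass the parameter below on the kernel command line; how the command line is edited depends on the bootloader and is only illustrative here.

    # appended to the kernel command line at boot
    vsyscall=emulate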
@@ -27,7 +27,7 @@ applying a filter to each packet that assigns it to one of a small number
 of logical flows. Packets for each flow are steered to a separate receive
 queue, which in turn can be processed by separate CPUs. This mechanism is
 generally known as “Receive-side Scaling” (RSS). The goal of RSS and
-the other scaling techniques to increase performance uniformly.
+the other scaling techniques is to increase performance uniformly.
 Multi-queue distribution can also be used for traffic prioritization, but
 that is not the focus of these techniques.
 
@@ -186,10 +186,10 @@ are steered using plain RPS. Multiple table entries may point to the
 same CPU. Indeed, with many flows and few CPUs, it is very likely that
 a single application thread handles flows with many different flow hashes.
 
-rps_sock_table is a global flow table that contains the *desired* CPU for
-flows: the CPU that is currently processing the flow in userspace. Each
-table value is a CPU index that is updated during calls to recvmsg and
-sendmsg (specifically, inet_recvmsg(), inet_sendmsg(), inet_sendpage()
+rps_sock_flow_table is a global flow table that contains the *desired* CPU
+for flows: the CPU that is currently processing the flow in userspace.
+Each table value is a CPU index that is updated during calls to recvmsg
+and sendmsg (specifically, inet_recvmsg(), inet_sendmsg(), inet_sendpage()
 and tcp_splice_read()).
 
 When the scheduler moves a thread to a new CPU while it has outstanding
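The hunk above describes rps_sock_flow_table, the global RFS flow table. As a hedged configuration sketch following the same scaling documentation: the table is sized through a net.core sysctl and each receive queue also needs a per-queue flow count. The interface name, queue index, and the sizes 32768 and 2048 are illustrative only, not mandated by this patch.

    # size the global socket flow table
    echo 32768 > /proc/sys/net/core/rps_sock_flow_entries
    # per-queue flow count, shown for queue 0 of eth0
    echo 2048 > /sys/class/net/eth0/queues/rx-0/rps_flow_cnt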
@@ -6366,10 +6366,10 @@ F:	net/ipv4/tcp_lp.c
 
 TEGRA SUPPORT
 M:	Colin Cross <ccross@android.com>
-M:	Erik Gilling <konkers@android.com>
 M:	Olof Johansson <olof@lixom.net>
+M:	Stephen Warren <swarren@nvidia.com>
 L:	linux-tegra@vger.kernel.org
-T:	git git://android.git.kernel.org/kernel/tegra.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/olof/tegra.git
 S:	Supported
 F:	arch/arm/mach-tegra
 
@@ -259,7 +259,6 @@ static void __init vic_disable(void __iomem *base)
 	writel(0, base + VIC_INT_SELECT);
 	writel(0, base + VIC_INT_ENABLE);
 	writel(~0, base + VIC_INT_ENABLE_CLEAR);
-	writel(0, base + VIC_IRQ_STATUS);
 	writel(0, base + VIC_ITCR);
 	writel(~0, base + VIC_INT_SOFT_CLEAR);
 }
 
@@ -10,6 +10,8 @@
 #ifndef __ASM_ARM_LOCALTIMER_H
 #define __ASM_ARM_LOCALTIMER_H
 
+#include <linux/errno.h>
+
 struct clock_event_device;
 
 /*
@@ -321,8 +321,8 @@ static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
 	[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
 	[PERF_COUNT_HW_INSTRUCTIONS] =
 					ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE,
-	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_COHERENT_LINE_HIT,
-	[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_COHERENT_LINE_MISS,
+	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_DCACHE_ACCESS,
+	[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_DCACHE_REFILL,
 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
 	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 	[PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
@@ -193,7 +193,8 @@ static int __init omap2430_i2c_init(void)
 {
 	omap_register_i2c_bus(1, 100, sdp2430_i2c1_boardinfo,
 			ARRAY_SIZE(sdp2430_i2c1_boardinfo));
-	omap2_pmic_init("twl4030", &sdp2430_twldata);
+	omap_pmic_init(2, 100, "twl4030", INT_24XX_SYS_NIRQ,
+			&sdp2430_twldata);
 	return 0;
 }
 
@@ -137,8 +137,7 @@ static void omap4_hsmmc1_before_set_reg(struct device *dev, int slot,
 	 */
 	reg = omap4_ctrl_pad_readl(control_pbias_offset);
 	reg &= ~(OMAP4_MMC1_PBIASLITE_PWRDNZ_MASK |
-		OMAP4_MMC1_PWRDNZ_MASK |
-		OMAP4_USBC1_ICUSB_PWRDNZ_MASK);
+		OMAP4_MMC1_PWRDNZ_MASK);
 	omap4_ctrl_pad_writel(reg, control_pbias_offset);
 }
 
@@ -156,8 +155,7 @@ static void omap4_hsmmc1_after_set_reg(struct device *dev, int slot,
 	else
 		reg |= OMAP4_MMC1_PBIASLITE_VMODE_MASK;
 	reg |= (OMAP4_MMC1_PBIASLITE_PWRDNZ_MASK |
-		OMAP4_MMC1_PWRDNZ_MASK |
-		OMAP4_USBC1_ICUSB_PWRDNZ_MASK);
+		OMAP4_MMC1_PWRDNZ_MASK);
 	omap4_ctrl_pad_writel(reg, control_pbias_offset);
 
 	timeout = jiffies + msecs_to_jiffies(5);
@@ -171,16 +169,14 @@ static void omap4_hsmmc1_after_set_reg(struct device *dev, int slot,
 		if (reg & OMAP4_MMC1_PBIASLITE_VMODE_ERROR_MASK) {
 			pr_err("Pbias Voltage is not same as LDO\n");
 			/* Caution : On VMODE_ERROR Power Down MMC IO */
-			reg &= ~(OMAP4_MMC1_PWRDNZ_MASK |
-				OMAP4_USBC1_ICUSB_PWRDNZ_MASK);
+			reg &= ~(OMAP4_MMC1_PWRDNZ_MASK);
 			omap4_ctrl_pad_writel(reg, control_pbias_offset);
 		}
 	} else {
 		reg = omap4_ctrl_pad_readl(control_pbias_offset);
 		reg |= (OMAP4_MMC1_PBIASLITE_PWRDNZ_MASK |
 			OMAP4_MMC1_PWRDNZ_MASK |
-			OMAP4_MMC1_PBIASLITE_VMODE_MASK |
-			OMAP4_USBC1_ICUSB_PWRDNZ_MASK);
+			OMAP4_MMC1_PBIASLITE_VMODE_MASK);
 		omap4_ctrl_pad_writel(reg, control_pbias_offset);
 	}
 }
 
@@ -137,9 +137,6 @@ void __init usb_musb_init(struct omap_musb_board_data *musb_board_data)
 	musb_plat.mode = board_data->mode;
 	musb_plat.extvbus = board_data->extvbus;
 
-	if (cpu_is_omap44xx())
-		omap4430_phy_init(dev);
-
 	if (cpu_is_omap3517() || cpu_is_omap3505()) {
 		oh_name = "am35x_otg_hs";
 		name = "musb-am35x";
@@ -32,7 +32,6 @@
 
 #include <asm/system.h>
 
-#include <mach/hardware.h>
 #include <mach/clk.h>
 
 /* Frequency table index must be sequential starting at 0 */
 
@@ -6,6 +6,7 @@ config UX500_SOC_COMMON
 	select ARM_GIC
 	select HAS_MTU
 	select ARM_ERRATA_753970
+	select ARM_ERRATA_754322
 
 menu "Ux500 SoC"
 
@@ -496,6 +496,13 @@ static void __init free_unused_memmap(struct meminfo *mi)
 		 */
 		bank_start = min(bank_start,
				 ALIGN(prev_bank_end, PAGES_PER_SECTION));
+#else
+		/*
+		 * Align down here since the VM subsystem insists that the
+		 * memmap entries are valid from the bank start aligned to
+		 * MAX_ORDER_NR_PAGES.
+		 */
+		bank_start = round_down(bank_start, MAX_ORDER_NR_PAGES);
 #endif
 		/*
 		 * If we had a previous bank, and there is a space
@@ -24,6 +24,7 @@ config MIPS
 	select GENERIC_IRQ_PROBE
 	select GENERIC_IRQ_SHOW
 	select HAVE_ARCH_JUMP_LABEL
+	select IRQ_FORCED_THREADING
 
 menu "Machine selection"
 
@@ -722,6 +723,7 @@ config CAVIUM_OCTEON_SIMULATOR
 	select SYS_SUPPORTS_HIGHMEM
 	select SYS_SUPPORTS_HOTPLUG_CPU
 	select SYS_HAS_CPU_CAVIUM_OCTEON
+	select HOLES_IN_ZONE
 	help
 	  The Octeon simulator is software performance model of the Cavium
 	  Octeon Processor. It supports simulating Octeon processors on x86
@@ -744,6 +746,7 @@ config CAVIUM_OCTEON_REFERENCE_BOARD
 	select ZONE_DMA32
 	select USB_ARCH_HAS_OHCI
 	select USB_ARCH_HAS_EHCI
+	select HOLES_IN_ZONE
 	help
 	  This option supports all of the Octeon reference boards from Cavium
 	  Networks. It builds a kernel that dynamically determines the Octeon
@@ -973,6 +976,9 @@ config ISA_DMA_API
 config GENERIC_GPIO
 	bool
 
+config HOLES_IN_ZONE
+	bool
+
 #
 # Endianess selection. Sufficiently obscure so many users don't know what to
 # answer,so we try hard to limit the available choices. Also the use of a
@@ -492,7 +492,7 @@ static void __init alchemy_setup_macs(int ctype)
 		memcpy(au1xxx_eth0_platform_data.mac, ethaddr, 6);
 
 	ret = platform_device_register(&au1xxx_eth0_device);
-	if (!ret)
+	if (ret)
 		printk(KERN_INFO "Alchemy: failed to register MAC0\n");
 
 
@@ -158,15 +158,21 @@ static void restore_core_regs(void)
 
 void au_sleep(void)
 {
-	int cpuid = alchemy_get_cputype();
-	if (cpuid != ALCHEMY_CPU_UNKNOWN) {
-		save_core_regs();
-		if (cpuid <= ALCHEMY_CPU_AU1500)
-			alchemy_sleep_au1000();
-		else if (cpuid <= ALCHEMY_CPU_AU1200)
-			alchemy_sleep_au1550();
-		restore_core_regs();
+	save_core_regs();
+
+	switch (alchemy_get_cputype()) {
+	case ALCHEMY_CPU_AU1000:
+	case ALCHEMY_CPU_AU1500:
+	case ALCHEMY_CPU_AU1100:
+		alchemy_sleep_au1000();
+		break;
+	case ALCHEMY_CPU_AU1550:
+	case ALCHEMY_CPU_AU1200:
+		alchemy_sleep_au1550();
+		break;
 	}
 
+	restore_core_regs();
 }
 
 #endif /* CONFIG_PM */
@@ -89,8 +89,12 @@ static void bcsr_csc_handler(unsigned int irq, struct irq_desc *d)
 {
 	unsigned short bisr = __raw_readw(bcsr_virt + BCSR_REG_INTSTAT);
 
+	disable_irq_nosync(irq);
+
 	for ( ; bisr; bisr &= bisr - 1)
 		generic_handle_irq(bcsr_csc_base + __ffs(bisr));
+
+	enable_irq(irq);
 }
 
 /* NOTE: both the enable and mask bits must be cleared, otherwise the
@@ -23,13 +23,6 @@ void __init board_setup(void)
 	unsigned long freq0, clksrc, div, pfc;
 	unsigned short whoami;
 
-	/* Set Config[OD] (disable overlapping bus transaction):
-	 * This gets rid of a _lot_ of spurious interrupts (especially
-	 * wrt. IDE); but incurs ~10% performance hit in some
-	 * cpu-bound applications.
-	 */
-	set_c0_config(1 << 19);
-
 	bcsr_init(DB1200_BCSR_PHYS_ADDR,
 		  DB1200_BCSR_PHYS_ADDR + DB1200_BCSR_HEXLED_OFS);
 
@@ -98,7 +98,8 @@ static struct irq_chip ar7_sec_irq_type = {
 
 static struct irqaction ar7_cascade_action = {
 	.handler = no_action,
-	.name = "AR7 cascade interrupt"
+	.name = "AR7 cascade interrupt",
+	.flags = IRQF_NO_THREAD,
 };
 
 static void __init ar7_irq_init(int base)
 
@@ -222,6 +222,7 @@ static struct irq_chip bcm63xx_external_irq_chip = {
 static struct irqaction cpu_ip2_cascade_action = {
 	.handler = no_action,
 	.name = "cascade_ip2",
+	.flags = IRQF_NO_THREAD,
 };
 
 void __init arch_init_irq(void)
 
@@ -48,6 +48,7 @@ asmlinkage void plat_irq_dispatch(void)
 static struct irqaction cascade = {
 	.handler = no_action,
 	.name = "cascade",
+	.flags = IRQF_NO_THREAD,
 };
 
 void __init arch_init_irq(void)
 
@@ -101,20 +101,24 @@ int cpu_fpu_mask = DEC_CPU_IRQ_MASK(DEC_CPU_INR_FPU);
 static struct irqaction ioirq = {
 	.handler = no_action,
 	.name = "cascade",
+	.flags = IRQF_NO_THREAD,
 };
 static struct irqaction fpuirq = {
 	.handler = no_action,
 	.name = "fpu",
+	.flags = IRQF_NO_THREAD,
 };
 
 static struct irqaction busirq = {
 	.flags = IRQF_DISABLED,
 	.name = "bus error",
+	.flags = IRQF_NO_THREAD,
 };
 
 static struct irqaction haltirq = {
 	.handler = dec_intr_halt,
 	.name = "halt",
+	.flags = IRQF_NO_THREAD,
 };
 
 
@@ -169,7 +169,7 @@ void emma2rh_gpio_irq_init(void)
 
 static struct irqaction irq_cascade = {
 	.handler = no_action,
-	.flags = 0,
+	.flags = IRQF_NO_THREAD,
 	.name = "cascade",
 	.dev_id = NULL,
 	.next = NULL,
@@ -54,7 +54,6 @@
 #define cpu_has_mips_r2_exec_hazard 0
 #define cpu_has_dsp 0
 #define cpu_has_mipsmt 0
-#define cpu_has_userlocal 0
 #define cpu_has_vint 0
 #define cpu_has_veic 0
 #define cpu_hwrena_impl_bits 0xc0000000
 
@@ -13,7 +13,6 @@
 #define __ASM_MACH_POWERTV_DMA_COHERENCE_H
 
 #include <linux/sched.h>
-#include <linux/version.h>
 #include <linux/device.h>
 #include <asm/mach-powertv/asic.h>
 
@@ -195,9 +195,9 @@
 	 * to cover the pipeline delay.
 	 */
 	.set	mips32
-	mfc0	v1, CP0_TCSTATUS
+	mfc0	k0, CP0_TCSTATUS
 	.set	mips0
-	LONG_S	v1, PT_TCSTATUS(sp)
+	LONG_S	k0, PT_TCSTATUS(sp)
 #endif /* CONFIG_MIPS_MT_SMTC */
 	LONG_S	$4, PT_R4(sp)
 	LONG_S	$5, PT_R5(sp)
@@ -18,7 +18,7 @@
 #include <linux/init.h>
 
 #include <linux/spinlock.h>
-#include <linux/sysdev.h>
+#include <linux/syscore_ops.h>
 #include <linux/io.h>
 #include <linux/gpio.h>
 #include <linux/delay.h>
@@ -86,7 +86,6 @@ struct jz_gpio_chip {
 	spinlock_t lock;
 
 	struct gpio_chip gpio_chip;
-	struct sys_device sysdev;
 };
 
 static struct jz_gpio_chip jz4740_gpio_chips[];
@@ -459,49 +458,47 @@ static struct jz_gpio_chip jz4740_gpio_chips[] = {
 	JZ4740_GPIO_CHIP(D),
 };
 
-static inline struct jz_gpio_chip *sysdev_to_chip(struct sys_device *dev)
+static void jz4740_gpio_suspend_chip(struct jz_gpio_chip *chip)
 {
-	return container_of(dev, struct jz_gpio_chip, sysdev);
-}
-
-static int jz4740_gpio_suspend(struct sys_device *dev, pm_message_t state)
-{
-	struct jz_gpio_chip *chip = sysdev_to_chip(dev);
-
 	chip->suspend_mask = readl(chip->base + JZ_REG_GPIO_MASK);
 	writel(~(chip->wakeup), chip->base + JZ_REG_GPIO_MASK_SET);
 	writel(chip->wakeup, chip->base + JZ_REG_GPIO_MASK_CLEAR);
+}
+
+static int jz4740_gpio_suspend(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(jz4740_gpio_chips); i++)
+		jz4740_gpio_suspend_chip(&jz4740_gpio_chips[i]);
 
 	return 0;
 }
 
-static int jz4740_gpio_resume(struct sys_device *dev)
+static void jz4740_gpio_resume_chip(struct jz_gpio_chip *chip)
 {
-	struct jz_gpio_chip *chip = sysdev_to_chip(dev);
 	uint32_t mask = chip->suspend_mask;
 
 	writel(~mask, chip->base + JZ_REG_GPIO_MASK_CLEAR);
 	writel(mask, chip->base + JZ_REG_GPIO_MASK_SET);
-
-	return 0;
 }
 
-static struct sysdev_class jz4740_gpio_sysdev_class = {
-	.name = "gpio",
+static void jz4740_gpio_resume(void)
+{
+	int i;
+
+	for (i = ARRAY_SIZE(jz4740_gpio_chips) - 1; i >= 0 ; i--)
+		jz4740_gpio_resume_chip(&jz4740_gpio_chips[i]);
+}
+
+static struct syscore_ops jz4740_gpio_syscore_ops = {
 	.suspend = jz4740_gpio_suspend,
 	.resume = jz4740_gpio_resume,
 };
 
-static int jz4740_gpio_chip_init(struct jz_gpio_chip *chip, unsigned int id)
+static void jz4740_gpio_chip_init(struct jz_gpio_chip *chip, unsigned int id)
 {
-	int ret, irq;
-
-	chip->sysdev.id = id;
-	chip->sysdev.cls = &jz4740_gpio_sysdev_class;
-	ret = sysdev_register(&chip->sysdev);
-
-	if (ret)
-		return ret;
+	int irq;
 
 	spin_lock_init(&chip->lock);
 
@@ -519,22 +516,17 @@ static int jz4740_gpio_chip_init(struct jz_gpio_chip *chip, unsigned int id)
 		irq_set_chip_and_handler(irq, &jz_gpio_irq_chip,
 			handle_level_irq);
 	}
-
-	return 0;
 }
 
 static int __init jz4740_gpio_init(void)
 {
 	unsigned int i;
-	int ret;
-
-	ret = sysdev_class_register(&jz4740_gpio_sysdev_class);
-	if (ret)
-		return ret;
 
 	for (i = 0; i < ARRAY_SIZE(jz4740_gpio_chips); ++i)
 		jz4740_gpio_chip_init(&jz4740_gpio_chips[i], i);
 
+	register_syscore_ops(&jz4740_gpio_syscore_ops);
+
 	printk(KERN_INFO "JZ4740 GPIO initialized\n");
 
 	return 0;
@@ -19,6 +19,26 @@
 
 #include <asm-generic/sections.h>
 
+#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
+#define MCOUNT_OFFSET_INSNS 5
+#else
+#define MCOUNT_OFFSET_INSNS 4
+#endif
+
+/*
+ * Check if the address is in kernel space
+ *
+ * Clone core_kernel_text() from kernel/extable.c, but doesn't call
+ * init_kernel_text() for Ftrace doesn't trace functions in init sections.
+ */
+static inline int in_kernel_space(unsigned long ip)
+{
+	if (ip >= (unsigned long)_stext &&
+	    ip <= (unsigned long)_etext)
+		return 1;
+	return 0;
+}
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 #define JAL 0x0c000000		/* jump & link: ip --> ra, jump to target */
@@ -54,20 +74,6 @@ static inline void ftrace_dyn_arch_init_insns(void)
 #endif
 }
 
-/*
- * Check if the address is in kernel space
- *
- * Clone core_kernel_text() from kernel/extable.c, but doesn't call
- * init_kernel_text() for Ftrace doesn't trace functions in init sections.
- */
-static inline int in_kernel_space(unsigned long ip)
-{
-	if (ip >= (unsigned long)_stext &&
-	    ip <= (unsigned long)_etext)
-		return 1;
-	return 0;
-}
-
 static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
 {
 	int faulted;
@@ -112,11 +118,6 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
  * 1: offset = 4 instructions
  */
 
-#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
-#define MCOUNT_OFFSET_INSNS 5
-#else
-#define MCOUNT_OFFSET_INSNS 4
-#endif
 #define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
 
 int ftrace_make_nop(struct module *mod,
@@ -229,7 +229,7 @@ static void i8259A_shutdown(void)
 	 */
 	if (i8259A_auto_eoi >= 0) {
 		outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
-		outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-1 */
+		outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */
 	}
 }
 
@@ -295,6 +295,7 @@ static void init_8259A(int auto_eoi)
 static struct irqaction irq2 = {
 	.handler = no_action,
 	.name = "cascade",
+	.flags = IRQF_NO_THREAD,
 };
 
 static struct resource pic1_io_resource = {
@@ -349,3 +349,10 @@ SYSCALL_DEFINE6(32_fanotify_mark, int, fanotify_fd, unsigned int, flags,
 	return sys_fanotify_mark(fanotify_fd, flags, merge_64(a3, a4),
 				 dfd, pathname);
 }
+
+SYSCALL_DEFINE6(32_futex, u32 __user *, uaddr, int, op, u32, val,
+		struct compat_timespec __user *, utime, u32 __user *, uaddr2,
+		u32, val3)
+{
+	return compat_sys_futex(uaddr, op, val, utime, uaddr2, val3);
+}
@@ -315,7 +315,7 @@ EXPORT(sysn32_call_table)
 	PTR	sys_fremovexattr
 	PTR	sys_tkill
 	PTR	sys_ni_syscall
-	PTR	compat_sys_futex
+	PTR	sys_32_futex
 	PTR	compat_sys_sched_setaffinity	/* 6195 */
 	PTR	compat_sys_sched_getaffinity
 	PTR	sys_cacheflush
 
@@ -441,7 +441,7 @@ sys_call_table:
 	PTR	sys_fremovexattr		/* 4235 */
 	PTR	sys_tkill
 	PTR	sys_sendfile64
-	PTR	compat_sys_futex
+	PTR	sys_32_futex
 	PTR	compat_sys_sched_setaffinity
 	PTR	compat_sys_sched_getaffinity	/* 4240 */
 	PTR	compat_sys_io_setup
@@ -8,6 +8,7 @@
  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
  */
 #include <linux/cache.h>
+#include <linux/irqflags.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
 #include <linux/personality.h>
@@ -658,6 +659,8 @@ static void do_signal(struct pt_regs *regs)
 asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
 	__u32 thread_info_flags)
 {
+	local_irq_enable();
+
 	/* deal with pending signal delivery */
 	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
 		do_signal(regs);
@@ -14,6 +14,7 @@
 #include <linux/bug.h>
 #include <linux/compiler.h>
 #include <linux/init.h>
+#include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/sched.h>
@@ -364,21 +365,26 @@ static int regs_to_trapnr(struct pt_regs *regs)
 	return (regs->cp0_cause >> 2) & 0x1f;
 }
 
-static DEFINE_SPINLOCK(die_lock);
+static DEFINE_RAW_SPINLOCK(die_lock);
 
 void __noreturn die(const char *str, struct pt_regs *regs)
 {
 	static int die_counter;
 	int sig = SIGSEGV;
 #ifdef CONFIG_MIPS_MT_SMTC
-	unsigned long dvpret = dvpe();
+	unsigned long dvpret;
 #endif /* CONFIG_MIPS_MT_SMTC */
 
+	oops_enter();
+
 	if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP)
 		sig = 0;
 
 	console_verbose();
-	spin_lock_irq(&die_lock);
+	raw_spin_lock_irq(&die_lock);
+#ifdef CONFIG_MIPS_MT_SMTC
+	dvpret = dvpe();
+#endif /* CONFIG_MIPS_MT_SMTC */
 	bust_spinlocks(1);
 #ifdef CONFIG_MIPS_MT_SMTC
 	mips_mt_regdump(dvpret);
@@ -387,7 +393,9 @@ void __noreturn die(const char *str, struct pt_regs *regs)
 	printk("%s[#%d]:\n", str, ++die_counter);
 	show_registers(regs);
 	add_taint(TAINT_DIE);
-	spin_unlock_irq(&die_lock);
+	raw_spin_unlock_irq(&die_lock);
+
+	oops_exit();
 
 	if (in_interrupt())
 		panic("Fatal exception in interrupt");
@@ -192,7 +192,7 @@ static struct tc *get_tc(int index)
 	}
 	spin_unlock(&vpecontrol.tc_list_lock);
 
-	return NULL;
+	return res;
 }
 
 /* allocate a vpe and associate it with this minor (or index) */
@@ -123,11 +123,10 @@ void ltq_enable_irq(struct irq_data *d)
 static unsigned int ltq_startup_eiu_irq(struct irq_data *d)
 {
 	int i;
-	int irq_nr = d->irq - INT_NUM_IRQ0;
 
 	ltq_enable_irq(d);
 	for (i = 0; i < MAX_EIU; i++) {
-		if (irq_nr == ltq_eiu_irq[i]) {
+		if (d->irq == ltq_eiu_irq[i]) {
 			/* low level - we should really handle set_type */
 			ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) |
 				(0x6 << (i * 4)), LTQ_EIU_EXIN_C);
@@ -147,11 +146,10 @@ static unsigned int ltq_startup_eiu_irq(struct irq_data *d)
 static void ltq_shutdown_eiu_irq(struct irq_data *d)
 {
 	int i;
-	int irq_nr = d->irq - INT_NUM_IRQ0;
 
 	ltq_disable_irq(d);
 	for (i = 0; i < MAX_EIU; i++) {
-		if (irq_nr == ltq_eiu_irq[i]) {
+		if (d->irq == ltq_eiu_irq[i]) {
 			/* disable */
 			ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~(1 << i),
 				LTQ_EIU_EXIN_INEN);
@@ -10,7 +10,6 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/version.h>
 #include <linux/ioport.h>
 
 #include <lantiq_soc.h>
 
@@ -8,7 +8,6 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/version.h>
 #include <linux/ioport.h>
 
 #include <lantiq_soc.h>
 
@@ -105,6 +105,7 @@ asmlinkage void plat_irq_dispatch(void)
 static struct irqaction cascade = {
 	.handler = no_action,
 	.name = "cascade",
+	.flags = IRQF_NO_THREAD,
 };
 
 void __init arch_init_irq(void)
 
@@ -42,6 +42,7 @@ asmlinkage void mach_irq_dispatch(unsigned int pending)
 static struct irqaction cascade_irqaction = {
 	.handler = no_action,
 	.name = "cascade",
+	.flags = IRQF_NO_THREAD,
 };
 
 void __init mach_init_irq(void)
 
@@ -96,12 +96,13 @@ static irqreturn_t ip6_action(int cpl, void *dev_id)
 struct irqaction ip6_irqaction = {
 	.handler = ip6_action,
 	.name = "cascade",
-	.flags = IRQF_SHARED,
+	.flags = IRQF_SHARED | IRQF_NO_THREAD,
 };
 
 struct irqaction cascade_irqaction = {
 	.handler = no_action,
 	.name = "cascade",
+	.flags = IRQF_NO_THREAD,
 };
 
 void __init mach_init_irq(void)
 
@@ -6,6 +6,7 @@
  * Copyright (C) 2011 Wind River Systems,
  *   written by Ralf Baechle <ralf@linux-mips.org>
  */
+#include <linux/compiler.h>
 #include <linux/errno.h>
 #include <linux/mm.h>
 #include <linux/mman.h>
@@ -15,12 +16,11 @@
 #include <linux/sched.h>
 
 unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
-
 EXPORT_SYMBOL(shm_align_mask);
 
 /* gap between mmap and stack */
 #define MIN_GAP (128*1024*1024UL)
 #define MAX_GAP ((TASK_SIZE)/6*5)
 
 static int mmap_is_legacy(void)
 {
@@ -57,13 +57,13 @@ static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
 	return base - off;
 }
 
-#define COLOUR_ALIGN(addr,pgoff) \
+#define COLOUR_ALIGN(addr, pgoff) \
 	((((addr) + shm_align_mask) & ~shm_align_mask) + \
 	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
 
 enum mmap_allocation_direction {UP, DOWN};
 
-static unsigned long arch_get_unmapped_area_foo(struct file *filp,
+static unsigned long arch_get_unmapped_area_common(struct file *filp,
 	unsigned long addr0, unsigned long len, unsigned long pgoff,
 	unsigned long flags, enum mmap_allocation_direction dir)
 {
@@ -103,16 +103,16 @@ static unsigned long arch_get_unmapped_area_foo(struct file *filp,
 
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
 		    (!vma || addr + len <= vma->vm_start))
 			return addr;
 	}
 
 	if (dir == UP) {
 		addr = mm->mmap_base;
 		if (do_color_align)
 			addr = COLOUR_ALIGN(addr, pgoff);
 		else
 			addr = PAGE_ALIGN(addr);
 
 		for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
 			/* At this point:  (!vma || addr < vma->vm_end). */
@@ -131,28 +131,30 @@ static unsigned long arch_get_unmapped_area_foo(struct file *filp,
 		mm->free_area_cache = mm->mmap_base;
 	}
 
-	/* either no address requested or can't fit in requested address hole */
+	/*
+	 * either no address requested, or the mapping can't fit into
+	 * the requested address hole
+	 */
 	addr = mm->free_area_cache;
 	if (do_color_align) {
 		unsigned long base =
 			COLOUR_ALIGN_DOWN(addr - len, pgoff);
 
 		addr = base + len;
 	}
 
 	/* make sure it can fit in the remaining address space */
 	if (likely(addr > len)) {
 		vma = find_vma(mm, addr - len);
 		if (!vma || addr <= vma->vm_start) {
-			/* remember the address as a hint for next time */
-			return mm->free_area_cache = addr-len;
+			/* cache the address as a hint for next time */
+			return mm->free_area_cache = addr - len;
 		}
 	}
 
 	if (unlikely(mm->mmap_base < len))
 		goto bottomup;
 
-	addr = mm->mmap_base-len;
+	addr = mm->mmap_base - len;
 	if (do_color_align)
 		addr = COLOUR_ALIGN_DOWN(addr, pgoff);
 
@@ -163,8 +165,8 @@ static unsigned long arch_get_unmapped_area_foo(struct file *filp,
 		 * return with success:
 		 */
 		vma = find_vma(mm, addr);
-		if (likely(!vma || addr+len <= vma->vm_start)) {
-			/* remember the address as a hint for next time */
+		if (likely(!vma || addr + len <= vma->vm_start)) {
+			/* cache the address as a hint for next time */
 			return mm->free_area_cache = addr;
 		}
 
@@ -173,7 +175,7 @@ static unsigned long arch_get_unmapped_area_foo(struct file *filp,
 			mm->cached_hole_size = vma->vm_start - addr;
 
 		/* try just below the current vma->vm_start */
-		addr = vma->vm_start-len;
+		addr = vma->vm_start - len;
 		if (do_color_align)
 			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
 	} while (likely(len < vma->vm_start));
@@ -201,7 +203,7 @@ bottomup:
 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
 	unsigned long len, unsigned long pgoff, unsigned long flags)
 {
-	return arch_get_unmapped_area_foo(filp,
+	return arch_get_unmapped_area_common(filp,
 			addr0, len, pgoff, flags, UP);
 }
 
@@ -213,7 +215,7 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp,
 	unsigned long addr0, unsigned long len, unsigned long pgoff,
 	unsigned long flags)
 {
-	return arch_get_unmapped_area_foo(filp,
+	return arch_get_unmapped_area_common(filp,
 			addr0, len, pgoff, flags, DOWN);
 }
 
@@ -1759,14 +1759,13 @@ static void __cpuinit build_r3000_tlb_modify_handler(void)
 	u32 *p = handle_tlbm;
 	struct uasm_label *l = labels;
 	struct uasm_reloc *r = relocs;
-	struct work_registers wr;
 
 	memset(handle_tlbm, 0, sizeof(handle_tlbm));
 	memset(labels, 0, sizeof(labels));
 	memset(relocs, 0, sizeof(relocs));
 
 	build_r3000_tlbchange_handler_head(&p, K0, K1);
-	build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
+	build_pte_modifiable(&p, &r, K0, K1, -1, label_nopage_tlbm);
 	uasm_i_nop(&p); /* load delay */
 	build_make_write(&p, &r, K0, K1);
 	build_r3000_pte_reload_tlbwi(&p, K0, K1);
@@ -1963,7 +1962,8 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
 		uasm_i_andi(&p, wr.r3, wr.r3, 2);
 		uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
 	}
+	if (PM_DEFAULT_MASK == 0)
+		uasm_i_nop(&p);
 	/*
 	 * We clobbered C0_PAGEMASK, restore it. On the other branch
 	 * it is restored in build_huge_tlb_write_entry.
@@ -350,12 +350,14 @@ unsigned int plat_ipi_resched_int_xlate(unsigned int cpu)
 
 static struct irqaction i8259irq = {
 	.handler = no_action,
-	.name = "XT-PIC cascade"
+	.name = "XT-PIC cascade",
+	.flags = IRQF_NO_THREAD,
 };
 
 static struct irqaction corehi_irqaction = {
 	.handler = no_action,
-	.name = "CoreHi"
+	.name = "CoreHi",
+	.flags = IRQF_NO_THREAD,
 };
 
 static msc_irqmap_t __initdata msc_irqmap[] = {
 
@@ -2,4 +2,4 @@ obj-y += setup.o platform.o irq.o setup.o time.o
 obj-$(CONFIG_SMP) += smp.o smpboot.o
 obj-$(CONFIG_EARLY_PRINTK) += xlr_console.o
 
-EXTRA_CFLAGS += -Werror
+ccflags-y += -Werror
@@ -171,8 +171,13 @@ static int __devinit ltq_pci_startup(struct ltq_pci_data *conf)
 	u32 temp_buffer;
 
 	/* set clock to 33Mhz */
-	ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~0xf00000, LTQ_CGU_IFCCR);
-	ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | 0x800000, LTQ_CGU_IFCCR);
+	if (ltq_is_ar9()) {
+		ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~0x1f00000, LTQ_CGU_IFCCR);
+		ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | 0xe00000, LTQ_CGU_IFCCR);
+	} else {
+		ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~0xf00000, LTQ_CGU_IFCCR);
+		ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | 0x800000, LTQ_CGU_IFCCR);
+	}
 
 	/* external or internal clock ? */
 	if (conf->clock) {
@@ -215,7 +215,7 @@ static int __init rc32434_pci_init(void)
 	rc32434_pcibridge_init();
 
 	io_map_base = ioremap(rc32434_res_pci_io1.start,
-			      resource_size(&rcrc32434_res_pci_io1));
+			      resource_size(&rc32434_res_pci_io1));
 
 	if (!io_map_base)
 		return -ENOMEM;
@@ -108,12 +108,14 @@ asmlinkage void plat_irq_dispatch(struct pt_regs *regs)
 
 static struct irqaction cic_cascade_msp = {
 	.handler = no_action,
-	.name = "MSP CIC cascade"
+	.name = "MSP CIC cascade",
+	.flags = IRQF_NO_THREAD,
 };
 
 static struct irqaction per_cascade_msp = {
 	.handler = no_action,
-	.name = "MSP PER cascade"
+	.name = "MSP PER cascade",
+	.flags = IRQF_NO_THREAD,
 };
 
 void __init arch_init_irq(void)
 
@@ -167,7 +167,7 @@ static struct irq_chip level_irq_type = {
 
 static struct irqaction gic_action = {
 	.handler = no_action,
-	.flags = IRQF_DISABLED,
+	.flags = IRQF_DISABLED | IRQF_NO_THREAD,
 	.name = "GIC",
 };
 
 
@@ -155,32 +155,32 @@ static void __irq_entry indy_buserror_irq(void)
 
 static struct irqaction local0_cascade = {
 	.handler = no_action,
-	.flags = IRQF_DISABLED,
+	.flags = IRQF_DISABLED | IRQF_NO_THREAD,
 	.name = "local0 cascade",
 };
 
 static struct irqaction local1_cascade = {
 	.handler = no_action,
-	.flags = IRQF_DISABLED,
+	.flags = IRQF_DISABLED | IRQF_NO_THREAD,
 	.name = "local1 cascade",
 };
 
 static struct irqaction buserr = {
 	.handler = no_action,
-	.flags = IRQF_DISABLED,
+	.flags = IRQF_DISABLED | IRQF_NO_THREAD,
 	.name = "Bus Error",
 };
 
 static struct irqaction map0_cascade = {
 	.handler = no_action,
-	.flags = IRQF_DISABLED,
+	.flags = IRQF_DISABLED | IRQF_NO_THREAD,
 	.name = "mapable0 cascade",
 };
 
 #ifdef USE_LIO3_IRQ
 static struct irqaction map1_cascade = {
 	.handler = no_action,
-	.flags = IRQF_DISABLED,
+	.flags = IRQF_DISABLED | IRQF_NO_THREAD,
 	.name = "mapable1 cascade",
 };
 #define SGI_INTERRUPTS	SGINT_END
 
@@ -359,6 +359,7 @@ void sni_rm200_init_8259A(void)
 static struct irqaction sni_rm200_irq2 = {
 	.handler = no_action,
 	.name = "cascade",
+	.flags = IRQF_NO_THREAD,
 };
 
 static struct resource sni_rm200_pic1_resource = {
@@ -34,6 +34,7 @@ static irq_cascade_t irq_cascade[NR_IRQS] __cacheline_aligned;
 static struct irqaction cascade_irqaction = {
 	.handler = no_action,
 	.name = "cascade",
+	.flags = IRQF_NO_THREAD,
 };
 
 int cascade_irq(unsigned int irq, int (*get_irq)(unsigned int))
@@ -21,7 +21,7 @@
 #include <asm/ptrace.h>
 #include <asm/thread_info.h>
 #include <asm/irqflags.h>
-#include <linux/atomic.h>
+#include <asm/atomic_32.h>
 #include <asm/asm-offsets.h>
 #include <hv/hypervisor.h>
 #include <arch/abi.h>
 
@@ -70,7 +70,7 @@
  */
 
 #include <linux/linkage.h>
-#include <linux/atomic.h>
+#include <asm/atomic_32.h>
 #include <asm/page.h>
 #include <asm/processor.h>
 
@@ -56,7 +56,7 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
 	.lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
 };
 
-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
+static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE;
 
 static int __init vsyscall_setup(char *str)
 {
@@ -43,6 +43,17 @@ static const struct dmi_system_id pci_use_crs_table[] __initconst = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "ALiveSATA2-GLAN"),
 		},
 	},
+	/* https://bugzilla.kernel.org/show_bug.cgi?id=30552 */
+	/* 2006 AMD HT/VIA system with two host bridges */
+	{
+		.callback = set_use_crs,
+		.ident = "ASUS M2V-MX SE",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+			DMI_MATCH(DMI_BOARD_NAME, "M2V-MX SE"),
+			DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
+		},
+	},
 	{}
 };
 
@@ -678,38 +678,40 @@ static int __init sfi_parse_devs(struct sfi_table_header *table)
 	pentry = (struct sfi_device_table_entry *)sb->pentry;
 
 	for (i = 0; i < num; i++, pentry++) {
-		if (pentry->irq != (u8)0xff) { /* native RTE case */
+		int irq = pentry->irq;
+
+		if (irq != (u8)0xff) { /* native RTE case */
 			/* these SPI2 devices are not exposed to system as PCI
 			 * devices, but they have separate RTE entry in IOAPIC
 			 * so we have to enable them one by one here
 			 */
-			ioapic = mp_find_ioapic(pentry->irq);
+			ioapic = mp_find_ioapic(irq);
 			irq_attr.ioapic = ioapic;
-			irq_attr.ioapic_pin = pentry->irq;
+			irq_attr.ioapic_pin = irq;
 			irq_attr.trigger = 1;
 			irq_attr.polarity = 1;
-			io_apic_set_pci_routing(NULL, pentry->irq, &irq_attr);
+			io_apic_set_pci_routing(NULL, irq, &irq_attr);
 		} else
-			pentry->irq = 0; /* No irq */
+			irq = 0; /* No irq */
 
 		switch (pentry->type) {
 		case SFI_DEV_TYPE_IPC:
 			/* ID as IRQ is a hack that will go away */
-			pdev = platform_device_alloc(pentry->name, pentry->irq);
+			pdev = platform_device_alloc(pentry->name, irq);
 			if (pdev == NULL) {
 				pr_err("out of memory for SFI platform device '%s'.\n",
 							pentry->name);
 				continue;
 			}
-			install_irq_resource(pdev, pentry->irq);
+			install_irq_resource(pdev, irq);
 			pr_debug("info[%2d]: IPC bus, name = %16.16s, "
-				"irq = 0x%2x\n", i, pentry->name, pentry->irq);
+				"irq = 0x%2x\n", i, pentry->name, irq);
 			sfi_handle_ipc_dev(pdev);
 			break;
 		case SFI_DEV_TYPE_SPI:
 			memset(&spi_info, 0, sizeof(spi_info));
 			strncpy(spi_info.modalias, pentry->name, SFI_NAME_LEN);
-			spi_info.irq = pentry->irq;
+			spi_info.irq = irq;
 			spi_info.bus_num = pentry->host_num;
 			spi_info.chip_select = pentry->addr;
 			spi_info.max_speed_hz = pentry->max_freq;
@@ -726,7 +728,7 @@ static int __init sfi_parse_devs(struct sfi_table_header *table)
 			memset(&i2c_info, 0, sizeof(i2c_info));
 			bus = pentry->host_num;
 			strncpy(i2c_info.type, pentry->name, SFI_NAME_LEN);
-			i2c_info.irq = pentry->irq;
+			i2c_info.irq = irq;
 			i2c_info.addr = pentry->addr;
 			pr_debug("info[%2d]: I2C bus = %d, name = %16.16s, "
 				"irq = 0x%2x, addr = 0x%x\n", i, bus,
@@ -34,8 +34,8 @@ struct gpio_bank {
     u16 irq;
     u16 virtual_irq_start;
     int method;
-#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS)
     u32 suspend_wakeup;
+#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS)
     u32 saved_wakeup;
 #endif
     u32 non_wakeup_gpios;
@@ -577,6 +577,7 @@ pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)
 void
 pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)
 {
+    *gpio_base = -1;
 }
 #endif
 
@@ -129,7 +129,9 @@ static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector,
     for (retry = 0; retry < 4; retry++) {
         ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
                     msg, msg_bytes, NULL, 0, delay, &ack);
-        if (ret < 0)
+        if (ret == -EBUSY)
+            continue;
+        else if (ret < 0)
             return ret;
         if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
             return send_bytes;
@@ -160,7 +162,9 @@ static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector,
     for (retry = 0; retry < 4; retry++) {
         ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
                     msg, msg_bytes, recv, recv_bytes, delay, &ack);
-        if (ret < 0)
+        if (ret == -EBUSY)
+            continue;
+        else if (ret < 0)
             return ret;
         if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
             return ret;
@@ -236,7 +240,9 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
     for (retry = 0; retry < 4; retry++) {
         ret = radeon_process_aux_ch(auxch,
                     msg, msg_bytes, reply, reply_bytes, 0, &ack);
-        if (ret < 0) {
+        if (ret == -EBUSY)
+            continue;
+        else if (ret < 0) {
             DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
             return ret;
         }
@@ -1303,23 +1303,14 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
         /* get the DPCD from the bridge */
         radeon_dp_getdpcd(radeon_connector);
 
-        if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
-            ret = connector_status_connected;
-        else {
-            /* need to setup ddc on the bridge */
-            if (encoder)
-                radeon_atom_ext_encoder_setup_ddc(encoder);
+        if (encoder) {
+            /* setup ddc on the bridge */
+            radeon_atom_ext_encoder_setup_ddc(encoder);
             if (radeon_ddc_probe(radeon_connector,
-                    radeon_connector->requires_extended_probe))
+                    radeon_connector->requires_extended_probe)) /* try DDC */
                 ret = connector_status_connected;
-        }
-        if ((ret == connector_status_disconnected) &&
-            radeon_connector->dac_load_detect) {
-            struct drm_encoder *encoder = radeon_best_single_encoder(connector);
-            struct drm_encoder_helper_funcs *encoder_funcs;
-            if (encoder) {
-                encoder_funcs = encoder->helper_private;
+            else if (radeon_connector->dac_load_detect) { /* try load detection */
+                struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
                 ret = encoder_funcs->detect(encoder, connector);
             }
         }
@@ -1755,9 +1755,12 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
     /* DCE4/5 */
     if (ASIC_IS_DCE4(rdev)) {
         dig = radeon_encoder->enc_priv;
-        if (ASIC_IS_DCE41(rdev))
-            return radeon_crtc->crtc_id;
-        else {
+        if (ASIC_IS_DCE41(rdev)) {
+            if (dig->linkb)
+                return 1;
+            else
+                return 0;
+        } else {
             switch (radeon_encoder->encoder_id) {
             case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
                 if (dig->linkb)
@@ -1715,7 +1715,8 @@ static void w83627ehf_device_remove_files(struct device *dev)
 }
 
 /* Get the monitoring functions started */
-static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data)
+static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data,
+                           enum kinds kind)
 {
     int i;
     u8 tmp, diode;
@@ -1746,10 +1747,16 @@ static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data)
         w83627ehf_write_value(data, W83627EHF_REG_VBAT, tmp | 0x01);
 
     /* Get thermal sensor types */
-    diode = w83627ehf_read_value(data, W83627EHF_REG_DIODE);
+    switch (kind) {
+    case w83627ehf:
+        diode = w83627ehf_read_value(data, W83627EHF_REG_DIODE);
+        break;
+    default:
+        diode = 0x70;
+    }
     for (i = 0; i < 3; i++) {
         if ((tmp & (0x02 << i)))
-            data->temp_type[i] = (diode & (0x10 << i)) ? 1 : 2;
+            data->temp_type[i] = (diode & (0x10 << i)) ? 1 : 3;
         else
             data->temp_type[i] = 4; /* thermistor */
     }
@@ -2016,7 +2023,7 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
     }
 
     /* Initialize the chip */
-    w83627ehf_init_device(data);
+    w83627ehf_init_device(data, sio_data->kind);
 
     data->vrm = vid_which_vrm();
     superio_enter(sio_data->sioreg);
@@ -327,7 +327,7 @@ config BLK_DEV_OPTI621
     select BLK_DEV_IDEPCI
     help
       This is a driver for the OPTi 82C621 EIDE controller.
-      Please read the comments at the top of <file:drivers/ide/pci/opti621.c>.
+      Please read the comments at the top of <file:drivers/ide/opti621.c>.
 
 config BLK_DEV_RZ1000
     tristate "RZ1000 chipset bugfix/support"
@@ -365,7 +365,7 @@ config BLK_DEV_ALI15X3
       normal dual channel support.
 
       Please read the comments at the top of
-      <file:drivers/ide/pci/alim15x3.c>.
+      <file:drivers/ide/alim15x3.c>.
 
       If unsure, say N.
 
@@ -528,7 +528,7 @@ config BLK_DEV_NS87415
       This driver adds detection and support for the NS87415 chip
       (used mainly on SPARC64 and PA-RISC machines).
 
-      Please read the comments at the top of <file:drivers/ide/pci/ns87415.c>.
+      Please read the comments at the top of <file:drivers/ide/ns87415.c>.
 
 config BLK_DEV_PDC202XX_OLD
     tristate "PROMISE PDC202{46|62|65|67} support"
@@ -547,7 +547,7 @@ config BLK_DEV_PDC202XX_OLD
       for more than one card.
 
      Please read the comments at the top of
-      <file:drivers/ide/pci/pdc202xx_old.c>.
+      <file:drivers/ide/pdc202xx_old.c>.
 
       If unsure, say N.
 
@@ -593,7 +593,7 @@ config BLK_DEV_SIS5513
       ATA100: SiS635, SiS645, SiS650, SiS730, SiS735, SiS740,
       SiS745, SiS750
 
-      Please read the comments at the top of <file:drivers/ide/pci/sis5513.c>.
+      Please read the comments at the top of <file:drivers/ide/sis5513.c>.
 
 config BLK_DEV_SL82C105
     tristate "Winbond SL82c105 support"
@@ -616,7 +616,7 @@ config BLK_DEV_SLC90E66
       look-a-like to the PIIX4 it should be a nice addition.
 
       Please read the comments at the top of
-      <file:drivers/ide/pci/slc90e66.c>.
+      <file:drivers/ide/slc90e66.c>.
 
 config BLK_DEV_TRM290
     tristate "Tekram TRM290 chipset support"
@@ -625,7 +625,7 @@ config BLK_DEV_TRM290
       This driver adds support for bus master DMA transfers
       using the Tekram TRM290 PCI IDE chip. Volunteers are
       needed for further tweaking and development.
-      Please read the comments at the top of <file:drivers/ide/pci/trm290.c>.
+      Please read the comments at the top of <file:drivers/ide/trm290.c>.
 
 config BLK_DEV_VIA82CXXX
     tristate "VIA82CXXX chipset support"
@@ -836,7 +836,7 @@ config BLK_DEV_ALI14XX
       of the ALI M1439/1443/1445/1487/1489 chipsets, and permits faster
       I/O speeds to be set as well.
       See the files <file:Documentation/ide/ide.txt> and
-      <file:drivers/ide/legacy/ali14xx.c> for more info.
+      <file:drivers/ide/ali14xx.c> for more info.
 
 config BLK_DEV_DTC2278
     tristate "DTC-2278 support"
@@ -847,7 +847,7 @@ config BLK_DEV_DTC2278
       boot parameter. It enables support for the secondary IDE interface
       of the DTC-2278 card, and permits faster I/O speeds to be set as
       well. See the <file:Documentation/ide/ide.txt> and
-      <file:drivers/ide/legacy/dtc2278.c> files for more info.
+      <file:drivers/ide/dtc2278.c> files for more info.
 
 config BLK_DEV_HT6560B
     tristate "Holtek HT6560B support"
@@ -858,7 +858,7 @@ config BLK_DEV_HT6560B
       boot parameter. It enables support for the secondary IDE interface
       of the Holtek card, and permits faster I/O speeds to be set as well.
      See the <file:Documentation/ide/ide.txt> and
-      <file:drivers/ide/legacy/ht6560b.c> files for more info.
+      <file:drivers/ide/ht6560b.c> files for more info.
 
 config BLK_DEV_QD65XX
     tristate "QDI QD65xx support"
@@ -867,7 +867,7 @@ config BLK_DEV_QD65XX
     help
       This driver is enabled at runtime using the "qd65xx.probe" kernel
       boot parameter. It permits faster I/O speeds to be set. See the
-      <file:Documentation/ide/ide.txt> and <file:drivers/ide/legacy/qd65xx.c>
+      <file:Documentation/ide/ide.txt> and <file:drivers/ide/qd65xx.c>
       for more info.
 
 config BLK_DEV_UMC8672
@@ -879,7 +879,7 @@ config BLK_DEV_UMC8672
       boot parameter. It enables support for the secondary IDE interface
       of the UMC-8672, and permits faster I/O speeds to be set as well.
       See the files <file:Documentation/ide/ide.txt> and
-      <file:drivers/ide/legacy/umc8672.c> for more info.
+      <file:drivers/ide/umc8672.c> for more info.
 
 endif
 
@@ -1124,11 +1124,8 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
         for (i = 0; i < 8; i++)
             __set_bit(BTN_0 + i, input_dev->keybit);
 
-        if (wacom_wac->features.type != WACOM_21UX2) {
-            input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0);
-            input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0);
-        }
+        input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0);
+        input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0);
 
         input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
 
         __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
@@ -1698,6 +1698,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
     }
 
     ti->num_flush_requests = 1;
+    ti->discard_zeroes_data_unsupported = 1;
 
     return 0;
 
 bad:
@@ -81,8 +81,10 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
          * corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>
          */
         if (!strcasecmp(arg_name, "corrupt_bio_byte")) {
-            if (!argc)
+            if (!argc) {
                 ti->error = "Feature corrupt_bio_byte requires parameters";
+                return -EINVAL;
+            }
 
             r = dm_read_arg(_args + 1, as, &fc->corrupt_bio_byte, &ti->error);
             if (r)
@@ -449,7 +449,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
                 rs->ti->error = "write_mostly option is only valid for RAID1";
                 return -EINVAL;
             }
-            if (value > rs->md.raid_disks) {
+            if (value >= rs->md.raid_disks) {
                 rs->ti->error = "Invalid write_mostly drive index given";
                 return -EINVAL;
             }
@@ -1238,14 +1238,15 @@ static void dm_table_set_integrity(struct dm_table *t)
         return;
 
     template_disk = dm_table_get_integrity_disk(t, true);
-    if (!template_disk &&
-        blk_integrity_is_initialized(dm_disk(t->md))) {
+    if (template_disk)
+        blk_integrity_register(dm_disk(t->md),
+                       blk_get_integrity(template_disk));
+    else if (blk_integrity_is_initialized(dm_disk(t->md)))
         DMWARN("%s: device no longer has a valid integrity profile",
                dm_device_name(t->md));
-        return;
-    }
-    blk_integrity_register(dm_disk(t->md),
-                   blk_get_integrity(template_disk));
+    else
+        DMWARN("%s: unable to establish an integrity profile",
+               dm_device_name(t->md));
 }
 
 static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
@@ -1282,6 +1283,22 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned flush)
     return 0;
 }
 
+static bool dm_table_discard_zeroes_data(struct dm_table *t)
+{
+    struct dm_target *ti;
+    unsigned i = 0;
+
+    /* Ensure that all targets supports discard_zeroes_data. */
+    while (i < dm_table_get_num_targets(t)) {
+        ti = dm_table_get_target(t, i++);
+
+        if (ti->discard_zeroes_data_unsupported)
+            return 0;
+    }
+
+    return 1;
+}
+
 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
                    struct queue_limits *limits)
 {
@@ -1304,6 +1321,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
     }
     blk_queue_flush(q, flush);
 
+    if (!dm_table_discard_zeroes_data(t))
+        q->limits.discard_zeroes_data = 0;
+
     dm_table_set_integrity(t);
 
     /*
@@ -61,6 +61,11 @@
 static void autostart_arrays(int part);
 #endif
 
+/* pers_list is a list of registered personalities protected
+ * by pers_lock.
+ * pers_lock does extra service to protect accesses to
+ * mddev->thread when the mutex cannot be held.
+ */
 static LIST_HEAD(pers_list);
 static DEFINE_SPINLOCK(pers_lock);
 
@@ -739,7 +744,12 @@ static void mddev_unlock(mddev_t * mddev)
     } else
         mutex_unlock(&mddev->reconfig_mutex);
 
+    /* was we've dropped the mutex we need a spinlock to
+     * make sure the thread doesn't disappear
+     */
+    spin_lock(&pers_lock);
     md_wakeup_thread(mddev->thread);
+    spin_unlock(&pers_lock);
 }
 
 static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
@@ -6429,11 +6439,18 @@ mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
     return thread;
 }
 
-void md_unregister_thread(mdk_thread_t *thread)
+void md_unregister_thread(mdk_thread_t **threadp)
 {
+    mdk_thread_t *thread = *threadp;
     if (!thread)
         return;
     dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
+    /* Locking ensures that mddev_unlock does not wake_up a
+     * non-existent thread
+     */
+    spin_lock(&pers_lock);
+    *threadp = NULL;
+    spin_unlock(&pers_lock);
 
     kthread_stop(thread->tsk);
     kfree(thread);
@@ -7340,8 +7357,7 @@ static void reap_sync_thread(mddev_t *mddev)
     mdk_rdev_t *rdev;
 
     /* resync has finished, collect result */
-    md_unregister_thread(mddev->sync_thread);
-    mddev->sync_thread = NULL;
+    md_unregister_thread(&mddev->sync_thread);
     if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
         !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
         /* success...*/
@@ -560,7 +560,7 @@ extern int register_md_personality(struct mdk_personality *p);
 extern int unregister_md_personality(struct mdk_personality *p);
 extern mdk_thread_t * md_register_thread(void (*run) (mddev_t *mddev),
                 mddev_t *mddev, const char *name);
-extern void md_unregister_thread(mdk_thread_t *thread);
+extern void md_unregister_thread(mdk_thread_t **threadp);
 extern void md_wakeup_thread(mdk_thread_t *thread);
 extern void md_check_recovery(mddev_t *mddev);
 extern void md_write_start(mddev_t *mddev, struct bio *bi);
@@ -514,8 +514,7 @@ static int multipath_stop (mddev_t *mddev)
 {
     multipath_conf_t *conf = mddev->private;
 
-    md_unregister_thread(mddev->thread);
-    mddev->thread = NULL;
+    md_unregister_thread(&mddev->thread);
     blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
     mempool_destroy(conf->pool);
     kfree(conf->multipaths);
@@ -2562,8 +2562,7 @@ static int stop(mddev_t *mddev)
     raise_barrier(conf);
     lower_barrier(conf);
 
-    md_unregister_thread(mddev->thread);
-    mddev->thread = NULL;
+    md_unregister_thread(&mddev->thread);
     if (conf->r1bio_pool)
         mempool_destroy(conf->r1bio_pool);
     kfree(conf->mirrors);
@@ -2955,7 +2955,7 @@ static int run(mddev_t *mddev)
     return 0;
 
 out_free_conf:
-    md_unregister_thread(mddev->thread);
+    md_unregister_thread(&mddev->thread);
     if (conf->r10bio_pool)
         mempool_destroy(conf->r10bio_pool);
     safe_put_page(conf->tmppage);
@@ -2973,8 +2973,7 @@ static int stop(mddev_t *mddev)
     raise_barrier(conf, 0);
     lower_barrier(conf);
 
-    md_unregister_thread(mddev->thread);
-    mddev->thread = NULL;
+    md_unregister_thread(&mddev->thread);
     blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
     if (conf->r10bio_pool)
         mempool_destroy(conf->r10bio_pool);
@@ -4941,8 +4941,7 @@ static int run(mddev_t *mddev)
 
     return 0;
 abort:
-    md_unregister_thread(mddev->thread);
-    mddev->thread = NULL;
+    md_unregister_thread(&mddev->thread);
     if (conf) {
         print_raid5_conf(conf);
         free_conf(conf);
@@ -4956,8 +4955,7 @@ static int stop(mddev_t *mddev)
 {
     raid5_conf_t *conf = mddev->private;
 
-    md_unregister_thread(mddev->thread);
-    mddev->thread = NULL;
+    md_unregister_thread(&mddev->thread);
     if (mddev->queue)
         mddev->queue->backing_dev_info.congested_fn = NULL;
     free_conf(conf);
@@ -239,13 +239,19 @@ void bnx2x_int_disable(struct bnx2x *bp);
  *  FUNC_N_CLID_X = N * NUM_SPECIAL_CLIENTS + FUNC_0_CLID_X
  *
  */
-/* iSCSI L2 */
-#define BNX2X_ISCSI_ETH_CL_ID_IDX   1
-#define BNX2X_ISCSI_ETH_CID         49
+enum {
+    BNX2X_ISCSI_ETH_CL_ID_IDX,
+    BNX2X_FCOE_ETH_CL_ID_IDX,
+    BNX2X_MAX_CNIC_ETH_CL_ID_IDX,
+};
 
-/* FCoE L2 */
-#define BNX2X_FCOE_ETH_CL_ID_IDX    2
-#define BNX2X_FCOE_ETH_CID          50
+#define BNX2X_CNIC_START_ETH_CID    48
+
+enum {
+    /* iSCSI L2 */
+    BNX2X_ISCSI_ETH_CID = BNX2X_CNIC_START_ETH_CID,
+    /* FCoE L2 */
+    BNX2X_FCOE_ETH_CID,
+};
 
 /** Additional rings budgeting */
 #ifdef BCM_CNIC
@@ -1297,7 +1297,7 @@ static inline void bnx2x_init_txdata(struct bnx2x *bp,
 static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx)
 {
     return bp->cnic_base_cl_id + cl_idx +
-        (bp->pf_num >> 1) * NON_ETH_CONTEXT_USE;
+        (bp->pf_num >> 1) * BNX2X_MAX_CNIC_ETH_CL_ID_IDX;
 }
 
 static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp)
@@ -261,11 +261,13 @@ static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
         void __iomem *data = &regs->tx.dsr1_0;
         u16 *payload = (u16 *)frame->data;
 
-        /* It is safe to write into dsr[dlc+1] */
-        for (i = 0; i < (frame->can_dlc + 1) / 2; i++) {
+        for (i = 0; i < frame->can_dlc / 2; i++) {
             out_be16(data, *payload++);
             data += 2 + _MSCAN_RESERVED_DSR_SIZE;
         }
+        /* write remaining byte if necessary */
+        if (frame->can_dlc & 1)
+            out_8(data, frame->data[frame->can_dlc - 1]);
     }
 
     out_8(&regs->tx.dlr, frame->can_dlc);
@@ -330,10 +332,13 @@ static void mscan_get_rx_frame(struct net_device *dev, struct can_frame *frame)
         void __iomem *data = &regs->rx.dsr1_0;
         u16 *payload = (u16 *)frame->data;
 
-        for (i = 0; i < (frame->can_dlc + 1) / 2; i++) {
+        for (i = 0; i < frame->can_dlc / 2; i++) {
             *payload++ = in_be16(data);
             data += 2 + _MSCAN_RESERVED_DSR_SIZE;
         }
+        /* read remaining byte if necessary */
+        if (frame->can_dlc & 1)
+            frame->data[frame->can_dlc - 1] = in_8(data);
     }
 
     out_8(&regs->canrflg, MSCAN_RXF);
@@ -239,7 +239,7 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
         dest = macvlan_hash_lookup(port, eth->h_dest);
         if (dest && dest->mode == MACVLAN_MODE_BRIDGE) {
             /* send to lowerdev first for its network taps */
-            vlan->forward(vlan->lowerdev, skb);
+            dev_forward_skb(vlan->lowerdev, skb);
 
             return NET_XMIT_SUCCESS;
         }
@@ -172,7 +172,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
     memset(ring->buf, 0, ring->buf_size);
 
     ring->qp_state = MLX4_QP_STATE_RST;
-    ring->doorbell_qpn = swab32(ring->qp.qpn << 8);
+    ring->doorbell_qpn = ring->qp.qpn << 8;
 
     mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
                 ring->cqn, &ring->context);
@@ -791,7 +791,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
     skb_orphan(skb);
 
     if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) {
-        *(u32 *) (&tx_desc->ctrl.vlan_tag) |= ring->doorbell_qpn;
+        *(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn);
         op_own |= htonl((bf_index & 0xffff) << 8);
         /* Ensure new descirptor hits memory
          * before setting ownership of this descriptor to HW */
@@ -812,7 +812,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
         wmb();
         tx_desc->ctrl.owner_opcode = op_own;
         wmb();
-        writel(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL);
+        iowrite32be(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL);
     }
 
     /* Poll CQ here */
@@ -1769,10 +1769,12 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent,
         sas_disable_routing(parent, phy->attached_sas_addr);
     }
     memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
-    sas_port_delete_phy(phy->port, phy->phy);
-    if (phy->port->num_phys == 0)
-        sas_port_delete(phy->port);
-    phy->port = NULL;
+    if (phy->port) {
+        sas_port_delete_phy(phy->port, phy->phy);
+        if (phy->port->num_phys == 0)
+            sas_port_delete(phy->port);
+        phy->port = NULL;
+    }
 }
 
 static int sas_discover_bfs_by_root_level(struct domain_device *root,
@@ -1328,10 +1328,9 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
                 qla2x00_sp_compl(ha, sp);
             } else {
                 ctx = sp->ctx;
-                if (ctx->type == SRB_LOGIN_CMD ||
-                    ctx->type == SRB_LOGOUT_CMD) {
-                    ctx->u.iocb_cmd->free(sp);
-                } else {
+                if (ctx->type == SRB_ELS_CMD_RPT ||
+                    ctx->type == SRB_ELS_CMD_HST ||
+                    ctx->type == SRB_CT_CMD) {
                     struct fc_bsg_job *bsg_job =
                         ctx->u.bsg_job;
                     if (bsg_job->request->msgcode
@@ -1343,6 +1342,8 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
                     kfree(sp->ctx);
                     mempool_free(sp,
                         ha->srb_mempool);
+                } else {
+                    ctx->u.iocb_cmd->free(sp);
                 }
             }
         }
@@ -411,7 +411,8 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
             skb->protocol = eth_type_trans(skb, dev);
             skb->dev = dev;
 
-            if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc || work->word2.s.L4_error))
+            if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc ||
+                work->word2.s.L4_error || !work->word2.s.tcp_or_udp))
                 skb->ip_summed = CHECKSUM_NONE;
             else
                 skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -478,8 +478,10 @@ lqasc_set_termios(struct uart_port *port,
     spin_unlock_irqrestore(&ltq_asc_lock, flags);
 
     /* Don't rewrite B0 */
     if (tty_termios_baud_rate(new))
         tty_termios_encode_baud_rate(new, baud, baud);
 
+    uart_update_timeout(port, cflag, baud);
 }
 
 static const char*
@@ -1047,7 +1047,16 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
     if (!max_to_defrag)
         max_to_defrag = last_index - 1;
 
-    while (i <= last_index && defrag_count < max_to_defrag) {
+    /*
+     * make writeback starts from i, so the defrag range can be
+     * written sequentially.
+     */
+    if (i < inode->i_mapping->writeback_index)
+        inode->i_mapping->writeback_index = i;
+
+    while (i <= last_index && defrag_count < max_to_defrag &&
+           (i < (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
+        PAGE_CACHE_SHIFT)) {
         /*
          * make sure we stop running if someone unmounts
          * the FS
@@ -2018,7 +2018,7 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
             warned_on_ntlm = true;
             cERROR(1, "default security mechanism requested.  The default "
                 "security mechanism will be upgraded from ntlm to "
-                "ntlmv2 in kernel release 3.1");
+                "ntlmv2 in kernel release 3.2");
         }
         ses->overrideSecFlg = volume_info->secFlg;
 
@@ -629,7 +629,7 @@ xfs_buf_item_push(
  * the xfsbufd to get this buffer written. We have to unlock the buffer
  * to allow the xfsbufd to write it, too.
  */
-STATIC void
+STATIC bool
 xfs_buf_item_pushbuf(
     struct xfs_log_item *lip)
 {
@@ -643,6 +643,7 @@ xfs_buf_item_pushbuf(
 
     xfs_buf_delwri_promote(bp);
     xfs_buf_relse(bp);
+    return true;
 }
 
 STATIC void
@@ -183,13 +183,14 @@ xfs_qm_dqunpin_wait(
  * search the buffer cache can be a time consuming thing, and AIL lock is a
  * spinlock.
  */
-STATIC void
+STATIC bool
 xfs_qm_dquot_logitem_pushbuf(
     struct xfs_log_item *lip)
 {
     struct xfs_dq_logitem *qlip = DQUOT_ITEM(lip);
     struct xfs_dquot *dqp = qlip->qli_dquot;
     struct xfs_buf *bp;
+    bool ret = true;
 
     ASSERT(XFS_DQ_IS_LOCKED(dqp));
 
@@ -201,17 +202,20 @@ xfs_qm_dquot_logitem_pushbuf(
     if (completion_done(&dqp->q_flush) ||
         !(lip->li_flags & XFS_LI_IN_AIL)) {
         xfs_dqunlock(dqp);
-        return;
+        return true;
     }
 
     bp = xfs_incore(dqp->q_mount->m_ddev_targp, qlip->qli_format.qlf_blkno,
             dqp->q_mount->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK);
     xfs_dqunlock(dqp);
     if (!bp)
-        return;
+        return true;
     if (XFS_BUF_ISDELAYWRITE(bp))
         xfs_buf_delwri_promote(bp);
+    if (xfs_buf_ispinned(bp))
+        ret = false;
     xfs_buf_relse(bp);
+    return ret;
 }
 
 /*
@@ -706,13 +706,14 @@ xfs_inode_item_committed(
  * marked delayed write. If that's the case, we'll promote it and that will
  * allow the caller to write the buffer by triggering the xfsbufd to run.
  */
-STATIC void
+STATIC bool
 xfs_inode_item_pushbuf(
     struct xfs_log_item *lip)
 {
     struct xfs_inode_log_item *iip = INODE_ITEM(lip);
     struct xfs_inode *ip = iip->ili_inode;
     struct xfs_buf *bp;
+    bool ret = true;
 
     ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));
 
@@ -723,7 +724,7 @@ xfs_inode_item_pushbuf(
     if (completion_done(&ip->i_flush) ||
         !(lip->li_flags & XFS_LI_IN_AIL)) {
         xfs_iunlock(ip, XFS_ILOCK_SHARED);
-        return;
+        return true;
     }
 
     bp = xfs_incore(ip->i_mount->m_ddev_targp, iip->ili_format.ilf_blkno,
@@ -731,10 +732,13 @@ xfs_inode_item_pushbuf(
 
     xfs_iunlock(ip, XFS_ILOCK_SHARED);
     if (!bp)
-        return;
+        return true;
     if (XFS_BUF_ISDELAYWRITE(bp))
         xfs_buf_delwri_promote(bp);
+    if (xfs_buf_ispinned(bp))
+        ret = false;
     xfs_buf_relse(bp);
+    return ret;
 }
 
 /*
@@ -68,6 +68,8 @@
 #include <linux/ctype.h>
 #include <linux/writeback.h>
 #include <linux/capability.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
 #include <linux/list_sort.h>
 
 #include <asm/page.h>
@@ -1648,24 +1648,13 @@ xfs_init_workqueues(void)
      */
     xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_CPU_INTENSIVE, 8);
     if (!xfs_syncd_wq)
-        goto out;
-
-    xfs_ail_wq = alloc_workqueue("xfsail", WQ_CPU_INTENSIVE, 8);
-    if (!xfs_ail_wq)
-        goto out_destroy_syncd;
+        return -ENOMEM;
 
     return 0;
-
-out_destroy_syncd:
-    destroy_workqueue(xfs_syncd_wq);
-out:
-    return -ENOMEM;
 }
 
 STATIC void
 xfs_destroy_workqueues(void)
 {
-    destroy_workqueue(xfs_ail_wq);
     destroy_workqueue(xfs_syncd_wq);
 }
 
@@ -350,7 +350,7 @@ typedef struct xfs_item_ops {
     void (*iop_unlock)(xfs_log_item_t *);
     xfs_lsn_t (*iop_committed)(xfs_log_item_t *, xfs_lsn_t);
     void (*iop_push)(xfs_log_item_t *);
-    void (*iop_pushbuf)(xfs_log_item_t *);
+    bool (*iop_pushbuf)(xfs_log_item_t *);
     void (*iop_committing)(xfs_log_item_t *, xfs_lsn_t);
 } xfs_item_ops_t;
 
@@ -28,8 +28,6 @@
 #include "xfs_trans_priv.h"
 #include "xfs_error.h"
 
-struct workqueue_struct *xfs_ail_wq;    /* AIL workqueue */
-
 #ifdef DEBUG
 /*
  * Check that the list is sorted as it should be.
@@ -356,16 +354,10 @@ xfs_ail_delete(
         xfs_trans_ail_cursor_clear(ailp, lip);
 }
 
-/*
- * xfs_ail_worker does the work of pushing on the AIL. It will requeue itself
- * to run at a later time if there is more work to do to complete the push.
- */
-STATIC void
-xfs_ail_worker(
-    struct work_struct *work)
+static long
+xfsaild_push(
+    struct xfs_ail *ailp)
 {
-    struct xfs_ail *ailp = container_of(to_delayed_work(work),
-                    struct xfs_ail, xa_work);
     xfs_mount_t *mp = ailp->xa_mount;
     struct xfs_ail_cursor cur;
     xfs_log_item_t *lip;
@@ -439,8 +431,13 @@ xfs_ail_worker(
 
         case XFS_ITEM_PUSHBUF:
             XFS_STATS_INC(xs_push_ail_pushbuf);
-            IOP_PUSHBUF(lip);
-            ailp->xa_last_pushed_lsn = lsn;
+            if (!IOP_PUSHBUF(lip)) {
+                stuck++;
+                flush_log = 1;
+            } else {
+                ailp->xa_last_pushed_lsn = lsn;
+            }
             push_xfsbufd = 1;
             break;
 
@@ -452,7 +449,6 @@
 
         case XFS_ITEM_LOCKED:
             XFS_STATS_INC(xs_push_ail_locked);
-            ailp->xa_last_pushed_lsn = lsn;
             stuck++;
             break;
 
@@ -504,20 +500,6 @@ out_done:
         ailp->xa_last_pushed_lsn = 0;
         ailp->xa_log_flush = 0;
 
-        /*
-         * We clear the XFS_AIL_PUSHING_BIT first before checking
-         * whether the target has changed. If the target has changed,
-         * this pushes the requeue race directly onto the result of the
-         * atomic test/set bit, so we are guaranteed that either the
-         * the pusher that changed the target or ourselves will requeue
-         * the work (but not both).
-         */
-        clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags);
-        smp_rmb();
-        if (XFS_LSN_CMP(ailp->xa_target, target) == 0 ||
-            test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
-            return;
-
         tout = 50;
     } else if (XFS_LSN_CMP(lsn, target) >= 0) {
         /*
@@ -544,9 +526,30 @@ out_done:
         ailp->xa_last_pushed_lsn = 0;
     }
 
-    /* There is more to do, requeue us.  */
-    queue_delayed_work(xfs_syncd_wq, &ailp->xa_work,
-            msecs_to_jiffies(tout));
+    return tout;
+}
+
+static int
+xfsaild(
+    void *data)
+{
+    struct xfs_ail *ailp = data;
+    long tout = 0;  /* milliseconds */
+
+    while (!kthread_should_stop()) {
+        if (tout && tout <= 20)
+            __set_current_state(TASK_KILLABLE);
+        else
+            __set_current_state(TASK_INTERRUPTIBLE);
+        schedule_timeout(tout ?
+                msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT);
+
+        try_to_freeze();
+
+        tout = xfsaild_push(ailp);
+    }
+
+    return 0;
 }
 
 /*
@@ -581,8 +584,9 @@ xfs_ail_push(
      */
     smp_wmb();
     xfs_trans_ail_copy_lsn(ailp, &ailp->xa_target, &threshold_lsn);
-    if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
-        queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0);
+    smp_wmb();
+
+    wake_up_process(ailp->xa_task);
 }
 
 /*
@@ -820,9 +824,18 @@ xfs_trans_ail_init(
     INIT_LIST_HEAD(&ailp->xa_ail);
     INIT_LIST_HEAD(&ailp->xa_cursors);
     spin_lock_init(&ailp->xa_lock);
-    INIT_DELAYED_WORK(&ailp->xa_work, xfs_ail_worker);
+
+    ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild/%s",
+            ailp->xa_mount->m_fsname);
+    if (IS_ERR(ailp->xa_task))
+        goto out_free_ailp;
 
     mp->m_ail = ailp;
     return 0;
+
+out_free_ailp:
+    kmem_free(ailp);
+    return ENOMEM;
 }
 
 void
@@ -831,6 +844,6 @@ xfs_trans_ail_destroy(
 {
     struct xfs_ail *ailp = mp->m_ail;
 
-    cancel_delayed_work_sync(&ailp->xa_work);
+    kthread_stop(ailp->xa_task);
     kmem_free(ailp);
 }
 
@@ -64,24 +64,18 @@ struct xfs_ail_cursor {
  */
 struct xfs_ail {
     struct xfs_mount    *xa_mount;
+    struct task_struct  *xa_task;
     struct list_head    xa_ail;
     xfs_lsn_t           xa_target;
    struct list_head    xa_cursors;
     spinlock_t          xa_lock;
-    struct delayed_work xa_work;
     xfs_lsn_t           xa_last_pushed_lsn;
-    unsigned long       xa_flags;
     int                 xa_log_flush;
 };
 
-#define XFS_AIL_PUSHING_BIT 0
-
 /*
  * From xfs_trans_ail.c
  */
 
-extern struct workqueue_struct *xfs_ail_wq; /* AIL workqueue */
-
 void    xfs_trans_ail_update_bulk(struct xfs_ail *ailp,
                 struct xfs_ail_cursor *cur,
                 struct xfs_log_item **log_items, int nr_items,
@@ -197,6 +197,11 @@ struct dm_target {
      * whether or not its underlying devices have support.
      */
     unsigned discards_supported:1;
+
+    /*
+     * Set if this target does not return zeroes on discarded blocks.
+     */
+    unsigned discard_zeroes_data_unsupported:1;
 };
 
 /* Each target can link one of these into the table */
@@ -1172,7 +1172,7 @@ DECLARE_RWSEM(uts_sem);
 static int override_release(char __user *release, int len)
 {
     int ret = 0;
-    char buf[len];
+    char buf[65];
 
     if (current->personality & UNAME26) {
         char *rest = UTS_RELEASE;
@@ -91,7 +91,6 @@ static int br_dev_open(struct net_device *dev)
 {
     struct net_bridge *br = netdev_priv(dev);
 
-    netif_carrier_off(dev);
     netdev_update_features(dev);
     netif_start_queue(dev);
     br_stp_enable_bridge(br);
@@ -108,8 +107,6 @@ static int br_dev_stop(struct net_device *dev)
 {
     struct net_bridge *br = netdev_priv(dev);
 
-    netif_carrier_off(dev);
-
     br_stp_disable_bridge(br);
     br_multicast_stop(br);
 
@@ -1389,9 +1389,7 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 
     BUG_ON(!pcount);
 
-    /* Tweak before seqno plays */
-    if (!tcp_is_fack(tp) && tcp_is_sack(tp) && tp->lost_skb_hint &&
-        !before(TCP_SKB_CB(tp->lost_skb_hint)->seq, TCP_SKB_CB(skb)->seq))
+    if (skb == tp->lost_skb_hint)
         tp->lost_cnt_hint += pcount;
 
     TCP_SKB_CB(prev)->end_seq += shifted;
@@ -927,18 +927,21 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
         }
         sk_nocaps_add(sk, NETIF_F_GSO_MASK);
     }
-    if (tcp_alloc_md5sig_pool(sk) == NULL) {
+
+    md5sig = tp->md5sig_info;
+    if (md5sig->entries4 == 0 &&
+        tcp_alloc_md5sig_pool(sk) == NULL) {
         kfree(newkey);
         return -ENOMEM;
     }
-    md5sig = tp->md5sig_info;
 
     if (md5sig->alloced4 == md5sig->entries4) {
         keys = kmalloc((sizeof(*keys) *
                 (md5sig->entries4 + 1)), GFP_ATOMIC);
         if (!keys) {
             kfree(newkey);
-            tcp_free_md5sig_pool();
+            if (md5sig->entries4 == 0)
+                tcp_free_md5sig_pool();
             return -ENOMEM;
         }
 
@@ -982,6 +985,7 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
             kfree(tp->md5sig_info->keys4);
             tp->md5sig_info->keys4 = NULL;
             tp->md5sig_info->alloced4 = 0;
+            tcp_free_md5sig_pool();
         } else if (tp->md5sig_info->entries4 != i) {
             /* Need to do some manipulation */
             memmove(&tp->md5sig_info->keys4[i],
@@ -989,7 +993,6 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
                 (tp->md5sig_info->entries4 - i) *
                 sizeof(struct tcp4_md5sig_key));
         }
-        tcp_free_md5sig_pool();
         return 0;
     }
 }
@@ -875,6 +875,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
     skb_reset_transport_header(skb);
     __skb_push(skb, skb_gro_offset(skb));
 
+    ops = rcu_dereference(inet6_protos[proto]);
     if (!ops || !ops->gro_receive)
         goto out_unlock;
 
@@ -591,7 +591,8 @@ static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
         }
         sk_nocaps_add(sk, NETIF_F_GSO_MASK);
     }
-    if (tcp_alloc_md5sig_pool(sk) == NULL) {
+    if (tp->md5sig_info->entries6 == 0 &&
+        tcp_alloc_md5sig_pool(sk) == NULL) {
         kfree(newkey);
         return -ENOMEM;
     }
@@ -600,8 +601,9 @@ static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
                    (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);
 
         if (!keys) {
-            tcp_free_md5sig_pool();
             kfree(newkey);
+            if (tp->md5sig_info->entries6 == 0)
+                tcp_free_md5sig_pool();
             return -ENOMEM;
         }
 
@@ -647,6 +649,7 @@ static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer)
             kfree(tp->md5sig_info->keys6);
             tp->md5sig_info->keys6 = NULL;
             tp->md5sig_info->alloced6 = 0;
+            tcp_free_md5sig_pool();
         } else {
             /* shrink the database */
             if (tp->md5sig_info->entries6 != i)
@@ -655,7 +658,6 @@ static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer)
                 (tp->md5sig_info->entries6 - i)
                 * sizeof (tp->md5sig_info->keys6[0]));
         }
-        tcp_free_md5sig_pool();
         return 0;
     }
 }