Linux 3.16-rc6

-----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1
 
 iQEcBAABAgAGBQJTzJFGAAoJEHm+PkMAQRiGNzQH/087gQch5K+A2HKvPzjUXq57
 G82DJHLONMMq8+NY3Vqhp8g2V8zRbXGJEvMJMsyuscO37Vo7ADcrYo8lqY9w5bIl
 h+Zarhkqz0rqRs2SfMMIVzdd2W7MzL+lqj3GplGPxHztw0+qk7PRKILx6eRppGaH
 JaD4NfkD5+1vfve/2d1ze9D5pCiw6PFNzjesKZxScQhNhIyLdRamfSTY4r9XeURo
 CxpwjphEYfvAcgc39mwzEHPHyKSqULu0By6R8FXQpJ9QjVtzcGEiF+cPqGncpZOR
 5ZSyU5e1CpBl9w8o6Lm9ewXmaCSnBU/VFrOwWvZrXfokZedXBOz7KdShU93XFjU=
 =0VJM
 -----END PGP SIGNATURE-----

Merge tag 'v3.16-rc6' into next/dt

Update to Linux 3.16-rc6 as a dependency for the broadcom changes.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Arnd Bergmann 2014-07-28 17:04:15 +02:00
commit 565f46dc4d
245 changed files with 2251 additions and 1415 deletions


@@ -60,12 +60,6 @@ If the driver needs to perform more complex initialization like getting and
 configuring GPIOs it can get its ACPI handle and extract this information
 from ACPI tables.
 
-Currently the kernel is not able to automatically determine from which ACPI
-device it should make the corresponding platform device so we need to add
-the ACPI device explicitly to acpi_platform_device_ids list defined in
-drivers/acpi/acpi_platform.c. This limitation is only for the platform
-devices, SPI and I2C devices are created automatically as described below.
-
 DMA support
 ~~~~~~~~~~~
 DMA controllers enumerated via ACPI should be registered in the system to


@@ -8,10 +8,12 @@ Both required and optional properties listed below must be defined
 under node /cpus/cpu@0.
 
 Required properties:
-- operating-points: Refer to Documentation/devicetree/bindings/power/opp.txt
-  for details
+- None
 
 Optional properties:
+- operating-points: Refer to Documentation/devicetree/bindings/power/opp.txt for
+  details. OPPs *must* be supplied either via DT, i.e. this property, or
+  populated at runtime.
 - clock-latency: Specify the possible maximum transition latency for clock,
   in unit of nanoseconds.
 - voltage-tolerance: Specify the CPU voltage tolerance in percentage.


@@ -2790,6 +2790,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
			leaf rcu_node structure.  Useful for very large
			systems.
 
+	rcutree.jiffies_till_sched_qs= [KNL]
+			Set required age in jiffies for a
+			given grace period before RCU starts
+			soliciting quiescent-state help from
+			rcu_note_context_switch().
+
	rcutree.jiffies_till_first_fqs= [KNL]
			Set delay from grace-period initialization to
			first attempt to force quiescent states.
@@ -3526,7 +3532,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
			the allocated input device; If set to 0, video driver
			will only send out the event without touching backlight
			brightness level.
-			default: 0
+			default: 1
 
	virtio_mmio.device=
			[VMMIO] Memory mapped virtio (platform) device.


@@ -156,7 +156,6 @@ F:	drivers/net/hamradio/6pack.c
 
 8169 10/100/1000 GIGABIT ETHERNET DRIVER
 M:	Realtek linux nic maintainers <nic_swsd@realtek.com>
-M:	Francois Romieu <romieu@fr.zoreil.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
 F:	drivers/net/ethernet/realtek/r8169.c
@@ -4511,8 +4510,7 @@ S:	Supported
 F:	drivers/idle/i7300_idle.c
 
 IEEE 802.15.4 SUBSYSTEM
-M:	Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
-M:	Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+M:	Alexander Aring <alex.aring@gmail.com>
 L:	linux-zigbee-devel@lists.sourceforge.net (moderated for non-subscribers)
 W:	http://apps.sourceforge.net/trac/linux-zigbee
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/lowpan/lowpan.git


@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 16
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Shuffling Zombie Juror
 
 # *DOCUMENTATION*


@@ -6,6 +6,7 @@ config ARM
	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
	select ARCH_HAVE_CUSTOM_GPIO_H
	select ARCH_MIGHT_HAVE_PC_PARPORT
+	select ARCH_SUPPORTS_ATOMIC_RMW
	select ARCH_USE_BUILTIN_BSWAP
	select ARCH_USE_CMPXCHG_LOCKREF
	select ARCH_WANT_IPC_PARSE_VERSION


@@ -927,7 +927,7 @@
			compatible = "atmel,at91rm9200-ohci", "usb-ohci";
			reg = <0x00500000 0x00100000>;
			interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>;
-			clocks = <&usb>, <&uhphs_clk>, <&udphs_clk>,
+			clocks = <&usb>, <&uhphs_clk>, <&uhphs_clk>,
				 <&uhpck>;
			clock-names = "usb_clk", "ohci_clk", "hclk", "uhpck";
			status = "disabled";


@@ -1126,6 +1126,7 @@
				compatible = "atmel,at91sam9rl-pwm";
				reg = <0xf8034000 0x300>;
				interrupts = <18 IRQ_TYPE_LEVEL_HIGH 4>;
+				clocks = <&pwm_clk>;
				#pwm-cells = <3>;
				status = "disabled";
			};
@@ -1157,8 +1158,7 @@
			compatible = "atmel,at91rm9200-ohci", "usb-ohci";
			reg = <0x00600000 0x100000>;
			interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>;
-			clocks = <&usb>, <&uhphs_clk>, <&udphs_clk>,
-				 <&uhpck>;
+			clocks = <&usb>, <&uhphs_clk>, <&uhphs_clk>, <&uhpck>;
			clock-names = "usb_clk", "ohci_clk", "hclk", "uhpck";
			status = "disabled";
		};


@@ -275,7 +275,7 @@ void store_cpu_topology(unsigned int cpuid)
		cpu_topology[cpuid].socket_id, mpidr);
 }
 
-static inline const int cpu_corepower_flags(void)
+static inline int cpu_corepower_flags(void)
 {
	return SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN;
 }


@@ -40,15 +40,17 @@ static inline void cpu_leave_lowpower(void)
 
 static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
 {
+	u32 mpidr = cpu_logical_map(cpu);
+	u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+
	for (;;) {
 
-		/* make cpu1 to be turned off at next WFI command */
-		if (cpu == 1)
-			exynos_cpu_power_down(cpu);
+		/* Turn the CPU off on next WFI instruction. */
+		exynos_cpu_power_down(core_id);
 
		wfi();
 
-		if (pen_release == cpu_logical_map(cpu)) {
+		if (pen_release == core_id) {
			/*
			 * OK, proper wakeup, we're done
			 */


@@ -90,7 +90,8 @@ static void exynos_secondary_init(unsigned int cpu)
 static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
	unsigned long timeout;
-	unsigned long phys_cpu = cpu_logical_map(cpu);
+	u32 mpidr = cpu_logical_map(cpu);
+	u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	int ret = -ENOSYS;
 
	/*
@@ -104,17 +105,18 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
	 * the holding pen - release it, then wait for it to flag
	 * that it has been released by resetting pen_release.
	 *
-	 * Note that "pen_release" is the hardware CPU ID, whereas
+	 * Note that "pen_release" is the hardware CPU core ID, whereas
	 * "cpu" is Linux's internal ID.
	 */
-	write_pen_release(phys_cpu);
+	write_pen_release(core_id);
 
-	if (!exynos_cpu_power_state(cpu)) {
-		exynos_cpu_power_up(cpu);
+	if (!exynos_cpu_power_state(core_id)) {
+		exynos_cpu_power_up(core_id);
		timeout = 10;
 
		/* wait max 10 ms until cpu1 is on */
-		while (exynos_cpu_power_state(cpu) != S5P_CORE_LOCAL_PWR_EN) {
+		while (exynos_cpu_power_state(core_id)
+		       != S5P_CORE_LOCAL_PWR_EN) {
			if (timeout-- == 0)
				break;
@@ -145,20 +147,20 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
	 * Try to set boot address using firmware first
	 * and fall back to boot register if it fails.
	 */
-	ret = call_firmware_op(set_cpu_boot_addr, phys_cpu, boot_addr);
+	ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr);
	if (ret && ret != -ENOSYS)
		goto fail;
	if (ret == -ENOSYS) {
-		void __iomem *boot_reg = cpu_boot_reg(phys_cpu);
+		void __iomem *boot_reg = cpu_boot_reg(core_id);
 
		if (IS_ERR(boot_reg)) {
			ret = PTR_ERR(boot_reg);
			goto fail;
		}
-		__raw_writel(boot_addr, cpu_boot_reg(phys_cpu));
+		__raw_writel(boot_addr, cpu_boot_reg(core_id));
	}
 
-	call_firmware_op(cpu_boot, phys_cpu);
+	call_firmware_op(cpu_boot, core_id);
 
	arch_send_wakeup_ipi_mask(cpumask_of(cpu));
@@ -227,22 +229,24 @@ static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
	 * boot register if it fails.
	 */
	for (i = 1; i < max_cpus; ++i) {
-		unsigned long phys_cpu;
		unsigned long boot_addr;
+		u32 mpidr;
+		u32 core_id;
		int ret;
 
-		phys_cpu = cpu_logical_map(i);
+		mpidr = cpu_logical_map(i);
+		core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		boot_addr = virt_to_phys(exynos4_secondary_startup);
 
-		ret = call_firmware_op(set_cpu_boot_addr, phys_cpu, boot_addr);
+		ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr);
		if (ret && ret != -ENOSYS)
			break;
		if (ret == -ENOSYS) {
-			void __iomem *boot_reg = cpu_boot_reg(phys_cpu);
+			void __iomem *boot_reg = cpu_boot_reg(core_id);
 
			if (IS_ERR(boot_reg))
				break;
-			__raw_writel(boot_addr, cpu_boot_reg(phys_cpu));
+			__raw_writel(boot_addr, cpu_boot_reg(core_id));
		}
	}
 }


@@ -292,6 +292,10 @@ static struct notifier_block mvebu_hwcc_nb = {
	.notifier_call = mvebu_hwcc_notifier,
 };
 
+static struct notifier_block mvebu_hwcc_pci_nb = {
+	.notifier_call = mvebu_hwcc_notifier,
+};
+
 static void __init armada_370_coherency_init(struct device_node *np)
 {
	struct resource res;
@@ -427,7 +431,7 @@ static int __init coherency_pci_init(void)
 {
	if (coherency_available())
		bus_register_notifier(&pci_bus_type,
-				       &mvebu_hwcc_nb);
+				       &mvebu_hwcc_pci_nb);
	return 0;
 }


@@ -15,6 +15,8 @@
 #include <linux/linkage.h>
 #include <linux/init.h>
 
+#include <asm/assembler.h>
+
 __CPUINIT
 
 #define CPU_RESUME_ADDR_REG	0xf10182d4
@@ -22,13 +24,18 @@
 .global armada_375_smp_cpu1_enable_code_end
 
 armada_375_smp_cpu1_enable_code_start:
-	ldr	r0, [pc, #4]
+ARM_BE8(setend	be)
+	adr	r0, 1f
+	ldr	r0, [r0]
	ldr	r1, [r0]
+ARM_BE8(rev	r1, r1)
	mov	pc, r1
+1:
	.word	CPU_RESUME_ADDR_REG
 armada_375_smp_cpu1_enable_code_end:
 
 ENTRY(mvebu_cortex_a9_secondary_startup)
+ARM_BE8(setend	be)
	bl	v7_invalidate_l1
	b	secondary_startup
 ENDPROC(mvebu_cortex_a9_secondary_startup)


@@ -201,12 +201,12 @@ static noinline int do_armada_370_xp_cpu_suspend(unsigned long deepidle)
 
	/* Test the CR_C bit and set it if it was cleared */
	asm volatile(
-	"mrc	p15, 0, %0, c1, c0, 0 \n\t"
-	"tst	%0, #(1 << 2) \n\t"
-	"orreq	%0, %0, #(1 << 2) \n\t"
-	"mcreq	p15, 0, %0, c1, c0, 0 \n\t"
+	"mrc	p15, 0, r0, c1, c0, 0 \n\t"
+	"tst	r0, #(1 << 2) \n\t"
+	"orreq	r0, r0, #(1 << 2) \n\t"
+	"mcreq	p15, 0, r0, c1, c0, 0 \n\t"
	"isb	"
-	: : "r" (0));
+	: : : "r0");
 
	pr_warn("Failed to suspend the system\n");


@@ -4,6 +4,7 @@ config ARM64
	select ARCH_HAS_OPP
	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
	select ARCH_USE_CMPXCHG_LOCKREF
+	select ARCH_SUPPORTS_ATOMIC_RMW
	select ARCH_WANT_OPTIONAL_GPIOLIB
	select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
	select ARCH_WANT_FRAME_POINTERS


@@ -12,8 +12,6 @@
 #include <linux/efi.h>
 #include <linux/libfdt.h>
 #include <asm/sections.h>
-#include <generated/compile.h>
-#include <generated/utsrelease.h>
 
 /*
  * AArch64 requires the DTB to be 8-byte aligned in the first 512MiB from


@@ -145,6 +145,7 @@ config PPC
	select HAVE_IRQ_EXIT_ON_IRQ_STACK
	select ARCH_USE_CMPXCHG_LOCKREF if PPC64
	select HAVE_ARCH_AUDITSYSCALL
+	select ARCH_SUPPORTS_ATOMIC_RMW
 
 config GENERIC_CSUM
	def_bool CPU_LITTLE_ENDIAN


@@ -747,7 +747,7 @@ int setup_profiling_timer(unsigned int multiplier)
 
 #ifdef CONFIG_SCHED_SMT
 /* cpumask of CPUs with asymetric SMT dependancy */
-static const int powerpc_smt_flags(void)
+static int powerpc_smt_flags(void)
 {
	int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;


@@ -390,12 +390,16 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
		case BPF_ANC | SKF_AD_VLAN_TAG:
		case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
+			BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
+
			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  vlan_tci));
-			if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
-				PPC_ANDI(r_A, r_A, VLAN_VID_MASK);
-			else
+			if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
+				PPC_ANDI(r_A, r_A, ~VLAN_TAG_PRESENT);
+			} else {
				PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT);
+				PPC_SRWI(r_A, r_A, 12);
+			}
			break;
		case BPF_ANC | SKF_AD_QUEUE:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
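For context on the hunk above: under classic BPF, the SKF_AD_VLAN_TAG ancillary load returns the VLAN TCI with the "tag present" marker bit cleared, while SKF_AD_VLAN_TAG_PRESENT must return exactly 0 or 1. A minimal C sketch of those semantics (illustrative only, not the kernel's interpreter code; it assumes VLAN_TAG_PRESENT is 0x1000, which is exactly what the added BUILD_BUG_ON asserts) shows why the JIT can use a plain right shift by 12:

/* Sketch of the classic-BPF VLAN ancillary semantics the JIT reproduces. */
#define VLAN_TAG_PRESENT	0x1000

static unsigned int skf_vlan_tag(unsigned short vlan_tci)
{
	/* tag value: keep everything except the "present" marker bit */
	return vlan_tci & ~VLAN_TAG_PRESENT;
}

static unsigned int skf_vlan_tag_present(unsigned short vlan_tci)
{
	/* 0 or 1: bit 12 masked out and shifted down, i.e. !!(tci & 0x1000) */
	return (vlan_tci & VLAN_TAG_PRESENT) >> 12;
}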


@@ -78,6 +78,7 @@ config SPARC64
	select HAVE_C_RECORDMCOUNT
	select NO_BOOTMEM
	select HAVE_ARCH_AUDITSYSCALL
+	select ARCH_SUPPORTS_ATOMIC_RMW
 
 config ARCH_DEFCONFIG
	string


@@ -12,6 +12,7 @@
 #include <mem_user.h>
 #include <os.h>
 #include <skas.h>
+#include <kern_util.h>
 
 struct host_vm_change {
	struct host_vm_op {
@@ -124,6 +125,9 @@ static int add_munmap(unsigned long addr, unsigned long len,
	struct host_vm_op *last;
	int ret = 0;
 
+	if ((addr >= STUB_START) && (addr < STUB_END))
+		return -EINVAL;
+
	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MUNMAP) &&
@@ -283,8 +287,11 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
	/* This is not an else because ret is modified above */
	if (ret) {
		printk(KERN_ERR "fix_range_common: failed, killing current "
-		       "process\n");
+		       "process: %d\n", task_tgid_vnr(current));
+		/* We are under mmap_sem, release it such that current can terminate */
+		up_write(&current->mm->mmap_sem);
		force_sig(SIGKILL, current);
+		do_signal();
	}
 }


@@ -206,7 +206,7 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user,
	int is_write = FAULT_WRITE(fi);
	unsigned long address = FAULT_ADDRESS(fi);
 
-	if (regs)
+	if (!is_user && regs)
		current->thread.segv_regs = container_of(regs, struct pt_regs, regs);
 
	if (!is_user && (address >= start_vm) && (address < end_vm)) {


@@ -54,7 +54,7 @@ static int ptrace_dump_regs(int pid)
 
 void wait_stub_done(int pid)
 {
-	int n, status, err, bad_stop = 0;
+	int n, status, err;
 
	while (1) {
		CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL));
@@ -74,8 +74,6 @@ void wait_stub_done(int pid)
 
	if (((1 << WSTOPSIG(status)) & STUB_DONE_MASK) != 0)
		return;
-	else
-		bad_stop = 1;
 
 bad_wait:
	err = ptrace_dump_regs(pid);
@@ -85,10 +83,7 @@ bad_wait:
	printk(UM_KERN_ERR "wait_stub_done : failed to wait for SIGTRAP, "
	       "pid = %d, n = %d, errno = %d, status = 0x%x\n", pid, n, errno,
	       status);
-	if (bad_stop)
-		kill(pid, SIGKILL);
-	else
-		fatal_sigsegv();
+	fatal_sigsegv();
 }
 
 extern unsigned long current_stub_stack(void);


@@ -131,6 +131,7 @@ config X86
	select HAVE_CC_STACKPROTECTOR
	select GENERIC_CPU_AUTOPROBE
	select HAVE_ARCH_AUDITSYSCALL
+	select ARCH_SUPPORTS_ATOMIC_RMW
 
 config INSTRUCTION_DECODER
	def_bool y


@@ -91,10 +91,9 @@ bs_die:
 
	.section ".bsdata", "a"
 bugger_off_msg:
-	.ascii	"Direct floppy boot is not supported. "
-	.ascii	"Use a boot loader program instead.\r\n"
+	.ascii	"Use a boot loader.\r\n"
	.ascii	"\n"
-	.ascii	"Remove disk and press any key to reboot ...\r\n"
+	.ascii	"Remove disk and press any key to reboot...\r\n"
	.byte	0
 
 #ifdef CONFIG_EFI_STUB
@@ -108,7 +107,7 @@ coff_header:
 #else
	.word	0x8664				# x86-64
 #endif
-	.word	3				# nr_sections
+	.word	4				# nr_sections
	.long	0 				# TimeDateStamp
	.long	0				# PointerToSymbolTable
	.long	1				# NumberOfSymbols
@@ -250,6 +249,25 @@ section_table:
	.word	0				# NumberOfLineNumbers
	.long	0x60500020			# Characteristics (section flags)
 
+	#
+	# The offset & size fields are filled in by build.c.
+	#
+	.ascii	".bss"
+	.byte	0
+	.byte	0
+	.byte	0
+	.byte	0
+	.long	0
+	.long	0x0
+	.long	0				# Size of initialized data
+						# on disk
+	.long	0x0
+	.long	0				# PointerToRelocations
+	.long	0				# PointerToLineNumbers
+	.word	0				# NumberOfRelocations
+	.word	0				# NumberOfLineNumbers
+	.long	0xc8000080			# Characteristics (section flags)
+
 #endif /* CONFIG_EFI_STUB */
 
 # Kernel attributes; used by setup.  This is part 1 of the


@@ -143,7 +143,7 @@ static void usage(void)
 
 #ifdef CONFIG_EFI_STUB
 
-static void update_pecoff_section_header(char *section_name, u32 offset, u32 size)
+static void update_pecoff_section_header_fields(char *section_name, u32 vma, u32 size, u32 datasz, u32 offset)
 {
	unsigned int pe_header;
	unsigned short num_sections;
@@ -164,10 +164,10 @@ static void update_pecoff_section_header(char *section_name, u32 offset, u32 siz
			put_unaligned_le32(size, section + 0x8);
 
			/* section header vma field */
-			put_unaligned_le32(offset, section + 0xc);
+			put_unaligned_le32(vma, section + 0xc);
 
			/* section header 'size of initialised data' field */
-			put_unaligned_le32(size, section + 0x10);
+			put_unaligned_le32(datasz, section + 0x10);
 
			/* section header 'file offset' field */
			put_unaligned_le32(offset, section + 0x14);
@@ -179,6 +179,11 @@ static void update_pecoff_section_header(char *section_name, u32 offset, u32 siz
	}
 }
 
+static void update_pecoff_section_header(char *section_name, u32 offset, u32 size)
+{
+	update_pecoff_section_header_fields(section_name, offset, size, size, offset);
+}
+
 static void update_pecoff_setup_and_reloc(unsigned int size)
 {
	u32 setup_offset = 0x200;
@@ -203,9 +208,6 @@ static void update_pecoff_text(unsigned int text_start, unsigned int file_sz)
 
	pe_header = get_unaligned_le32(&buf[0x3c]);
 
-	/* Size of image */
-	put_unaligned_le32(file_sz, &buf[pe_header + 0x50]);
-
	/*
	 * Size of code: Subtract the size of the first sector (512 bytes)
	 * which includes the header.
@@ -220,6 +222,22 @@ static void update_pecoff_text(unsigned int text_start, unsigned int file_sz)
	update_pecoff_section_header(".text", text_start, text_sz);
 }
 
+static void update_pecoff_bss(unsigned int file_sz, unsigned int init_sz)
+{
+	unsigned int pe_header;
+	unsigned int bss_sz = init_sz - file_sz;
+
+	pe_header = get_unaligned_le32(&buf[0x3c]);
+
+	/* Size of uninitialized data */
+	put_unaligned_le32(bss_sz, &buf[pe_header + 0x24]);
+
+	/* Size of image */
+	put_unaligned_le32(init_sz, &buf[pe_header + 0x50]);
+
+	update_pecoff_section_header_fields(".bss", file_sz, bss_sz, 0, 0);
+}
+
 static int reserve_pecoff_reloc_section(int c)
 {
	/* Reserve 0x20 bytes for .reloc section */
@@ -259,6 +277,8 @@ static void efi_stub_entry_update(void)
 static inline void update_pecoff_setup_and_reloc(unsigned int size) {}
 static inline void update_pecoff_text(unsigned int text_start,
				      unsigned int file_sz) {}
+static inline void update_pecoff_bss(unsigned int file_sz,
+				     unsigned int init_sz) {}
 static inline void efi_stub_defaults(void) {}
 static inline void efi_stub_entry_update(void) {}
 
@@ -310,7 +330,7 @@ static void parse_zoffset(char *fname)
 
 int main(int argc, char ** argv)
 {
-	unsigned int i, sz, setup_sectors;
+	unsigned int i, sz, setup_sectors, init_sz;
	int c;
	u32 sys_size;
	struct stat sb;
@@ -376,7 +396,9 @@ int main(int argc, char ** argv)
	buf[0x1f1] = setup_sectors-1;
	put_unaligned_le32(sys_size, &buf[0x1f4]);
 
-	update_pecoff_text(setup_sectors * 512, sz + i + ((sys_size * 16) - sz));
+	update_pecoff_text(setup_sectors * 512, i + (sys_size * 16));
+	init_sz = get_unaligned_le32(&buf[0x260]);
+	update_pecoff_bss(i + (sys_size * 16), init_sz);
 
	efi_stub_entry_update();
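For context on the build.c changes: the x86 boot protocol stores init_size, the memory footprint the kernel needs while booting, at offset 0x260 of the setup header, and everything between the end of the on-disk image and init_size is uninitialized (.bss) memory. A rough, self-contained C sketch of the relationship (the struct and function here are illustrative, not part of build.c):

/* Illustrative sketch only: how the new .bss accounting relates the
 * on-disk image size to the PE header fields patched above.
 */
struct pe_sizes {
	unsigned int size_of_image;	/* written at pe_header + 0x50 */
	unsigned int size_of_bss;	/* written at pe_header + 0x24 */
};

static struct pe_sizes compute_pe_sizes(unsigned int file_sz, unsigned int init_sz)
{
	struct pe_sizes s;

	s.size_of_image = init_sz;		/* runtime footprint, not file size */
	s.size_of_bss = init_sz - file_sz;	/* memory beyond the on-disk image */
	return s;
}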


@@ -841,7 +841,6 @@ static int apm_do_idle(void)
	u32 eax;
	u8 ret = 0;
	int idled = 0;
-	int polling;
	int err = 0;
 
	if (!need_resched()) {


@@ -1381,6 +1381,15 @@ again:
 
	intel_pmu_lbr_read();
 
+	/*
+	 * CondChgd bit 63 doesn't mean any overflow status. Ignore
+	 * and clear the bit.
+	 */
+	if (__test_and_clear_bit(63, (unsigned long *)&status)) {
+		if (!status)
+			goto done;
+	}
+
	/*
	 * PEBS overflow sets bit 62 in the global status register
	 */


@@ -175,7 +175,7 @@ void init_espfix_ap(void)
	if (!pud_present(pud)) {
		pmd_p = (pmd_t *)__get_free_page(PGALLOC_GFP);
		pud = __pud(__pa(pmd_p) | (PGTABLE_PROT & ptemask));
-		paravirt_alloc_pud(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);
+		paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);
		for (n = 0; n < ESPFIX_PUD_CLONES; n++)
			set_pud(&pud_p[n], pud);
	}
@@ -185,7 +185,7 @@ void init_espfix_ap(void)
	if (!pmd_present(pmd)) {
		pte_p = (pte_t *)__get_free_page(PGALLOC_GFP);
		pmd = __pmd(__pa(pte_p) | (PGTABLE_PROT & ptemask));
-		paravirt_alloc_pmd(&init_mm, __pa(pte_p) >> PAGE_SHIFT);
+		paravirt_alloc_pte(&init_mm, __pa(pte_p) >> PAGE_SHIFT);
		for (n = 0; n < ESPFIX_PMD_CLONES; n++)
			set_pmd(&pmd_p[n], pmd);
	}
@@ -193,7 +193,6 @@ void init_espfix_ap(void)
	pte_p = pte_offset_kernel(&pmd, addr);
	stack_page = (void *)__get_free_page(GFP_KERNEL);
	pte = __pte(__pa(stack_page) | (__PAGE_KERNEL_RO & ptemask));
-	paravirt_alloc_pte(&init_mm, __pa(stack_page) >> PAGE_SHIFT);
	for (n = 0; n < ESPFIX_PTE_CLONES; n++)
		set_pte(&pte_p[n*PTE_STRIDE], pte);


@@ -920,9 +920,9 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
		tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			mark_tsc_unstable("cpufreq changes");
-	}
 
-	set_cyc2ns_scale(tsc_khz, freq->cpu);
+		set_cyc2ns_scale(tsc_khz, freq->cpu);
+	}
 
	return 0;
 }


@@ -68,7 +68,7 @@ MODULE_AUTHOR("Bruno Ducrot");
 MODULE_DESCRIPTION("ACPI Video Driver");
 MODULE_LICENSE("GPL");
 
-static bool brightness_switch_enabled;
+static bool brightness_switch_enabled = 1;
 module_param(brightness_switch_enabled, bool, 0644);
 
 /*
@@ -581,6 +581,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
	},
	{
	 .callback = video_set_use_native_backlight,
+	 .ident = "HP ProBook 4540s",
+	 .matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+		DMI_MATCH(DMI_PRODUCT_VERSION, "HP ProBook 4540s"),
+		},
+	},
+	{
+	 .callback = video_set_use_native_backlight,
	 .ident = "HP ProBook 2013 models",
	 .matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),


@@ -89,8 +89,13 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
	return dev->archdata.irqs[num];
 #else
	struct resource *r;
-	if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node)
-		return of_irq_get(dev->dev.of_node, num);
+	if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
+		int ret;
+
+		ret = of_irq_get(dev->dev.of_node, num);
+		if (ret >= 0 || ret == -EPROBE_DEFER)
+			return ret;
+	}
 
	r = platform_get_resource(dev, IORESOURCE_IRQ, num);
@@ -133,8 +138,13 @@ int platform_get_irq_byname(struct platform_device *dev, const char *name)
 {
	struct resource *r;
 
-	if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node)
-		return of_irq_get_byname(dev->dev.of_node, name);
+	if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
+		int ret;
+
+		ret = of_irq_get_byname(dev->dev.of_node, name);
+		if (ret >= 0 || ret == -EPROBE_DEFER)
+			return ret;
+	}
 
	r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
	return r ? r->start : -ENXIO;
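The practical effect of the two hunks above is that platform_get_irq() and platform_get_irq_byname() can now return -EPROBE_DEFER when the interrupt controller is not yet available, instead of silently falling back to the resource table. A hedged sketch of how a caller in a driver probe path would consume that (the foo_* names are made up for illustration):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>

static irqreturn_t foo_irq_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* propagates -EPROBE_DEFER so probe is retried later */

	return devm_request_irq(&pdev->dev, irq, foo_irq_handler, 0,
				dev_name(&pdev->dev), NULL);
}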


@@ -90,7 +90,6 @@ static const struct usb_device_id ath3k_table[] = {
	{ USB_DEVICE(0x0b05, 0x17d0) },
	{ USB_DEVICE(0x0CF3, 0x0036) },
	{ USB_DEVICE(0x0CF3, 0x3004) },
-	{ USB_DEVICE(0x0CF3, 0x3005) },
	{ USB_DEVICE(0x0CF3, 0x3008) },
	{ USB_DEVICE(0x0CF3, 0x311D) },
	{ USB_DEVICE(0x0CF3, 0x311E) },
@@ -140,7 +139,6 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
	{ USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
	{ USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 },
	{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
-	{ USB_DEVICE(0x0cf3, 0x3005), .driver_info = BTUSB_ATH3012 },
	{ USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
	{ USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
	{ USB_DEVICE(0x0cf3, 0x311E), .driver_info = BTUSB_ATH3012 },


@@ -162,7 +162,6 @@ static const struct usb_device_id blacklist_table[] = {
	{ USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
	{ USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
	{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
-	{ USB_DEVICE(0x0cf3, 0x3005), .driver_info = BTUSB_ATH3012 },
	{ USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
	{ USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
	{ USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 },


@@ -406,6 +406,7 @@ static int h5_rx_3wire_hdr(struct hci_uart *hu, unsigned char c)
	    H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) {
		BT_ERR("Non-link packet received in non-active state");
		h5_reset_rx(h5);
+		return 0;
	}
 
	h5->rx_func = h5_rx_payload;


@@ -55,16 +55,41 @@ static DEFINE_MUTEX(rng_mutex);
 static int data_avail;
 static u8 *rng_buffer;
 
+static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
+			       int wait);
+
 static size_t rng_buffer_size(void)
 {
	return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
 }
 
+static void add_early_randomness(struct hwrng *rng)
+{
+	unsigned char bytes[16];
+	int bytes_read;
+
+	/*
+	 * Currently only virtio-rng cannot return data during device
+	 * probe, and that's handled in virtio-rng.c itself.  If there
+	 * are more such devices, this call to rng_get_data can be
+	 * made conditional here instead of doing it per-device.
+	 */
+	bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1);
+	if (bytes_read > 0)
+		add_device_randomness(bytes, bytes_read);
+}
+
 static inline int hwrng_init(struct hwrng *rng)
 {
-	if (!rng->init)
-		return 0;
-	return rng->init(rng);
+	if (rng->init) {
+		int ret;
+
+		ret = rng->init(rng);
+		if (ret)
+			return ret;
+	}
+	add_early_randomness(rng);
+	return 0;
 }
 
 static inline void hwrng_cleanup(struct hwrng *rng)
@@ -304,8 +329,6 @@ int hwrng_register(struct hwrng *rng)
 {
	int err = -EINVAL;
	struct hwrng *old_rng, *tmp;
-	unsigned char bytes[16];
-	int bytes_read;
 
	if (rng->name == NULL ||
	    (rng->data_read == NULL && rng->read == NULL))
@@ -347,9 +370,17 @@ int hwrng_register(struct hwrng *rng)
	INIT_LIST_HEAD(&rng->list);
	list_add_tail(&rng->list, &rng_list);
 
-	bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1);
-	if (bytes_read > 0)
-		add_device_randomness(bytes, bytes_read);
+	if (old_rng && !rng->init) {
+		/*
+		 * Use a new device's input to add some randomness to
+		 * the system.  If this rng device isn't going to be
+		 * used right away, its init function hasn't been
+		 * called yet; so only use the randomness from devices
+		 * that don't need an init callback.
+		 */
+		add_early_randomness(rng);
+	}
+
 out_unlock:
	mutex_unlock(&rng_mutex);
 out:


@@ -38,6 +38,8 @@ struct virtrng_info {
	int index;
 };
 
+static bool probe_done;
+
 static void random_recv_done(struct virtqueue *vq)
 {
	struct virtrng_info *vi = vq->vdev->priv;
@@ -67,6 +69,13 @@ static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
	int ret;
	struct virtrng_info *vi = (struct virtrng_info *)rng->priv;
 
+	/*
+	 * Don't ask host for data till we're setup.  This call can
+	 * happen during hwrng_register(), after commit d9e7972619.
+	 */
+	if (unlikely(!probe_done))
+		return 0;
+
	if (!vi->busy) {
		vi->busy = true;
		init_completion(&vi->have_data);
@@ -137,6 +146,7 @@ static int probe_common(struct virtio_device *vdev)
		return err;
	}
 
+	probe_done = true;
	return 0;
 }


@@ -641,7 +641,7 @@ retry:
		} while (unlikely(entropy_count < pool_size-2 && pnfrac));
	}
 
-	if (entropy_count < 0) {
+	if (unlikely(entropy_count < 0)) {
		pr_warn("random: negative entropy/overflow: pool %s count %d\n",
			r->name, entropy_count);
		WARN_ON(1);
@@ -981,7 +981,7 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
		      int reserved)
 {
	int entropy_count, orig;
-	size_t ibytes;
+	size_t ibytes, nfrac;
 
	BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);
 
@@ -999,7 +999,17 @@ retry:
	}
	if (ibytes < min)
		ibytes = 0;
-	if ((entropy_count -= ibytes << (ENTROPY_SHIFT + 3)) < 0)
+
+	if (unlikely(entropy_count < 0)) {
+		pr_warn("random: negative entropy count: pool %s count %d\n",
+			r->name, entropy_count);
+		WARN_ON(1);
+		entropy_count = 0;
+	}
+	nfrac = ibytes << (ENTROPY_SHIFT + 3);
+	if ((size_t) entropy_count > nfrac)
+		entropy_count -= nfrac;
+	else
		entropy_count = 0;
 
	if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
@@ -1376,6 +1386,7 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
			    "with %d bits of entropy available\n",
			    current->comm, nonblocking_pool.entropy_total);
 
+	nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3));
	ret = extract_entropy_user(&nonblocking_pool, buf, nbytes);
 
	trace_urandom_read(8 * nbytes, ENTROPY_BITS(&nonblocking_pool),
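For a sense of scale on the new urandom clamp (assuming ENTROPY_SHIFT is 3, as in drivers/char/random.c of this period): entropy is accounted in 1/8-bit fractions, so a byte count becomes nbytes << (ENTROPY_SHIFT + 3), and the clamp keeps that product within a signed int, which works out to just under 32 MiB per read. A small userspace sketch of the arithmetic, illustrative only:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	const unsigned int entropy_shift = 3;	/* assumed, matching the driver */
	const unsigned long max_nbytes = INT_MAX >> (entropy_shift + 3);

	/* 33554431 bytes, i.e. just under 32 MiB */
	printf("max bytes per read: %lu\n", max_nbytes);
	/* largest "fractional bits" value the accounting can see: 2147483584 */
	printf("as 1/8-bit fractions: %lu (INT_MAX = %d)\n",
	       max_nbytes << (entropy_shift + 3), INT_MAX);
	return 0;
}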


@@ -104,6 +104,7 @@ config ARM_IMX6Q_CPUFREQ
	tristate "Freescale i.MX6 cpufreq support"
	depends on ARCH_MXC
	depends on REGULATOR_ANATOP
+	select PM_OPP
	help
	  This adds cpufreq driver support for Freescale i.MX6 series SoCs.
@@ -118,7 +119,7 @@ config ARM_INTEGRATOR
	  If in doubt, say Y.
 
 config ARM_KIRKWOOD_CPUFREQ
-	def_bool MACH_KIRKWOOD
+	def_bool ARCH_KIRKWOOD || MACH_KIRKWOOD
	help
	  This adds the CPUFreq driver for Marvell Kirkwood
	  SoCs.


@@ -152,11 +152,8 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
		goto out_put_reg;
	}
 
-	ret = of_init_opp_table(cpu_dev);
-	if (ret) {
-		pr_err("failed to init OPP table: %d\n", ret);
-		goto out_put_clk;
-	}
+	/* OPPs might be populated at runtime, don't check for error here */
+	of_init_opp_table(cpu_dev);
 
	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
	if (ret) {


@@ -1153,10 +1153,12 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
	 * the creation of a brand new one. So we need to perform this update
	 * by invoking update_policy_cpu().
	 */
-	if (recover_policy && cpu != policy->cpu)
+	if (recover_policy && cpu != policy->cpu) {
		update_policy_cpu(policy, cpu);
-	else
+		WARN_ON(kobject_move(&policy->kobj, &dev->kobj));
+	} else {
		policy->cpu = cpu;
+	}
 
	cpumask_copy(policy->cpus, cpumask_of(cpu));


@@ -349,7 +349,7 @@ static int __init sa1110_clk_init(void)
		name = "K4S641632D";
	if (machine_is_h3100())
		name = "KM416S4030CT";
-	if (machine_is_jornada720())
+	if (machine_is_jornada720() || machine_is_h3600())
		name = "K4S281632B-1H";
	if (machine_is_nanoengine())
		name = "MT48LC8M16A2TG-75";


@@ -1,4 +1,5 @@
 menu "IEEE 1394 (FireWire) support"
+	depends on HAS_DMA
	depends on PCI || COMPILE_TEST
	# firewire-core does not depend on PCI but is
	# not useful without PCI controller driver


@@ -346,6 +346,7 @@ static __initdata struct {
 
 struct param_info {
	int verbose;
+	int found;
	void *params;
 };
 
@@ -362,16 +363,12 @@ static int __init fdt_find_uefi_params(unsigned long node, const char *uname,
	    (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
		return 0;
 
-	pr_info("Getting parameters from FDT:\n");
-
	for (i = 0; i < ARRAY_SIZE(dt_params); i++) {
		prop = of_get_flat_dt_prop(node, dt_params[i].propname, &len);
-		if (!prop) {
-			pr_err("Can't find %s in device tree!\n",
-			       dt_params[i].name);
+		if (!prop)
			return 0;
-		}
		dest = info->params + dt_params[i].offset;
+		info->found++;
 
		val = of_read_number(prop, len / sizeof(u32));
@@ -390,10 +387,21 @@ static int __init fdt_find_uefi_params(unsigned long node, const char *uname,
 int __init efi_get_fdt_params(struct efi_fdt_params *params, int verbose)
 {
	struct param_info info;
+	int ret;
+
+	pr_info("Getting EFI parameters from FDT:\n");
 
	info.verbose = verbose;
+	info.found = 0;
	info.params = params;
 
-	return of_scan_flat_dt(fdt_find_uefi_params, &info);
+	ret = of_scan_flat_dt(fdt_find_uefi_params, &info);
+	if (!info.found)
+		pr_info("UEFI not found.\n");
+	else if (!ret)
+		pr_err("Can't find '%s' in device tree!\n",
+		       dt_params[info.found].name);
+
+	return ret;
 }
 
 #endif /* CONFIG_EFI_PARAMS_FROM_FDT */


@@ -23,16 +23,6 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
	u32 fdt_val32;
	u64 fdt_val64;
 
-	/*
-	 * Copy definition of linux_banner here. Since this code is
-	 * built as part of the decompressor for ARM v7, pulling
-	 * in version.c where linux_banner is defined for the
-	 * kernel brings other kernel dependencies with it.
-	 */
-	const char linux_banner[] =
-	    "Linux version " UTS_RELEASE " (" LINUX_COMPILE_BY "@"
-	    LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION "\n";
-
	/* Do some checks on provided FDT, if it exists*/
	if (orig_fdt) {
		if (fdt_check_header(orig_fdt)) {


@@ -900,8 +900,6 @@ static int mcp23s08_probe(struct spi_device *spi)
			if (spi_present_mask & (1 << addr))
				chips++;
		}
-		if (!chips)
-			return -ENODEV;
	} else {
		type = spi_get_device_id(spi)->driver_data;
		pdata = dev_get_platdata(&spi->dev);
@@ -940,10 +938,6 @@ static int mcp23s08_probe(struct spi_device *spi)
		if (!(spi_present_mask & (1 << addr)))
			continue;
		chips--;
-		if (chips < 0) {
-			dev_err(&spi->dev, "FATAL: invalid negative chip id\n");
-			goto fail;
-		}
		data->mcp[addr] = &data->chip[chips];
		status = mcp23s08_probe_one(data->mcp[addr], &spi->dev, spi,
					    0x40 | (addr << 1), type, base,


@@ -11673,6 +11673,9 @@ static struct intel_quirk intel_quirks[] = {
 
	/* Toshiba CB35 Chromebook (Celeron 2955U) */
	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
+
+	/* HP Chromebook 14 (Celeron 2955U) */
+	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
 };
 
 static void intel_init_quirks(struct drm_device *dev)
@@ -11911,6 +11914,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
		 * ... */
		plane = crtc->plane;
		crtc->plane = !plane;
+		crtc->primary_enabled = true;
		dev_priv->display.crtc_disable(&crtc->base);
		crtc->plane = plane;


@@ -906,8 +906,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   bpp);
 
-		for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
-			for (clock = min_clock; clock <= max_clock; clock++) {
+		for (clock = min_clock; clock <= max_clock; clock++) {
+			for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
				link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);


@@ -111,6 +111,13 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
 
	pipe_config->adjusted_mode.flags |= flags;
 
+	/* gen2/3 store dither state in pfit control, needs to match */
+	if (INTEL_INFO(dev)->gen < 4) {
+		tmp = I915_READ(PFIT_CONTROL);
+
+		pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
+	}
+
	dotclock = pipe_config->port_clock;
 
	if (HAS_PCH_SPLIT(dev_priv->dev))


@@ -361,16 +361,16 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
		pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
				 PFIT_FILTER_FUZZY);
 
+	/* Make sure pre-965 set dither correctly for 18bpp panels. */
+	if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18)
+		pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+
 out:
	if ((pfit_control & PFIT_ENABLE) == 0) {
		pfit_control = 0;
		pfit_pgm_ratios = 0;
	}
 
-	/* Make sure pre-965 set dither correctly for 18bpp panels. */
-	if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18)
-		pfit_control |= PANEL_8TO6_DITHER_ENABLE;
-
	pipe_config->gmch_pfit.control = pfit_control;
	pipe_config->gmch_pfit.pgm_ratios = pfit_pgm_ratios;
	pipe_config->gmch_pfit.lvds_border_bits = border;


@@ -192,11 +192,11 @@ alarm_timer_callback(struct nouveau_alarm *alarm)
	nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_shutdown,
					     NOUVEAU_THERM_THRS_SHUTDOWN);
 
+	spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
+
	/* schedule the next poll in one second */
	if (therm->temp_get(therm) >= 0 && list_empty(&alarm->head))
-		ptimer->alarm(ptimer, 1000 * 1000 * 1000, alarm);
-
-	spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
+		ptimer->alarm(ptimer, 1000000000ULL, alarm);
 }
 
 void


@@ -33,6 +33,9 @@ irqreturn_t qxl_irq_handler(int irq, void *arg)
 
	pending = xchg(&qdev->ram_header->int_pending, 0);
 
+	if (!pending)
+		return IRQ_NONE;
+
	atomic_inc(&qdev->irq_received);
 
	if (pending & QXL_INTERRUPT_DISPLAY) {


@@ -1414,8 +1414,8 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
	tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN;
	WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
 
-	/* set pageflip to happen anywhere in vblank interval */
-	WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
+	/* set pageflip to happen only at start of vblank interval (front porch) */
+	WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
 
	if (!atomic && fb && fb != crtc->primary->fb) {
		radeon_fb = to_radeon_framebuffer(fb);
@@ -1614,8 +1614,8 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
	tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN;
	WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
 
-	/* set pageflip to happen anywhere in vblank interval */
-	WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
+	/* set pageflip to happen only at start of vblank interval (front porch) */
+	WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
 
	if (!atomic && fb && fb != crtc->primary->fb) {
		radeon_fb = to_radeon_framebuffer(fb);


@@ -183,7 +183,6 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
	struct backlight_properties props;
	struct radeon_backlight_privdata *pdata;
	struct radeon_encoder_atom_dig *dig;
-	u8 backlight_level;
	char bl_name[16];
 
	/* Mac laptops with multiple GPUs use the gmux driver for backlight
@@ -222,12 +221,17 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
 
	pdata->encoder = radeon_encoder;
 
-	backlight_level = radeon_atom_get_backlight_level_from_reg(rdev);
-
	dig = radeon_encoder->enc_priv;
	dig->bl_dev = bd;
 
	bd->props.brightness = radeon_atom_backlight_get_brightness(bd);
+	/* Set a reasonable default here if the level is 0 otherwise
+	 * fbdev will attempt to turn the backlight on after console
+	 * unblanking and it will try and restore 0 which turns the backlight
+	 * off again.
+	 */
+	if (bd->props.brightness == 0)
+		bd->props.brightness = RADEON_MAX_BL_LEVEL;
	bd->props.power = FB_BLANK_UNBLANK;
	backlight_update_status(bd);


@@ -2642,8 +2642,9 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
	for (i = 0; i < rdev->num_crtc; i++) {
		if (save->crtc_enabled[i]) {
			tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
-			if ((tmp & 0x3) != 0) {
-				tmp &= ~0x3;
+			if ((tmp & 0x7) != 3) {
+				tmp &= ~0x7;
+				tmp |= 0x3;
				WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
			}
			tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);


@@ -239,7 +239,6 @@
 #       define EVERGREEN_CRTC_V_BLANK                   (1 << 0)
 #define EVERGREEN_CRTC_STATUS_POSITION                  0x6e90
 #define EVERGREEN_CRTC_STATUS_HV_COUNT                  0x6ea0
-#define EVERGREEN_MASTER_UPDATE_MODE                    0x6ef8
 #define EVERGREEN_CRTC_UPDATE_LOCK                      0x6ed4
 #define EVERGREEN_MASTER_UPDATE_LOCK                    0x6ef4
 #define EVERGREEN_MASTER_UPDATE_MODE                    0x6ef8


@@ -684,10 +684,9 @@ struct radeon_flip_work {
	struct work_struct		unpin_work;
	struct radeon_device		*rdev;
	int				crtc_id;
-	struct drm_framebuffer		*fb;
+	uint64_t			base;
	struct drm_pending_vblank_event *event;
	struct radeon_bo		*old_rbo;
-	struct radeon_bo		*new_rbo;
	struct radeon_fence		*fence;
 };


@@ -366,7 +366,6 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
	spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
 
	drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id);
-	radeon_fence_unref(&work->fence);
	radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id);
	queue_work(radeon_crtc->flip_queue, &work->unpin_work);
 }
@@ -386,51 +385,108 @@ static void radeon_flip_work_func(struct work_struct *__work)
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[work->crtc_id];
 
	struct drm_crtc *crtc = &radeon_crtc->base;
-	struct drm_framebuffer *fb = work->fb;
-
-	uint32_t tiling_flags, pitch_pixels;
-	uint64_t base;
-
	unsigned long flags;
	int r;
 
	down_read(&rdev->exclusive_lock);
-	while (work->fence) {
+	if (work->fence) {
		r = radeon_fence_wait(work->fence, false);
		if (r == -EDEADLK) {
			up_read(&rdev->exclusive_lock);
			r = radeon_gpu_reset(rdev);
			down_read(&rdev->exclusive_lock);
		}
+		if (r)
+			DRM_ERROR("failed to wait on page flip fence (%d)!\n", r);
 
-		if (r) {
-			DRM_ERROR("failed to wait on page flip fence (%d)!\n",
-				  r);
-			goto cleanup;
-		} else
-			radeon_fence_unref(&work->fence);
+		/* We continue with the page flip even if we failed to wait on
+		 * the fence, otherwise the DRM core and userspace will be
+		 * confused about which BO the CRTC is scanning out
+		 */
+
+		radeon_fence_unref(&work->fence);
	}
 
+	/* We borrow the event spin lock for protecting flip_status */
+	spin_lock_irqsave(&crtc->dev->event_lock, flags);
+
+	/* set the proper interrupt */
+	radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);
+
+	/* do the flip (mmio) */
+	radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base);
+
+	radeon_crtc->flip_status = RADEON_FLIP_SUBMITTED;
+	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+	up_read(&rdev->exclusive_lock);
+}
+
+static int radeon_crtc_page_flip(struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 struct drm_pending_vblank_event *event,
+				 uint32_t page_flip_flags)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct radeon_framebuffer *old_radeon_fb;
+	struct radeon_framebuffer *new_radeon_fb;
+	struct drm_gem_object *obj;
+	struct radeon_flip_work *work;
+	struct radeon_bo *new_rbo;
+	uint32_t tiling_flags, pitch_pixels;
+	uint64_t base;
+	unsigned long flags;
+	int r;
+
+	work = kzalloc(sizeof *work, GFP_KERNEL);
+	if (work == NULL)
+		return -ENOMEM;
+
+	INIT_WORK(&work->flip_work, radeon_flip_work_func);
+	INIT_WORK(&work->unpin_work, radeon_unpin_work_func);
+
+	work->rdev = rdev;
+	work->crtc_id = radeon_crtc->crtc_id;
+	work->event = event;
+
+	/* schedule unpin of the old buffer */
+	old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
+	obj = old_radeon_fb->obj;
+
+	/* take a reference to the old object */
+	drm_gem_object_reference(obj);
+	work->old_rbo = gem_to_radeon_bo(obj);
+
+	new_radeon_fb = to_radeon_framebuffer(fb);
+	obj = new_radeon_fb->obj;
+	new_rbo = gem_to_radeon_bo(obj);
+
+	spin_lock(&new_rbo->tbo.bdev->fence_lock);
+	if (new_rbo->tbo.sync_obj)
+		work->fence = radeon_fence_ref(new_rbo->tbo.sync_obj);
+	spin_unlock(&new_rbo->tbo.bdev->fence_lock);
+
	/* pin the new buffer */
-	DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n",
-			 work->old_rbo, work->new_rbo);
+	DRM_DEBUG_DRIVER("flip-ioctl() cur_rbo = %p, new_rbo = %p\n",
+			 work->old_rbo, new_rbo);
 
-	r = radeon_bo_reserve(work->new_rbo, false);
+	r = radeon_bo_reserve(new_rbo, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to reserve new rbo buffer before flip\n");
		goto cleanup;
	}
	/* Only 27 bit offset for legacy CRTC */
-	r = radeon_bo_pin_restricted(work->new_rbo, RADEON_GEM_DOMAIN_VRAM,
+	r = radeon_bo_pin_restricted(new_rbo, RADEON_GEM_DOMAIN_VRAM,
				     ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, &base);
	if (unlikely(r != 0)) {
-		radeon_bo_unreserve(work->new_rbo);
+		radeon_bo_unreserve(new_rbo);
		r = -EINVAL;
		DRM_ERROR("failed to pin new rbo buffer before flip\n");
		goto cleanup;
	}
-	radeon_bo_get_tiling_flags(work->new_rbo, &tiling_flags, NULL);
-	radeon_bo_unreserve(work->new_rbo);
+	radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
+	radeon_bo_unreserve(new_rbo);
 
	if (!ASIC_IS_AVIVO(rdev)) {
		/* crtc offset is from display base addr not FB location */
@@ -467,6 +523,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
		}
		base &= ~7;
	}
+	work->base = base;
 
	r = drm_vblank_get(crtc->dev, radeon_crtc->crtc_id);
	if (r) {
@@ -477,88 +534,11 @@ static void radeon_flip_work_func(struct work_struct *__work)
	/* We borrow the event spin lock for protecting flip_work */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);
 
-	/* set the proper interrupt */
-	radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);
-
-	/* do the flip (mmio) */
-	radeon_page_flip(rdev, radeon_crtc->crtc_id, base);
-
-	radeon_crtc->flip_status = RADEON_FLIP_SUBMITTED;
-	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-	up_read(&rdev->exclusive_lock);
-
-	return;
-
-pflip_cleanup:
-	if (unlikely(radeon_bo_reserve(work->new_rbo, false) != 0)) {
-		DRM_ERROR("failed to reserve new rbo in error path\n");
-		goto cleanup;
-	}
-	if (unlikely(radeon_bo_unpin(work->new_rbo) != 0)) {
-		DRM_ERROR("failed to unpin new rbo in error path\n");
-	}
-	radeon_bo_unreserve(work->new_rbo);
-
-cleanup:
-	drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
-	radeon_fence_unref(&work->fence);
-	kfree(work);
-	up_read(&rdev->exclusive_lock);
-}
-
-static int radeon_crtc_page_flip(struct drm_crtc *crtc,
-				 struct drm_framebuffer *fb,
-				 struct drm_pending_vblank_event *event,
-				 uint32_t page_flip_flags)
-{
-	struct drm_device *dev = crtc->dev;
-	struct radeon_device *rdev = dev->dev_private;
-	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
-	struct radeon_framebuffer *old_radeon_fb;
-	struct radeon_framebuffer *new_radeon_fb;
-	struct drm_gem_object *obj;
-	struct radeon_flip_work *work;
-	unsigned long flags;
-
-	work = kzalloc(sizeof *work, GFP_KERNEL);
-	if (work == NULL)
-		return -ENOMEM;
-
-	INIT_WORK(&work->flip_work, radeon_flip_work_func);
-	INIT_WORK(&work->unpin_work, radeon_unpin_work_func);
-
-	work->rdev = rdev;
-	work->crtc_id = radeon_crtc->crtc_id;
-	work->fb = fb;
-	work->event = event;
-
-	/* schedule unpin of the old buffer */
-	old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
obj = old_radeon_fb->obj;
/* take a reference to the old object */
drm_gem_object_reference(obj);
work->old_rbo = gem_to_radeon_bo(obj);
new_radeon_fb = to_radeon_framebuffer(fb);
obj = new_radeon_fb->obj;
work->new_rbo = gem_to_radeon_bo(obj);
spin_lock(&work->new_rbo->tbo.bdev->fence_lock);
if (work->new_rbo->tbo.sync_obj)
work->fence = radeon_fence_ref(work->new_rbo->tbo.sync_obj);
spin_unlock(&work->new_rbo->tbo.bdev->fence_lock);
/* We borrow the event spin lock for protecting flip_work */
spin_lock_irqsave(&crtc->dev->event_lock, flags);
if (radeon_crtc->flip_status != RADEON_FLIP_NONE) { if (radeon_crtc->flip_status != RADEON_FLIP_NONE) {
DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
spin_unlock_irqrestore(&crtc->dev->event_lock, flags); spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); r = -EBUSY;
radeon_fence_unref(&work->fence); goto vblank_cleanup;
kfree(work);
return -EBUSY;
} }
radeon_crtc->flip_status = RADEON_FLIP_PENDING; radeon_crtc->flip_status = RADEON_FLIP_PENDING;
radeon_crtc->flip_work = work; radeon_crtc->flip_work = work;
@ -569,8 +549,27 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
spin_unlock_irqrestore(&crtc->dev->event_lock, flags); spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
queue_work(radeon_crtc->flip_queue, &work->flip_work); queue_work(radeon_crtc->flip_queue, &work->flip_work);
return 0; return 0;
vblank_cleanup:
drm_vblank_put(crtc->dev, radeon_crtc->crtc_id);
pflip_cleanup:
if (unlikely(radeon_bo_reserve(new_rbo, false) != 0)) {
DRM_ERROR("failed to reserve new rbo in error path\n");
goto cleanup;
}
if (unlikely(radeon_bo_unpin(new_rbo) != 0)) {
DRM_ERROR("failed to unpin new rbo in error path\n");
}
radeon_bo_unreserve(new_rbo);
cleanup:
drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
radeon_fence_unref(&work->fence);
kfree(work);
return r;
} }
static int static int
@ -830,6 +829,10 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
struct radeon_device *rdev = dev->dev_private; struct radeon_device *rdev = dev->dev_private;
int ret = 0; int ret = 0;
/* don't leak the edid if we already fetched it in detect() */
if (radeon_connector->edid)
goto got_edid;
/* on hw with routers, select right port */ /* on hw with routers, select right port */
if (radeon_connector->router.ddc_valid) if (radeon_connector->router.ddc_valid)
radeon_router_select_ddc_port(radeon_connector); radeon_router_select_ddc_port(radeon_connector);
@ -868,6 +871,7 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev); radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
} }
if (radeon_connector->edid) { if (radeon_connector->edid) {
got_edid:
drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid); drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid); ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid); drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid);

View File

@ -406,8 +406,9 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
for (i = 0; i < rdev->num_crtc; i++) { for (i = 0; i < rdev->num_crtc; i++) {
if (save->crtc_enabled[i]) { if (save->crtc_enabled[i]) {
tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i]); tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i]);
if ((tmp & 0x3) != 0) { if ((tmp & 0x7) != 3) {
tmp &= ~0x3; tmp &= ~0x7;
tmp |= 0x3;
WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i], tmp); WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
} }
tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]); tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);

View File

@ -246,8 +246,8 @@ void hv_fcopy_onchannelcallback(void *context)
/* /*
* Send the information to the user-level daemon. * Send the information to the user-level daemon.
*/ */
fcopy_send_data();
schedule_delayed_work(&fcopy_work, 5*HZ); schedule_delayed_work(&fcopy_work, 5*HZ);
fcopy_send_data();
return; return;
} }
icmsghdr->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE; icmsghdr->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;

View File

@ -515,7 +515,7 @@ static ssize_t set_temp_min(struct device *dev,
return -EINVAL; return -EINVAL;
temp = DIV_ROUND_CLOSEST(temp, 1000); temp = DIV_ROUND_CLOSEST(temp, 1000);
temp = clamp_val(temp, 0, 255); temp = clamp_val(temp, -128, 127);
mutex_lock(&data->lock); mutex_lock(&data->lock);
data->temp_min[attr->index] = temp; data->temp_min[attr->index] = temp;
@ -549,7 +549,7 @@ static ssize_t set_temp_max(struct device *dev,
return -EINVAL; return -EINVAL;
temp = DIV_ROUND_CLOSEST(temp, 1000); temp = DIV_ROUND_CLOSEST(temp, 1000);
temp = clamp_val(temp, 0, 255); temp = clamp_val(temp, -128, 127);
mutex_lock(&data->lock); mutex_lock(&data->lock);
data->temp_max[attr->index] = temp; data->temp_max[attr->index] = temp;
@ -826,7 +826,7 @@ static ssize_t set_pwm_tmin(struct device *dev,
return -EINVAL; return -EINVAL;
temp = DIV_ROUND_CLOSEST(temp, 1000); temp = DIV_ROUND_CLOSEST(temp, 1000);
temp = clamp_val(temp, 0, 255); temp = clamp_val(temp, -128, 127);
mutex_lock(&data->lock); mutex_lock(&data->lock);
data->pwm_tmin[attr->index] = temp; data->pwm_tmin[attr->index] = temp;
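The sysfs limit values are written in millidegrees Celsius while the underlying register holds a signed 8-bit value, so the clamp range moves from 0..255 to -128..127 and negative limits are no longer collapsed to zero. A standalone sketch of that conversion (DIV_ROUND_CLOSEST and clamp_val are re-implemented here purely for illustration; this is not driver code):

#include <stdio.h>

/* round-to-nearest division, usable for negative dividends too */
static long div_round_closest(long x, long d)
{
	return x >= 0 ? (x + d / 2) / d : (x - d / 2) / d;
}

static long clamp(long v, long lo, long hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	long millideg[] = { -40000, 0, 25500, 150000 };

	for (unsigned int i = 0; i < sizeof(millideg) / sizeof(millideg[0]); i++) {
		long t = div_round_closest(millideg[i], 1000);
		/* old clamp lost the sign; new clamp keeps it */
		printf("%7ld mC -> old reg %4ld, new reg %4ld\n",
		       millideg[i], clamp(t, 0, 255), clamp(t, -128, 127));
	}
	return 0;
}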

View File

@ -194,7 +194,7 @@ static ssize_t da9052_hwmon_show_name(struct device *dev,
struct device_attribute *devattr, struct device_attribute *devattr,
char *buf) char *buf)
{ {
return sprintf(buf, "da9052-hwmon\n"); return sprintf(buf, "da9052\n");
} }
static ssize_t show_label(struct device *dev, static ssize_t show_label(struct device *dev,

View File

@ -204,7 +204,7 @@ static ssize_t da9055_hwmon_show_name(struct device *dev,
struct device_attribute *devattr, struct device_attribute *devattr,
char *buf) char *buf)
{ {
return sprintf(buf, "da9055-hwmon\n"); return sprintf(buf, "da9055\n");
} }
static ssize_t show_label(struct device *dev, static ssize_t show_label(struct device *dev,

View File

@ -111,8 +111,14 @@ static const int mma8452_samp_freq[8][2] = {
{6, 250000}, {1, 560000} {6, 250000}, {1, 560000}
}; };
/*
* Hardware has fullscale of -2G, -4G, -8G corresponding to raw value -2048
* The userspace interface uses m/s^2 and we declare micro units
* So scale factor is given by:
* g * N * 1000000 / 2048 for N = 2, 4, 8 and g=9.80665
*/
static const int mma8452_scales[3][2] = { static const int mma8452_scales[3][2] = {
{0, 977}, {0, 1953}, {0, 3906} {0, 9577}, {0, 19154}, {0, 38307}
}; };
static ssize_t mma8452_show_samp_freq_avail(struct device *dev, static ssize_t mma8452_show_samp_freq_avail(struct device *dev,
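The replacement table stores the scale in micro m/s^2 per LSB for the 2g, 4g and 8g ranges, following the formula in the comment above. A quick standalone check of that arithmetic (illustrative only, not driver code):

#include <stdio.h>

int main(void)
{
	/* scale = g * N * 1000000 / 2048 micro m/s^2 per LSB, N = 2, 4, 8 */
	const double g = 9.80665;
	const int ranges[] = { 2, 4, 8 };

	for (int i = 0; i < 3; i++) {
		double scale = g * ranges[i] * 1000000.0 / 2048.0;
		/* prints 9576.8, 19153.6, 38307.2 -> the table entries
		 * {0, 9577}, {0, 19154}, {0, 38307} */
		printf("+/-%dg: %.1f micro m/s^2 per LSB\n", ranges[i], scale);
	}
	return 0;
}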

View File

@ -345,6 +345,9 @@ static int iio_device_add_event(struct iio_dev *indio_dev,
&indio_dev->event_interface->dev_attr_list); &indio_dev->event_interface->dev_attr_list);
kfree(postfix); kfree(postfix);
if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
continue;
if (ret) if (ret)
return ret; return ret;

View File

@ -432,8 +432,17 @@ static void arp_failure_discard(void *handle, struct sk_buff *skb)
*/ */
static void act_open_req_arp_failure(void *handle, struct sk_buff *skb) static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{ {
struct c4iw_ep *ep = handle;
printk(KERN_ERR MOD "ARP failure during connect\n"); printk(KERN_ERR MOD "ARP failure during connect\n");
kfree_skb(skb); kfree_skb(skb);
connect_reply_upcall(ep, -EHOSTUNREACH);
state_set(&ep->com, DEAD);
remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
dst_release(ep->dst);
cxgb4_l2t_release(ep->l2t);
c4iw_put_ep(&ep->com);
} }
/* /*
@ -658,7 +667,7 @@ static int send_connect(struct c4iw_ep *ep)
opt2 |= T5_OPT_2_VALID; opt2 |= T5_OPT_2_VALID;
opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE); opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
} }
t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure); t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure);
if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) { if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
if (ep->com.remote_addr.ss_family == AF_INET) { if (ep->com.remote_addr.ss_family == AF_INET) {
@ -2180,7 +2189,6 @@ static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb)
PDBG("%s c4iw_dev %p tid %u\n", __func__, dev, hwtid); PDBG("%s c4iw_dev %p tid %u\n", __func__, dev, hwtid);
BUG_ON(skb_cloned(skb)); BUG_ON(skb_cloned(skb));
skb_trim(skb, sizeof(struct cpl_tid_release)); skb_trim(skb, sizeof(struct cpl_tid_release));
skb_get(skb);
release_tid(&dev->rdev, hwtid, skb); release_tid(&dev->rdev, hwtid, skb);
return; return;
} }
@ -3917,7 +3925,7 @@ int __init c4iw_cm_init(void)
return 0; return 0;
} }
void __exit c4iw_cm_term(void) void c4iw_cm_term(void)
{ {
WARN_ON(!list_empty(&timeout_list)); WARN_ON(!list_empty(&timeout_list));
flush_workqueue(workq); flush_workqueue(workq);

View File

@ -696,6 +696,7 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
pr_err(MOD "error allocating status page\n"); pr_err(MOD "error allocating status page\n");
goto err4; goto err4;
} }
rdev->status_page->db_off = 0;
return 0; return 0;
err4: err4:
c4iw_rqtpool_destroy(rdev); c4iw_rqtpool_destroy(rdev);
@ -729,7 +730,6 @@ static void c4iw_dealloc(struct uld_ctx *ctx)
if (ctx->dev->rdev.oc_mw_kva) if (ctx->dev->rdev.oc_mw_kva)
iounmap(ctx->dev->rdev.oc_mw_kva); iounmap(ctx->dev->rdev.oc_mw_kva);
ib_dealloc_device(&ctx->dev->ibdev); ib_dealloc_device(&ctx->dev->ibdev);
iwpm_exit(RDMA_NL_C4IW);
ctx->dev = NULL; ctx->dev = NULL;
} }
@ -826,12 +826,6 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
setup_debugfs(devp); setup_debugfs(devp);
} }
ret = iwpm_init(RDMA_NL_C4IW);
if (ret) {
pr_err("port mapper initialization failed with %d\n", ret);
ib_dealloc_device(&devp->ibdev);
return ERR_PTR(ret);
}
return devp; return devp;
} }
@ -1332,6 +1326,15 @@ static int __init c4iw_init_module(void)
pr_err("%s[%u]: Failed to add netlink callback\n" pr_err("%s[%u]: Failed to add netlink callback\n"
, __func__, __LINE__); , __func__, __LINE__);
err = iwpm_init(RDMA_NL_C4IW);
if (err) {
pr_err("port mapper initialization failed with %d\n", err);
ibnl_remove_client(RDMA_NL_C4IW);
c4iw_cm_term();
debugfs_remove_recursive(c4iw_debugfs_root);
return err;
}
cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info); cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);
return 0; return 0;
@ -1349,6 +1352,7 @@ static void __exit c4iw_exit_module(void)
} }
mutex_unlock(&dev_mutex); mutex_unlock(&dev_mutex);
cxgb4_unregister_uld(CXGB4_ULD_RDMA); cxgb4_unregister_uld(CXGB4_ULD_RDMA);
iwpm_exit(RDMA_NL_C4IW);
ibnl_remove_client(RDMA_NL_C4IW); ibnl_remove_client(RDMA_NL_C4IW);
c4iw_cm_term(); c4iw_cm_term();
debugfs_remove_recursive(c4iw_debugfs_root); debugfs_remove_recursive(c4iw_debugfs_root);

View File

@ -908,7 +908,7 @@ int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
int c4iw_register_device(struct c4iw_dev *dev); int c4iw_register_device(struct c4iw_dev *dev);
void c4iw_unregister_device(struct c4iw_dev *dev); void c4iw_unregister_device(struct c4iw_dev *dev);
int __init c4iw_cm_init(void); int __init c4iw_cm_init(void);
void __exit c4iw_cm_term(void); void c4iw_cm_term(void);
void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev, void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
struct c4iw_dev_ucontext *uctx); struct c4iw_dev_ucontext *uctx);
void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev, void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,

View File

@ -675,7 +675,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
int err; int err;
uuari = &dev->mdev.priv.uuari; uuari = &dev->mdev.priv.uuari;
if (init_attr->create_flags & ~IB_QP_CREATE_SIGNATURE_EN) if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK))
return -EINVAL; return -EINVAL;
if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR) if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)

View File

@ -170,10 +170,10 @@ int pamu_disable_liodn(int liodn)
static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size) static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size)
{ {
/* Bug if not a power of 2 */ /* Bug if not a power of 2 */
BUG_ON(!is_power_of_2(addrspace_size)); BUG_ON((addrspace_size & (addrspace_size - 1)));
/* window size is 2^(WSE+1) bytes */ /* window size is 2^(WSE+1) bytes */
return __ffs(addrspace_size) - 1; return fls64(addrspace_size) - 2;
} }
/* Derive the PAACE window count encoding for the subwindow count */ /* Derive the PAACE window count encoding for the subwindow count */
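For an exact power of two both encodings give the same result (__ffs(2^n) - 1 == fls64(2^n) - 2 == n - 1), so the point of switching to fls64() and an open-coded power-of-two test is presumably to stop truncating a 64-bit phys_addr_t through the unsigned-long based is_power_of_2()/__ffs() helpers on 32-bit kernels. A standalone check of the WSE arithmetic, with fls64() emulated by a compiler builtin:

#include <stdio.h>
#include <stdint.h>

/* fls64(x): 1-based index of the highest set bit, 0 for x == 0 */
static int fls64(uint64_t x)
{
	return x ? 64 - __builtin_clzll(x) : 0;
}

int main(void)
{
	uint64_t sizes[] = { 4096, 1ULL << 20, 1ULL << 32, 1ULL << 36 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		unsigned int wse = fls64(sizes[i]) - 2;
		/* window size is 2^(WSE+1) bytes, so this reproduces size */
		printf("size=0x%010llx  WSE=%2u  2^(WSE+1)=0x%010llx\n",
		       (unsigned long long)sizes[i], wse,
		       (unsigned long long)(1ULL << (wse + 1)));
	}
	return 0;
}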
@ -351,7 +351,7 @@ int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size,
struct paace *ppaace; struct paace *ppaace;
unsigned long fspi; unsigned long fspi;
if (!is_power_of_2(win_size) || win_size < PAMU_PAGE_SIZE) { if ((win_size & (win_size - 1)) || win_size < PAMU_PAGE_SIZE) {
pr_debug("window size too small or not a power of two %llx\n", win_size); pr_debug("window size too small or not a power of two %llx\n", win_size);
return -EINVAL; return -EINVAL;
} }
@ -464,7 +464,7 @@ int pamu_config_spaace(int liodn, u32 subwin_cnt, u32 subwin,
return -ENOENT; return -ENOENT;
} }
if (!is_power_of_2(subwin_size) || subwin_size < PAMU_PAGE_SIZE) { if ((subwin_size & (subwin_size - 1)) || subwin_size < PAMU_PAGE_SIZE) {
pr_debug("subwindow size out of range, or not a power of 2\n"); pr_debug("subwindow size out of range, or not a power of 2\n");
return -EINVAL; return -EINVAL;
} }

View File

@ -301,7 +301,7 @@ static int check_size(u64 size, dma_addr_t iova)
* Size must be a power of two and at least be equal * Size must be a power of two and at least be equal
* to PAMU page size. * to PAMU page size.
*/ */
if (!is_power_of_2(size) || size < PAMU_PAGE_SIZE) { if ((size & (size - 1)) || size < PAMU_PAGE_SIZE) {
pr_debug("%s: size too small or not a power of two\n", __func__); pr_debug("%s: size too small or not a power of two\n", __func__);
return -EINVAL; return -EINVAL;
} }
@ -335,11 +335,6 @@ static struct fsl_dma_domain *iommu_alloc_dma_domain(void)
return domain; return domain;
} }
static inline struct device_domain_info *find_domain(struct device *dev)
{
return dev->archdata.iommu_domain;
}
static void remove_device_ref(struct device_domain_info *info, u32 win_cnt) static void remove_device_ref(struct device_domain_info *info, u32 win_cnt)
{ {
unsigned long flags; unsigned long flags;
@ -380,7 +375,7 @@ static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct d
* Check here if the device is already attached to domain or not. * Check here if the device is already attached to domain or not.
* If the device is already attached to a domain detach it. * If the device is already attached to a domain detach it.
*/ */
old_domain_info = find_domain(dev); old_domain_info = dev->archdata.iommu_domain;
if (old_domain_info && old_domain_info->domain != dma_domain) { if (old_domain_info && old_domain_info->domain != dma_domain) {
spin_unlock_irqrestore(&device_domain_lock, flags); spin_unlock_irqrestore(&device_domain_lock, flags);
detach_device(dev, old_domain_info->domain); detach_device(dev, old_domain_info->domain);
@ -399,7 +394,7 @@ static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct d
* the info for the first LIODN as all * the info for the first LIODN as all
* LIODNs share the same domain * LIODNs share the same domain
*/ */
if (!old_domain_info) if (!dev->archdata.iommu_domain)
dev->archdata.iommu_domain = info; dev->archdata.iommu_domain = info;
spin_unlock_irqrestore(&device_domain_lock, flags); spin_unlock_irqrestore(&device_domain_lock, flags);
@ -1042,12 +1037,15 @@ root_bus:
group = get_shared_pci_device_group(pdev); group = get_shared_pci_device_group(pdev);
} }
if (!group)
group = ERR_PTR(-ENODEV);
return group; return group;
} }
static int fsl_pamu_add_device(struct device *dev) static int fsl_pamu_add_device(struct device *dev)
{ {
struct iommu_group *group = NULL; struct iommu_group *group = ERR_PTR(-ENODEV);
struct pci_dev *pdev; struct pci_dev *pdev;
const u32 *prop; const u32 *prop;
int ret, len; int ret, len;
@ -1070,7 +1068,7 @@ static int fsl_pamu_add_device(struct device *dev)
group = get_device_iommu_group(dev); group = get_device_iommu_group(dev);
} }
if (!group || IS_ERR(group)) if (IS_ERR(group))
return PTR_ERR(group); return PTR_ERR(group);
ret = iommu_group_add_device(group, dev); ret = iommu_group_add_device(group, dev);

View File

@ -42,6 +42,7 @@
#include <linux/irqchip/chained_irq.h> #include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/arm-gic.h> #include <linux/irqchip/arm-gic.h>
#include <asm/cputype.h>
#include <asm/irq.h> #include <asm/irq.h>
#include <asm/exception.h> #include <asm/exception.h>
#include <asm/smp_plat.h> #include <asm/smp_plat.h>
@ -954,7 +955,9 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
} }
for_each_possible_cpu(cpu) { for_each_possible_cpu(cpu) {
unsigned long offset = percpu_offset * cpu_logical_map(cpu); u32 mpidr = cpu_logical_map(cpu);
u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
unsigned long offset = percpu_offset * core_id;
*per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset; *per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
*per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset; *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
} }
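cpu_logical_map() returns the whole MPIDR, which on multi-cluster systems carries cluster bits above bit 7; multiplying percpu_offset by that value would address far outside the banked per-CPU register window, so only affinity level 0 (the core number within its cluster) is used. A host-side sketch of the extraction; the 8-bits-per-affinity-level layout and the 0x8000 stride are assumptions made for the demo, not values taken from this file:

#include <stdio.h>

#define MPIDR_LEVEL_BITS	8
#define MPIDR_LEVEL_MASK	((1u << MPIDR_LEVEL_BITS) - 1)
#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
	(((mpidr) >> ((level) * MPIDR_LEVEL_BITS)) & MPIDR_LEVEL_MASK)

int main(void)
{
	unsigned int percpu_offset = 0x8000;			/* example bank stride */
	unsigned int mpidrs[] = { 0x000, 0x001, 0x100, 0x101 };	/* cluster.core */

	for (unsigned int i = 0; i < 4; i++) {
		unsigned int core_id = MPIDR_AFFINITY_LEVEL(mpidrs[i], 0);
		printf("mpidr=0x%03x  full-mpidr offset=0x%08x  core-id offset=0x%08x\n",
		       mpidrs[i],
		       percpu_offset * mpidrs[i],	/* overshoots on cluster 1 */
		       percpu_offset * core_id);	/* stays within the bank */
	}
	return 0;
}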
@ -1071,8 +1074,10 @@ gic_of_init(struct device_node *node, struct device_node *parent)
gic_cnt++; gic_cnt++;
return 0; return 0;
} }
IRQCHIP_DECLARE(gic_400, "arm,gic-400", gic_of_init);
IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init); IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init); IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init);
IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init); IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init); IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);

View File

@ -2059,13 +2059,17 @@ static int l3ni1_cmd_global(struct PStack *st, isdn_ctrl *ic)
memcpy(p, ic->parm.ni1_io.data, ic->parm.ni1_io.datalen); /* copy data */ memcpy(p, ic->parm.ni1_io.data, ic->parm.ni1_io.datalen); /* copy data */
l = (p - temp) + ic->parm.ni1_io.datalen; /* total length */ l = (p - temp) + ic->parm.ni1_io.datalen; /* total length */
if (ic->parm.ni1_io.timeout > 0) if (ic->parm.ni1_io.timeout > 0) {
if (!(pc = ni1_new_l3_process(st, -1))) pc = ni1_new_l3_process(st, -1);
{ free_invoke_id(st, id); if (!pc) {
free_invoke_id(st, id);
return (-2); return (-2);
} }
pc->prot.ni1.ll_id = ic->parm.ni1_io.ll_id; /* remember id */ /* remember id */
pc->prot.ni1.proc = ic->parm.ni1_io.proc; /* and procedure */ pc->prot.ni1.ll_id = ic->parm.ni1_io.ll_id;
/* and procedure */
pc->prot.ni1.proc = ic->parm.ni1_io.proc;
}
if (!(skb = l3_alloc_skb(l))) if (!(skb = l3_alloc_skb(l)))
{ free_invoke_id(st, id); { free_invoke_id(st, id);

View File

@ -442,7 +442,7 @@ static int get_filter(void __user *arg, struct sock_filter **p)
{ {
struct sock_fprog uprog; struct sock_fprog uprog;
struct sock_filter *code = NULL; struct sock_filter *code = NULL;
int len, err; int len;
if (copy_from_user(&uprog, arg, sizeof(uprog))) if (copy_from_user(&uprog, arg, sizeof(uprog)))
return -EFAULT; return -EFAULT;
@ -458,12 +458,6 @@ static int get_filter(void __user *arg, struct sock_filter **p)
if (IS_ERR(code)) if (IS_ERR(code))
return PTR_ERR(code); return PTR_ERR(code);
err = sk_chk_filter(code, uprog.len);
if (err) {
kfree(code);
return err;
}
*p = code; *p = code;
return uprog.len; return uprog.len;
} }

View File

@ -425,6 +425,15 @@ static int __open_metadata(struct dm_cache_metadata *cmd)
disk_super = dm_block_data(sblock); disk_super = dm_block_data(sblock);
/* Verify the data block size hasn't changed */
if (le32_to_cpu(disk_super->data_block_size) != cmd->data_block_size) {
DMERR("changing the data block size (from %u to %llu) is not supported",
le32_to_cpu(disk_super->data_block_size),
(unsigned long long)cmd->data_block_size);
r = -EINVAL;
goto bad;
}
r = __check_incompat_features(disk_super, cmd); r = __check_incompat_features(disk_super, cmd);
if (r < 0) if (r < 0)
goto bad; goto bad;

View File

@ -613,6 +613,15 @@ static int __open_metadata(struct dm_pool_metadata *pmd)
disk_super = dm_block_data(sblock); disk_super = dm_block_data(sblock);
/* Verify the data block size hasn't changed */
if (le32_to_cpu(disk_super->data_block_size) != pmd->data_block_size) {
DMERR("changing the data block size (from %u to %llu) is not supported",
le32_to_cpu(disk_super->data_block_size),
(unsigned long long)pmd->data_block_size);
r = -EINVAL;
goto bad_unlock_sblock;
}
r = __check_incompat_features(disk_super, pmd); r = __check_incompat_features(disk_super, pmd);
if (r < 0) if (r < 0)
goto bad_unlock_sblock; goto bad_unlock_sblock;

View File

@ -52,6 +52,11 @@
/* Atmel chips */ /* Atmel chips */
#define AT49BV640D 0x02de #define AT49BV640D 0x02de
#define AT49BV640DT 0x02db #define AT49BV640DT 0x02db
/* Sharp chips */
#define LH28F640BFHE_PTTL90 0x00b0
#define LH28F640BFHE_PBTL90 0x00b1
#define LH28F640BFHE_PTTL70A 0x00b2
#define LH28F640BFHE_PBTL70A 0x00b3
static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
@ -258,6 +263,36 @@ static void fixup_st_m28w320cb(struct mtd_info *mtd)
(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e; (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}; };
static int is_LH28F640BF(struct cfi_private *cfi)
{
/* Sharp LH28F640BF Family */
if (cfi->mfr == CFI_MFR_SHARP && (
cfi->id == LH28F640BFHE_PTTL90 || cfi->id == LH28F640BFHE_PBTL90 ||
cfi->id == LH28F640BFHE_PTTL70A || cfi->id == LH28F640BFHE_PBTL70A))
return 1;
return 0;
}
static void fixup_LH28F640BF(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
struct cfi_pri_intelext *extp = cfi->cmdset_priv;
/* Reset the Partition Configuration Register on LH28F640BF
* to a single partition (PCR = 0x000): PCR is embedded into A0-A15. */
if (is_LH28F640BF(cfi)) {
printk(KERN_INFO "Reset Partition Config. Register: 1 Partition of 4 planes\n");
map_write(map, CMD(0x60), 0);
map_write(map, CMD(0x04), 0);
/* We have set one single partition thus
* Simultaneous Operations are not allowed */
printk(KERN_INFO "cfi_cmdset_0001: Simultaneous Operations disabled\n");
extp->FeatureSupport &= ~512;
}
}
static void fixup_use_point(struct mtd_info *mtd) static void fixup_use_point(struct mtd_info *mtd)
{ {
struct map_info *map = mtd->priv; struct map_info *map = mtd->priv;
@ -309,6 +344,8 @@ static struct cfi_fixup cfi_fixup_table[] = {
{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct }, { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb }, { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
{ CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock }, { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
{ CFI_MFR_SHARP, CFI_ID_ANY, fixup_unlock_powerup_lock },
{ CFI_MFR_SHARP, CFI_ID_ANY, fixup_LH28F640BF },
{ 0, 0, NULL } { 0, 0, NULL }
}; };
@ -1649,6 +1686,12 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
initial_adr = adr; initial_adr = adr;
cmd_adr = adr & ~(wbufsize-1); cmd_adr = adr & ~(wbufsize-1);
/* Sharp LH28F640BF chips need the first address for the
* Page Buffer Program command. See Table 5 of
* LH28F320BF, LH28F640BF, LH28F128BF Series (Appendix FUM00701) */
if (is_LH28F640BF(cfi))
cmd_adr = adr;
/* Let's determine this according to the interleave only once */ /* Let's determine this according to the interleave only once */
write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9); write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);

View File

@ -475,6 +475,7 @@ static int elm_context_save(struct elm_info *info)
ELM_SYNDROME_FRAGMENT_1 + offset); ELM_SYNDROME_FRAGMENT_1 + offset);
regs->elm_syndrome_fragment_0[i] = elm_read_reg(info, regs->elm_syndrome_fragment_0[i] = elm_read_reg(info,
ELM_SYNDROME_FRAGMENT_0 + offset); ELM_SYNDROME_FRAGMENT_0 + offset);
break;
default: default:
return -EINVAL; return -EINVAL;
} }
@ -520,6 +521,7 @@ static int elm_context_restore(struct elm_info *info)
regs->elm_syndrome_fragment_1[i]); regs->elm_syndrome_fragment_1[i]);
elm_write_reg(info, ELM_SYNDROME_FRAGMENT_0 + offset, elm_write_reg(info, ELM_SYNDROME_FRAGMENT_0 + offset,
regs->elm_syndrome_fragment_0[i]); regs->elm_syndrome_fragment_0[i]);
break;
default: default:
return -EINVAL; return -EINVAL;
} }

View File

@ -4047,8 +4047,10 @@ int nand_scan_tail(struct mtd_info *mtd)
ecc->layout->oobavail += ecc->layout->oobfree[i].length; ecc->layout->oobavail += ecc->layout->oobfree[i].length;
mtd->oobavail = ecc->layout->oobavail; mtd->oobavail = ecc->layout->oobavail;
/* ECC sanity check: warn noisily if it's too weak */ /* ECC sanity check: warn if it's too weak */
WARN_ON(!nand_ecc_strength_good(mtd)); if (!nand_ecc_strength_good(mtd))
pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
mtd->name);
/* /*
* Set the number of read / write steps for one page depending on ECC * Set the number of read / write steps for one page depending on ECC

View File

@ -125,7 +125,7 @@ static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
parent = *p; parent = *p;
av = rb_entry(parent, struct ubi_ainf_volume, rb); av = rb_entry(parent, struct ubi_ainf_volume, rb);
if (vol_id < av->vol_id) if (vol_id > av->vol_id)
p = &(*p)->rb_left; p = &(*p)->rb_left;
else else
p = &(*p)->rb_right; p = &(*p)->rb_right;
@ -423,7 +423,7 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
pnum, err); pnum, err);
ret = err > 0 ? UBI_BAD_FASTMAP : err; ret = err > 0 ? UBI_BAD_FASTMAP : err;
goto out; goto out;
} else if (ret == UBI_IO_BITFLIPS) } else if (err == UBI_IO_BITFLIPS)
scrub = 1; scrub = 1;
/* /*

View File

@ -4068,7 +4068,7 @@ static int bond_check_params(struct bond_params *params)
} }
if (ad_select) { if (ad_select) {
bond_opt_initstr(&newval, lacp_rate); bond_opt_initstr(&newval, ad_select);
valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_SELECT), valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_SELECT),
&newval); &newval);
if (!valptr) { if (!valptr) {

View File

@ -654,13 +654,13 @@ static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
work_done = bcm_sysport_tx_reclaim(ring->priv, ring); work_done = bcm_sysport_tx_reclaim(ring->priv, ring);
if (work_done < budget) { if (work_done == 0) {
napi_complete(napi); napi_complete(napi);
/* re-enable TX interrupt */ /* re-enable TX interrupt */
intrl2_1_mask_clear(ring->priv, BIT(ring->index)); intrl2_1_mask_clear(ring->priv, BIT(ring->index));
} }
return work_done; return 0;
} }
static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv) static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
@ -1254,28 +1254,17 @@ static inline void umac_enable_set(struct bcm_sysport_priv *priv,
usleep_range(1000, 2000); usleep_range(1000, 2000);
} }
static inline int umac_reset(struct bcm_sysport_priv *priv) static inline void umac_reset(struct bcm_sysport_priv *priv)
{ {
unsigned int timeout = 0;
u32 reg; u32 reg;
int ret = 0;
umac_writel(priv, 0, UMAC_CMD); reg = umac_readl(priv, UMAC_CMD);
while (timeout++ < 1000) { reg |= CMD_SW_RESET;
reg = umac_readl(priv, UMAC_CMD); umac_writel(priv, reg, UMAC_CMD);
if (!(reg & CMD_SW_RESET)) udelay(10);
break; reg = umac_readl(priv, UMAC_CMD);
reg &= ~CMD_SW_RESET;
udelay(1); umac_writel(priv, reg, UMAC_CMD);
}
if (timeout == 1000) {
dev_err(&priv->pdev->dev,
"timeout waiting for MAC to come out of reset\n");
ret = -ETIMEDOUT;
}
return ret;
} }
static void umac_set_hw_addr(struct bcm_sysport_priv *priv, static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
@ -1303,11 +1292,7 @@ static int bcm_sysport_open(struct net_device *dev)
int ret; int ret;
/* Reset UniMAC */ /* Reset UniMAC */
ret = umac_reset(priv); umac_reset(priv);
if (ret) {
netdev_err(dev, "UniMAC reset failed\n");
return ret;
}
/* Flush TX and RX FIFOs at TOPCTRL level */ /* Flush TX and RX FIFOs at TOPCTRL level */
topctrl_flush(priv); topctrl_flush(priv);
@ -1589,12 +1574,6 @@ static int bcm_sysport_probe(struct platform_device *pdev)
BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8); BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
dev->needed_headroom += sizeof(struct bcm_tsb); dev->needed_headroom += sizeof(struct bcm_tsb);
/* We are interfaced to a switch which handles the multicast
* filtering for us, so we do not support programming any
* multicast hash table in this Ethernet MAC.
*/
dev->flags &= ~IFF_MULTICAST;
/* libphy will adjust the link state accordingly */ /* libphy will adjust the link state accordingly */
netif_carrier_off(dev); netif_carrier_off(dev);

View File

@ -797,7 +797,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
return; return;
} }
bnx2x_frag_free(fp, new_data); if (new_data)
bnx2x_frag_free(fp, new_data);
drop: drop:
/* drop the packet and keep the buffer in the bin */ /* drop the packet and keep the buffer in the bin */
DP(NETIF_MSG_RX_STATUS, DP(NETIF_MSG_RX_STATUS,

View File

@ -12937,7 +12937,7 @@ static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt)
* without the default SB. * without the default SB.
* For VFs there is no default SB, then we return (index+1). * For VFs there is no default SB, then we return (index+1).
*/ */
pci_read_config_word(pdev, pdev->msix_cap + PCI_MSI_FLAGS, &control); pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &control);
index = control & PCI_MSIX_FLAGS_QSIZE; index = control & PCI_MSIX_FLAGS_QSIZE;
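The Message Control word of the MSI-X capability (read here at msix_cap + PCI_MSIX_FLAGS) encodes the table size as N - 1 in its low bits, so using the MSI-X constant keeps the offset paired with the PCI_MSIX_FLAGS_QSIZE mask applied on the next line. A small decode of a raw Message Control word; the 0x07ff mask mirrors the spec's Table Size field and is hard-coded here as an assumption of the sketch, not copied from the kernel header:

#include <stdio.h>

#define MSIX_FLAGS_QSIZE	0x07ff	/* Table Size field, bits 10:0 */

int main(void)
{
	unsigned int control_words[] = { 0x0000, 0x0008, 0x803f };

	for (unsigned int i = 0; i < 3; i++) {
		unsigned int qsize = control_words[i] & MSIX_FLAGS_QSIZE;
		/* the field holds (number of vectors - 1) */
		printf("message control=0x%04x -> %u MSI-X vectors\n",
		       control_words[i], qsize + 1);
	}
	return 0;
}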

View File

@ -1408,13 +1408,6 @@ static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv)
if (cb->skb) if (cb->skb)
continue; continue;
/* set the DMA descriptor length once and for all
* it will only change if we support dynamically sizing
* priv->rx_buf_len, but we do not
*/
dmadesc_set_length_status(priv, priv->rx_bd_assign_ptr,
priv->rx_buf_len << DMA_BUFLENGTH_SHIFT);
ret = bcmgenet_rx_refill(priv, cb); ret = bcmgenet_rx_refill(priv, cb);
if (ret) if (ret)
break; break;
@ -2535,14 +2528,17 @@ static int bcmgenet_probe(struct platform_device *pdev)
netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1); netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1); netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);
err = register_netdev(dev); /* libphy will determine the link state */
if (err) netif_carrier_off(dev);
goto err_clk_disable;
/* Turn off the main clock, WOL clock is handled separately */ /* Turn off the main clock, WOL clock is handled separately */
if (!IS_ERR(priv->clk)) if (!IS_ERR(priv->clk))
clk_disable_unprepare(priv->clk); clk_disable_unprepare(priv->clk);
err = register_netdev(dev);
if (err)
goto err;
return err; return err;
err_clk_disable: err_clk_disable:

View File

@ -331,9 +331,9 @@ struct bcmgenet_mib_counters {
#define EXT_ENERGY_DET_MASK (1 << 12) #define EXT_ENERGY_DET_MASK (1 << 12)
#define EXT_RGMII_OOB_CTRL 0x0C #define EXT_RGMII_OOB_CTRL 0x0C
#define RGMII_MODE_EN (1 << 0)
#define RGMII_LINK (1 << 4) #define RGMII_LINK (1 << 4)
#define OOB_DISABLE (1 << 5) #define OOB_DISABLE (1 << 5)
#define RGMII_MODE_EN (1 << 6)
#define ID_MODE_DIS (1 << 16) #define ID_MODE_DIS (1 << 16)
#define EXT_GPHY_CTRL 0x1C #define EXT_GPHY_CTRL 0x1C

View File

@ -2902,7 +2902,7 @@ static int be_open(struct net_device *netdev)
for_all_evt_queues(adapter, eqo, i) { for_all_evt_queues(adapter, eqo, i) {
napi_enable(&eqo->napi); napi_enable(&eqo->napi);
be_enable_busy_poll(eqo); be_enable_busy_poll(eqo);
be_eq_notify(adapter, eqo->q.id, true, false, 0); be_eq_notify(adapter, eqo->q.id, true, true, 0);
} }
adapter->flags |= BE_FLAGS_NAPI_ENABLED; adapter->flags |= BE_FLAGS_NAPI_ENABLED;

View File

@ -2990,11 +2990,11 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
if (ug_info->rxExtendedFiltering) { if (ug_info->rxExtendedFiltering) {
size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING; size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
if (ug_info->largestexternallookupkeysize == if (ug_info->largestexternallookupkeysize ==
QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES) QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
size += size +=
THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8; THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
if (ug_info->largestexternallookupkeysize == if (ug_info->largestexternallookupkeysize ==
QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES) QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
size += size +=
THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16; THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
} }

View File

@ -1481,6 +1481,13 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw)
s32 ret_val; s32 ret_val;
u16 i, rar_count = mac->rar_entry_count; u16 i, rar_count = mac->rar_entry_count;
if ((hw->mac.type >= e1000_i210) &&
!(igb_get_flash_presence_i210(hw))) {
ret_val = igb_pll_workaround_i210(hw);
if (ret_val)
return ret_val;
}
/* Initialize identification LED */ /* Initialize identification LED */
ret_val = igb_id_led_init(hw); ret_val = igb_id_led_init(hw);
if (ret_val) { if (ret_val) {

View File

@ -46,14 +46,15 @@
#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* SDP3 Data direction */ #define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* SDP3 Data direction */
/* Physical Func Reset Done Indication */ /* Physical Func Reset Done Indication */
#define E1000_CTRL_EXT_PFRSTD 0x00004000 #define E1000_CTRL_EXT_PFRSTD 0x00004000
#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 #define E1000_CTRL_EXT_SDLPE 0X00040000 /* SerDes Low Power Enable */
#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 #define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000 #define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 #define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000
#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 #define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000
#define E1000_CTRL_EXT_EIAME 0x01000000 #define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
#define E1000_CTRL_EXT_IRCA 0x00000001 #define E1000_CTRL_EXT_EIAME 0x01000000
#define E1000_CTRL_EXT_IRCA 0x00000001
/* Interrupt delay cancellation */ /* Interrupt delay cancellation */
/* Driver loaded bit for FW */ /* Driver loaded bit for FW */
#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 #define E1000_CTRL_EXT_DRV_LOAD 0x10000000
@ -62,6 +63,7 @@
/* packet buffer parity error detection enabled */ /* packet buffer parity error detection enabled */
/* descriptor FIFO parity error detection enable */ /* descriptor FIFO parity error detection enable */
#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ #define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
#define E1000_CTRL_EXT_PHYPDEN 0x00100000
#define E1000_I2CCMD_REG_ADDR_SHIFT 16 #define E1000_I2CCMD_REG_ADDR_SHIFT 16
#define E1000_I2CCMD_PHY_ADDR_SHIFT 24 #define E1000_I2CCMD_PHY_ADDR_SHIFT 24
#define E1000_I2CCMD_OPCODE_READ 0x08000000 #define E1000_I2CCMD_OPCODE_READ 0x08000000

View File

@ -567,4 +567,7 @@ struct net_device *igb_get_hw_dev(struct e1000_hw *hw);
/* These functions must be implemented by drivers */ /* These functions must be implemented by drivers */
s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
#endif /* _E1000_HW_H_ */ #endif /* _E1000_HW_H_ */

View File

@ -834,3 +834,69 @@ s32 igb_init_nvm_params_i210(struct e1000_hw *hw)
} }
return ret_val; return ret_val;
} }
/**
* igb_pll_workaround_i210
* @hw: pointer to the HW structure
*
* Works around an errata in the PLL circuit where it occasionally
* provides the wrong clock frequency after power up.
**/
s32 igb_pll_workaround_i210(struct e1000_hw *hw)
{
s32 ret_val;
u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val;
u16 nvm_word, phy_word, pci_word, tmp_nvm;
int i;
/* Get and set needed register values */
wuc = rd32(E1000_WUC);
mdicnfg = rd32(E1000_MDICNFG);
reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO;
wr32(E1000_MDICNFG, reg_val);
/* Get data from NVM, or set default */
ret_val = igb_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD,
&nvm_word);
if (ret_val)
nvm_word = E1000_INVM_DEFAULT_AL;
tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
/* check current state directly from internal PHY */
igb_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE |
E1000_PHY_PLL_FREQ_REG), &phy_word);
if ((phy_word & E1000_PHY_PLL_UNCONF)
!= E1000_PHY_PLL_UNCONF) {
ret_val = 0;
break;
} else {
ret_val = -E1000_ERR_PHY;
}
/* directly reset the internal PHY */
ctrl = rd32(E1000_CTRL);
wr32(E1000_CTRL, ctrl|E1000_CTRL_PHY_RST);
ctrl_ext = rd32(E1000_CTRL_EXT);
ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE);
wr32(E1000_CTRL_EXT, ctrl_ext);
wr32(E1000_WUC, 0);
reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16);
wr32(E1000_EEARBC_I210, reg_val);
igb_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
pci_word |= E1000_PCI_PMCSR_D3;
igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
usleep_range(1000, 2000);
pci_word &= ~E1000_PCI_PMCSR_D3;
igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16);
wr32(E1000_EEARBC_I210, reg_val);
/* restore WUC register */
wr32(E1000_WUC, wuc);
}
/* restore MDICNFG setting */
wr32(E1000_MDICNFG, mdicnfg);
return ret_val;
}

View File

@ -33,6 +33,7 @@ s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data);
s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data); s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data);
s32 igb_init_nvm_params_i210(struct e1000_hw *hw); s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
bool igb_get_flash_presence_i210(struct e1000_hw *hw); bool igb_get_flash_presence_i210(struct e1000_hw *hw);
s32 igb_pll_workaround_i210(struct e1000_hw *hw);
#define E1000_STM_OPCODE 0xDB00 #define E1000_STM_OPCODE 0xDB00
#define E1000_EEPROM_FLASH_SIZE_WORD 0x11 #define E1000_EEPROM_FLASH_SIZE_WORD 0x11
@ -78,4 +79,15 @@ enum E1000_INVM_STRUCTURE_TYPE {
#define NVM_LED_1_CFG_DEFAULT_I211 0x0184 #define NVM_LED_1_CFG_DEFAULT_I211 0x0184
#define NVM_LED_0_2_CFG_DEFAULT_I211 0x200C #define NVM_LED_0_2_CFG_DEFAULT_I211 0x200C
/* PLL Defines */
#define E1000_PCI_PMCSR 0x44
#define E1000_PCI_PMCSR_D3 0x03
#define E1000_MAX_PLL_TRIES 5
#define E1000_PHY_PLL_UNCONF 0xFF
#define E1000_PHY_PLL_FREQ_PAGE 0xFC0000
#define E1000_PHY_PLL_FREQ_REG 0x000E
#define E1000_INVM_DEFAULT_AL 0x202F
#define E1000_INVM_AUTOLOAD 0x0A
#define E1000_INVM_PLL_WO_VAL 0x0010
#endif #endif

View File

@ -66,6 +66,7 @@
#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ #define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
#define E1000_PBS 0x01008 /* Packet Buffer Size */ #define E1000_PBS 0x01008 /* Packet Buffer Size */
#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ #define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
#define E1000_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */
#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ #define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */ #define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */
#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */ #define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */

View File

@ -7215,6 +7215,20 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
} }
} }
void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
{
struct igb_adapter *adapter = hw->back;
pci_read_config_word(adapter->pdev, reg, value);
}
void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
{
struct igb_adapter *adapter = hw->back;
pci_write_config_word(adapter->pdev, reg, *value);
}
s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{ {
struct igb_adapter *adapter = hw->back; struct igb_adapter *adapter = hw->back;
@ -7578,6 +7592,8 @@ static int igb_sriov_reinit(struct pci_dev *dev)
if (netif_running(netdev)) if (netif_running(netdev))
igb_close(netdev); igb_close(netdev);
else
igb_reset(adapter);
igb_clear_interrupt_scheme(adapter); igb_clear_interrupt_scheme(adapter);

View File

@ -1207,7 +1207,7 @@ static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
command = l3_offs << MVNETA_TX_L3_OFF_SHIFT; command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT; command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
if (l3_proto == swab16(ETH_P_IP)) if (l3_proto == htons(ETH_P_IP))
command |= MVNETA_TXD_IP_CSUM; command |= MVNETA_TXD_IP_CSUM;
else else
command |= MVNETA_TX_L3_IP6; command |= MVNETA_TX_L3_IP6;
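l3_proto here is a network-byte-order protocol value (as in skb->protocol), so the correct constant to compare against is htons(ETH_P_IP); swab16(ETH_P_IP) only produces the same bytes on little-endian hosts. A host-side illustration (ETH_P_IP is the standard 0x0800 ethertype; swab16 is re-implemented for the demo):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define ETH_P_IP 0x0800		/* IPv4 ethertype */

static uint16_t swab16(uint16_t x)
{
	return (uint16_t)((x << 8) | (x >> 8));
}

int main(void)
{
	printf("ETH_P_IP         = 0x%04x\n", ETH_P_IP);
	printf("htons(ETH_P_IP)  = 0x%04x\n", htons(ETH_P_IP));
	printf("swab16(ETH_P_IP) = 0x%04x\n", swab16(ETH_P_IP));
	/* on a big-endian host htons() is the identity while swab16()
	 * would no longer match a network-order l3_proto value */
	return 0;
}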
@ -2529,7 +2529,7 @@ static void mvneta_adjust_link(struct net_device *ndev)
if (phydev->speed == SPEED_1000) if (phydev->speed == SPEED_1000)
val |= MVNETA_GMAC_CONFIG_GMII_SPEED; val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
else else if (phydev->speed == SPEED_100)
val |= MVNETA_GMAC_CONFIG_MII_SPEED; val |= MVNETA_GMAC_CONFIG_MII_SPEED;
mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);

View File

@ -294,8 +294,6 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
init_completion(&cq->free); init_completion(&cq->free);
cq->irq = priv->eq_table.eq[cq->vector].irq; cq->irq = priv->eq_table.eq[cq->vector].irq;
cq->irq_affinity_change = false;
return 0; return 0;
err_radix: err_radix:

View File

@ -128,6 +128,10 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQ's\n", mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQ's\n",
name); name);
} }
cq->irq_desc =
irq_to_desc(mlx4_eq_get_irq(mdev->dev,
cq->vector));
} }
} else { } else {
cq->vector = (cq->ring + 1 + priv->port) % cq->vector = (cq->ring + 1 + priv->port) %
@ -187,8 +191,6 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
mlx4_en_unmap_buffer(&cq->wqres.buf); mlx4_en_unmap_buffer(&cq->wqres.buf);
mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size); mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
if (priv->mdev->dev->caps.comp_pool && cq->vector) { if (priv->mdev->dev->caps.comp_pool && cq->vector) {
if (!cq->is_tx)
irq_set_affinity_hint(cq->mcq.irq, NULL);
mlx4_release_eq(priv->mdev->dev, cq->vector); mlx4_release_eq(priv->mdev->dev, cq->vector);
} }
cq->vector = 0; cq->vector = 0;
@ -204,6 +206,7 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
if (!cq->is_tx) { if (!cq->is_tx) {
napi_hash_del(&cq->napi); napi_hash_del(&cq->napi);
synchronize_rcu(); synchronize_rcu();
irq_set_affinity_hint(cq->mcq.irq, NULL);
} }
netif_napi_del(&cq->napi); netif_napi_del(&cq->napi);

View File

@ -417,6 +417,8 @@ static int mlx4_en_get_coalesce(struct net_device *dev,
coal->tx_coalesce_usecs = priv->tx_usecs; coal->tx_coalesce_usecs = priv->tx_usecs;
coal->tx_max_coalesced_frames = priv->tx_frames; coal->tx_max_coalesced_frames = priv->tx_frames;
coal->tx_max_coalesced_frames_irq = priv->tx_work_limit;
coal->rx_coalesce_usecs = priv->rx_usecs; coal->rx_coalesce_usecs = priv->rx_usecs;
coal->rx_max_coalesced_frames = priv->rx_frames; coal->rx_max_coalesced_frames = priv->rx_frames;
@ -426,6 +428,7 @@ static int mlx4_en_get_coalesce(struct net_device *dev,
coal->rx_coalesce_usecs_high = priv->rx_usecs_high; coal->rx_coalesce_usecs_high = priv->rx_usecs_high;
coal->rate_sample_interval = priv->sample_interval; coal->rate_sample_interval = priv->sample_interval;
coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal; coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal;
return 0; return 0;
} }
@ -434,6 +437,9 @@ static int mlx4_en_set_coalesce(struct net_device *dev,
{ {
struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_priv *priv = netdev_priv(dev);
if (!coal->tx_max_coalesced_frames_irq)
return -EINVAL;
priv->rx_frames = (coal->rx_max_coalesced_frames == priv->rx_frames = (coal->rx_max_coalesced_frames ==
MLX4_EN_AUTO_CONF) ? MLX4_EN_AUTO_CONF) ?
MLX4_EN_RX_COAL_TARGET : MLX4_EN_RX_COAL_TARGET :
@ -457,6 +463,7 @@ static int mlx4_en_set_coalesce(struct net_device *dev,
priv->rx_usecs_high = coal->rx_coalesce_usecs_high; priv->rx_usecs_high = coal->rx_coalesce_usecs_high;
priv->sample_interval = coal->rate_sample_interval; priv->sample_interval = coal->rate_sample_interval;
priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce; priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce;
priv->tx_work_limit = coal->tx_max_coalesced_frames_irq;
return mlx4_en_moderation_update(priv); return mlx4_en_moderation_update(priv);
} }

View File

@ -2336,7 +2336,7 @@ static void mlx4_en_add_vxlan_port(struct net_device *dev,
struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_priv *priv = netdev_priv(dev);
__be16 current_port; __be16 current_port;
if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)) if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
return; return;
if (sa_family == AF_INET6) if (sa_family == AF_INET6)
@ -2473,6 +2473,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
MLX4_WQE_CTRL_SOLICITED); MLX4_WQE_CTRL_SOLICITED);
priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up; priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
priv->tx_ring_num = prof->tx_ring_num; priv->tx_ring_num = prof->tx_ring_num;
priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK;
priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS, priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
GFP_KERNEL); GFP_KERNEL);

Some files were not shown because too many files have changed in this diff