Merge tag 'asoc-v3.11-4' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into for-linus

ASoC: Updates for v3.11

A few small fixes, all driver specific.  The removal of the GPIO based
pinmuxing is a bug fix: since the obsolete nodes had been removed from
the DT, it stopped the driver loading.

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.12 (GNU/Linux)

iQIbBAABAgAGBQJR1pSAAAoJELSic+t+oim9UCkP9it8xj9B7aXZZPPSuAAMkB6o
SPc3dluSqmrhGpMo4C8DAEgkRg8bmNTjK8asL47z/3XXT64HO32fPXsnBZcH+eRl
7PNGVGmKpKdyS63DinonzSadvb/zudYa0UOSC8sdPtNtdQvOiKSOYLYijWBBATGd
mvQ8ZlOIc7TzDDuCU8MdwvJwWaT6WgNV6dkcqwsCbAKP4e6fGwI/pP9NVRBQwxTd
mgu/FAV+Rn7+qgWY1wqxM5AgTjjGXBfGwVhUEpKAwJmhz0nEK3euzXnlzPzb9a+E
WvnzIkzcjEeY6e71PkeaTkHg5G9Vlb/GnL91Jgx5MC5CFL7W8c5jIu774Qa7V2jU
ICxhr+dRP3mO72VLFbtZti0O2rsWOr4VsC7wV/c2cbSibiUtnf0fqQ0MoVbKzN1V
GW0wl7PmVJvhad+2JVubsJpR8VCwy25qLiQ8IoJXqwLxsQJ1pzA9jfNKURPKc7t7
iDkvtEZhp783yyYG1EUdfAe46NZw1jgq3KBwMu0sgNSXRBMpcFujfDKHzMGmeWaL
cCOrAriDvc+ucsQSfHbLcjqJVQN+0zqbmjHETrTaWyDF/ymggGn4PIUOUv9tAlYR
ThzBWxDLOPzeHzC23vsinfypbkFkVAUIBUuHcpQDFR9xgcblQCVPHaE0GELSbtRh
GK8epbwcKDGu0G1KxtY=
=+rVP
-----END PGP SIGNATURE-----

Takashi Iwai 2013-07-05 11:54:27 +02:00
commit 38f0ad7c90
154 changed files with 5702 additions and 674 deletions


@@ -420,10 +420,10 @@ tcp_synack_retries - INTEGER
 for a passive TCP connection will happen after 63seconds.
 tcp_syncookies - BOOLEAN
-Only valid when the kernel was compiled with CONFIG_SYNCOOKIES
+Only valid when the kernel was compiled with CONFIG_SYN_COOKIES
 Send out syncookies when the syn backlog queue of a socket
 overflows. This is to prevent against the common 'SYN flood attack'
-Default: FALSE
+Default: 1
 Note, that syncookies is fallback facility.
 It MUST NOT be used to help highly loaded servers to stand


@@ -3220,7 +3220,7 @@ F: lib/fault-inject.c
 FCOE SUBSYSTEM (libfc, libfcoe, fcoe)
 M: Robert Love <robert.w.love@intel.com>
-L: devel@open-fcoe.org
+L: fcoe-devel@open-fcoe.org
 W: www.Open-FCoE.org
 S: Supported
 F: drivers/scsi/libfc/


@ -1,7 +1,7 @@
VERSION = 3 VERSION = 3
PATCHLEVEL = 10 PATCHLEVEL = 10
SUBLEVEL = 0 SUBLEVEL = 0
EXTRAVERSION = -rc7 EXTRAVERSION =
NAME = Unicycling Gorilla NAME = Unicycling Gorilla
# *DOCUMENTATION* # *DOCUMENTATION*


@@ -1087,6 +1087,20 @@ if !MMU
 source "arch/arm/Kconfig-nommu"
 endif
+config PJ4B_ERRATA_4742
+bool "PJ4B Errata 4742: IDLE Wake Up Commands can Cause the CPU Core to Cease Operation"
+depends on CPU_PJ4B && MACH_ARMADA_370
+default y
+help
+When coming out of either a Wait for Interrupt (WFI) or a Wait for
+Event (WFE) IDLE states, a specific timing sensitivity exists between
+the retiring WFI/WFE instructions and the newly issued subsequent
+instructions. This sensitivity can result in a CPU hang scenario.
+Workaround:
+The software must insert either a Data Synchronization Barrier (DSB)
+or Data Memory Barrier (DMB) command immediately after the WFI/WFE
+instruction
 config ARM_ERRATA_326103
 bool "ARM errata: FSR write bit incorrect on a SWP to read-only memory"
 depends on CPU_V6


@@ -32,6 +32,8 @@
 #define MPIDR_HWID_BITMASK 0xFFFFFF
+#define MPIDR_INVALID (~MPIDR_HWID_BITMASK)
 #define MPIDR_LEVEL_BITS 8
 #define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1)


@@ -230,6 +230,15 @@
 # endif
 #endif
+#ifdef CONFIG_CPU_PJ4B
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_pj4b
+# endif
+#endif
 #ifndef MULTI_CPU
 #define cpu_proc_init __glue(CPU_NAME,_proc_init)
 #define cpu_proc_fin __glue(CPU_NAME,_proc_fin)


@@ -49,7 +49,7 @@ static inline int cache_ops_need_broadcast(void)
 /*
 * Logical CPU mapping.
 */
-extern int __cpu_logical_map[];
+extern u32 __cpu_logical_map[];
 #define cpu_logical_map(cpu) __cpu_logical_map[cpu]
 /*
 * Retrieve logical cpu index corresponding to a given MPIDR[23:0]


@@ -82,7 +82,7 @@ void __init arm_dt_init_cpu_maps(void)
 u32 i, j, cpuidx = 1;
 u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
-u32 tmp_map[NR_CPUS] = { [0 ... NR_CPUS-1] = UINT_MAX };
+u32 tmp_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };
 bool bootcpu_valid = false;
 cpus = of_find_node_by_path("/cpus");
@@ -92,6 +92,9 @@ void __init arm_dt_init_cpu_maps(void)
 for_each_child_of_node(cpus, cpu) {
 u32 hwid;
+if (of_node_cmp(cpu->type, "cpu"))
+continue;
 pr_debug(" * %s...\n", cpu->full_name);
 /*
 * A device tree containing CPU nodes with missing "reg"
@@ -149,9 +152,10 @@ void __init arm_dt_init_cpu_maps(void)
 tmp_map[i] = hwid;
 }
-if (WARN(!bootcpu_valid, "DT missing boot CPU MPIDR[23:0], "
-"fall back to default cpu_logical_map\n"))
+if (!bootcpu_valid) {
+pr_warn("DT missing boot CPU MPIDR[23:0], fall back to default cpu_logical_map\n");
 return;
+}
 /*
 * Since the boot CPU node contains proper data, and all nodes have


@@ -444,7 +444,7 @@ void notrace cpu_init(void)
 : "r14");
 }
-int __cpu_logical_map[NR_CPUS];
+u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };
 void __init smp_setup_processor_id(void)
 {


@@ -57,6 +57,12 @@ void flush_dcache_page(struct page *page)
 }
 EXPORT_SYMBOL(flush_dcache_page);
+void flush_kernel_dcache_page(struct page *page)
+{
+__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
+}
+EXPORT_SYMBOL(flush_kernel_dcache_page);
 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 unsigned long uaddr, void *dst, const void *src,
 unsigned long len)


@@ -81,7 +81,6 @@ ENDPROC(cpu_fa526_reset)
 */
 .align 4
 ENTRY(cpu_fa526_do_idle)
-mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
 mov pc, lr


@@ -333,3 +333,8 @@ ENTRY(\name\()_tlb_fns)
 .endif
 .size \name\()_tlb_fns, . - \name\()_tlb_fns
 .endm
+.macro globl_equ x, y
+.globl \x
+.equ \x, \y
+.endm


@@ -138,6 +138,29 @@ ENTRY(cpu_v7_do_resume)
 mov r0, r8 @ control register
 b cpu_resume_mmu
 ENDPROC(cpu_v7_do_resume)
+#endif
+#ifdef CONFIG_CPU_PJ4B
+globl_equ cpu_pj4b_switch_mm, cpu_v7_switch_mm
+globl_equ cpu_pj4b_set_pte_ext, cpu_v7_set_pte_ext
+globl_equ cpu_pj4b_proc_init, cpu_v7_proc_init
+globl_equ cpu_pj4b_proc_fin, cpu_v7_proc_fin
+globl_equ cpu_pj4b_reset, cpu_v7_reset
+#ifdef CONFIG_PJ4B_ERRATA_4742
+ENTRY(cpu_pj4b_do_idle)
+dsb @ WFI may enter a low-power mode
+wfi
+dsb @barrier
+mov pc, lr
+ENDPROC(cpu_pj4b_do_idle)
+#else
+globl_equ cpu_pj4b_do_idle, cpu_v7_do_idle
+#endif
+globl_equ cpu_pj4b_dcache_clean_area, cpu_v7_dcache_clean_area
+globl_equ cpu_pj4b_do_suspend, cpu_v7_do_suspend
+globl_equ cpu_pj4b_do_resume, cpu_v7_do_resume
+globl_equ cpu_pj4b_suspend_size, cpu_v7_suspend_size
 #endif
 __CPUINIT
@@ -350,6 +373,9 @@ __v7_setup_stack:
 @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
 define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
+#ifdef CONFIG_CPU_PJ4B
+define_processor_functions pj4b, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
+#endif
 .section ".rodata"
@@ -362,7 +388,7 @@ __v7_setup_stack:
 /*
 * Standard v7 proc info content
 */
-.macro __v7_proc initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0
+.macro __v7_proc initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0, proc_fns = v7_processor_functions
 ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \
 PMD_SECT_AF | PMD_FLAGS_SMP | \mm_mmuflags)
 ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \
@@ -375,7 +401,7 @@ __v7_setup_stack:
 .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_FAST_MULT | \
 HWCAP_EDSP | HWCAP_TLS | \hwcaps
 .long cpu_v7_name
-.long v7_processor_functions
+.long \proc_fns
 .long v7wbi_tlb_fns
 .long v6_user_fns
 .long v7_cache_fns
@@ -407,12 +433,14 @@ __v7_ca9mp_proc_info:
 /*
 * Marvell PJ4B processor.
 */
+#ifdef CONFIG_CPU_PJ4B
 .type __v7_pj4b_proc_info, #object
 __v7_pj4b_proc_info:
 .long 0x560f5800
 .long 0xff0fff00
-__v7_proc __v7_pj4b_setup
+__v7_proc __v7_pj4b_setup, proc_fns = pj4b_processor_functions
 .size __v7_pj4b_proc_info, . - __v7_pj4b_proc_info
+#endif
 /*
 * ARM Ltd. Cortex A7 processor.


@@ -161,7 +161,7 @@ struct __large_struct { unsigned long buf[100]; };
 #define __get_user_check(x, ptr, size) \
 ({ \
-const __typeof__(ptr) __guc_ptr = (ptr); \
+const __typeof__(*(ptr))* __guc_ptr = (ptr); \
 int _e; \
 if (likely(__access_ok((unsigned long) __guc_ptr, (size)))) \
 _e = __get_user_nocheck((x), __guc_ptr, (size)); \


@@ -38,6 +38,7 @@ struct mn10300_cpuinfo boot_cpu_data;
 /* For PCI or other memory-mapped resources */
 unsigned long pci_mem_start = 0x18000000;
+static char __initdata cmd_line[COMMAND_LINE_SIZE];
 char redboot_command_line[COMMAND_LINE_SIZE] =
 "console=ttyS0,115200 root=/dev/mtdblock3 rw";
@@ -74,45 +75,19 @@ static const char *const mn10300_cputypes[] = {
 };
 /*
-*
+* Pick out the memory size. We look for mem=size,
+* where size is "size[KkMm]"
 */
-static void __init parse_mem_cmdline(char **cmdline_p)
+static int __init early_mem(char *p)
 {
-char *from, *to, c;
-/* save unparsed command line copy for /proc/cmdline */
-strcpy(boot_command_line, redboot_command_line);
-/* see if there's an explicit memory size option */
-from = redboot_command_line;
-to = redboot_command_line;
-c = ' ';
-for (;;) {
-if (c == ' ' && !memcmp(from, "mem=", 4)) {
-if (to != redboot_command_line)
-to--;
-memory_size = memparse(from + 4, &from);
-}
-c = *(from++);
-if (!c)
-break;
-*(to++) = c;
-}
-*to = '\0';
-*cmdline_p = redboot_command_line;
+memory_size = memparse(p, &p);
 if (memory_size == 0)
 panic("Memory size not known\n");
-memory_end = (unsigned long) CONFIG_KERNEL_RAM_BASE_ADDRESS +
-memory_size;
-if (memory_end > phys_memory_end)
-memory_end = phys_memory_end;
+return 0;
 }
+early_param("mem", early_mem);
 /*
 * architecture specific setup
@@ -125,7 +100,20 @@ void __init setup_arch(char **cmdline_p)
 cpu_init();
 unit_setup();
 smp_init_cpus();
-parse_mem_cmdline(cmdline_p);
+/* save unparsed command line copy for /proc/cmdline */
+strlcpy(boot_command_line, redboot_command_line, COMMAND_LINE_SIZE);
+/* populate cmd_line too for later use, preserving boot_command_line */
+strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
+*cmdline_p = cmd_line;
+parse_early_param();
+memory_end = (unsigned long) CONFIG_KERNEL_RAM_BASE_ADDRESS +
+memory_size;
+if (memory_end > phys_memory_end)
+memory_end = phys_memory_end;
 init_mm.start_code = (unsigned long)&_text;
 init_mm.end_code = (unsigned long) &_etext;


@@ -994,7 +994,7 @@ void pcibios_setup_bus_self(struct pci_bus *bus)
 ppc_md.pci_dma_bus_setup(bus);
 }
-void pcibios_setup_device(struct pci_dev *dev)
+static void pcibios_setup_device(struct pci_dev *dev)
 {
 /* Fixup NUMA node as it may not be setup yet by the generic
 * code and is needed by the DMA init
@@ -1015,6 +1015,17 @@ void pcibios_setup_device(struct pci_dev *dev)
 ppc_md.pci_irq_fixup(dev);
 }
+int pcibios_add_device(struct pci_dev *dev)
+{
+/*
+* We can only call pcibios_setup_device() after bus setup is complete,
+* since some of the platform specific DMA setup code depends on it.
+*/
+if (dev->bus->is_added)
+pcibios_setup_device(dev);
+return 0;
+}
 void pcibios_setup_bus_devices(struct pci_bus *bus)
 {
 struct pci_dev *dev;
@@ -1469,10 +1480,6 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
 if (ppc_md.pcibios_enable_device_hook(dev))
 return -EINVAL;
-/* avoid pcie irq fix up impact on cardbus */
-if (dev->hdr_type != PCI_HEADER_TYPE_CARDBUS)
-pcibios_setup_device(dev);
 return pci_enable_resources(dev, mask);
 }


@@ -294,8 +294,6 @@ void __init eeh_addr_cache_build(void)
 spin_lock_init(&pci_io_addr_cache_root.piar_lock);
 for_each_pci_dev(dev) {
-eeh_addr_cache_insert_dev(dev);
 dn = pci_device_to_OF_node(dev);
 if (!dn)
 continue;
@@ -308,6 +306,8 @@ void __init eeh_addr_cache_build(void)
 dev->dev.archdata.edev = edev;
 edev->pdev = dev;
+eeh_addr_cache_insert_dev(dev);
 eeh_sysfs_add_device(dev);
 }


@@ -639,7 +639,8 @@ struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe)
 if (pe->type & EEH_PE_PHB) {
 bus = pe->phb->bus;
-} else if (pe->type & EEH_PE_BUS) {
+} else if (pe->type & EEH_PE_BUS ||
+pe->type & EEH_PE_DEVICE) {
 edev = list_first_entry(&pe->edevs, struct eeh_dev, list);
 pdev = eeh_dev_to_pci_dev(edev);
 if (pdev)


@@ -97,22 +97,14 @@ static int fsl_indirect_read_config(struct pci_bus *bus, unsigned int devfn,
 return indirect_read_config(bus, devfn, offset, len, val);
 }
-static struct pci_ops fsl_indirect_pci_ops =
+#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
+static struct pci_ops fsl_indirect_pcie_ops =
 {
 .read = fsl_indirect_read_config,
 .write = indirect_write_config,
 };
-static void __init fsl_setup_indirect_pci(struct pci_controller* hose,
-resource_size_t cfg_addr,
-resource_size_t cfg_data, u32 flags)
-{
-setup_indirect_pci(hose, cfg_addr, cfg_data, flags);
-hose->ops = &fsl_indirect_pci_ops;
-}
-#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
 #define MAX_PHYS_ADDR_BITS 40
 static u64 pci64_dma_offset = 1ull << MAX_PHYS_ADDR_BITS;
@@ -504,13 +496,15 @@ int __init fsl_add_bridge(struct platform_device *pdev, int is_primary)
 if (!hose->private_data)
 goto no_bridge;
-fsl_setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4,
+setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4,
 PPC_INDIRECT_TYPE_BIG_ENDIAN);
 if (in_be32(&pci->block_rev1) < PCIE_IP_REV_3_0)
 hose->indirect_type |= PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK;
 if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
+/* use fsl_indirect_read_config for PCIe */
+hose->ops = &fsl_indirect_pcie_ops;
 /* For PCIE read HEADER_TYPE to identify controler mode */
 early_read_config_byte(hose, 0, 0, PCI_HEADER_TYPE, &hdr_type);
 if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE)
@@ -814,8 +808,8 @@ int __init mpc83xx_add_bridge(struct device_node *dev)
 if (ret)
 goto err0;
 } else {
-fsl_setup_indirect_pci(hose, rsrc_cfg.start,
+setup_indirect_pci(hose, rsrc_cfg.start,
 rsrc_cfg.start + 4, 0);
 }
 printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. "


@@ -50,9 +50,10 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
 struct dma_map_ops *dma_ops = get_dma_ops(dev);
+debug_dma_mapping_error(dev, dma_addr);
 if (dma_ops->mapping_error)
 return dma_ops->mapping_error(dev, dma_addr);
-return (dma_addr == 0UL);
+return (dma_addr == DMA_ERROR_CODE);
 }
 static inline void *dma_alloc_coherent(struct device *dev, size_t size,


@@ -754,9 +754,9 @@ static struct bin_attribute sys_reipl_fcp_scp_data_attr = {
 .write = reipl_fcp_scpdata_write,
 };
-DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%016llx\n",
+DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%llx\n",
 reipl_block_fcp->ipl_info.fcp.wwpn);
-DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%016llx\n",
+DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%llx\n",
 reipl_block_fcp->ipl_info.fcp.lun);
 DEFINE_IPL_ATTR_RW(reipl_fcp, bootprog, "%lld\n", "%lld\n",
 reipl_block_fcp->ipl_info.fcp.bootprog);
@@ -1323,9 +1323,9 @@ static struct shutdown_action __refdata reipl_action = {
 /* FCP dump device attributes */
-DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%016llx\n",
+DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%llx\n",
 dump_block_fcp->ipl_info.fcp.wwpn);
-DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%016llx\n",
+DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%llx\n",
 dump_block_fcp->ipl_info.fcp.lun);
 DEFINE_IPL_ATTR_RW(dump_fcp, bootprog, "%lld\n", "%lld\n",
 dump_block_fcp->ipl_info.fcp.bootprog);


@@ -312,6 +312,7 @@ void measurement_alert_subclass_unregister(void)
 }
 EXPORT_SYMBOL(measurement_alert_subclass_unregister);
+#ifdef CONFIG_SMP
 void synchronize_irq(unsigned int irq)
 {
 /*
@@ -320,6 +321,7 @@ void synchronize_irq(unsigned int irq)
 */
 }
 EXPORT_SYMBOL_GPL(synchronize_irq);
+#endif
 #ifndef CONFIG_PCI


@@ -123,7 +123,8 @@ void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
 continue;
 } else if ((addr <= chunk->addr) &&
 (addr + size >= chunk->addr + chunk->size)) {
-memset(chunk, 0 , sizeof(*chunk));
+memmove(chunk, chunk + 1, (MEMORY_CHUNKS-i-1) * sizeof(*chunk));
+memset(&mem_chunk[MEMORY_CHUNKS-1], 0, sizeof(*chunk));
 } else if (addr + size < chunk->addr + chunk->size) {
 chunk->size = chunk->addr + chunk->size - addr - size;
 chunk->addr = addr + size;


@@ -365,10 +365,14 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
 return insn.length;
 }
-static void __kprobes arch_copy_kprobe(struct kprobe *p)
+static int __kprobes arch_copy_kprobe(struct kprobe *p)
 {
+int ret;
 /* Copy an instruction with recovering if other optprobe modifies it.*/
-__copy_instruction(p->ainsn.insn, p->addr);
+ret = __copy_instruction(p->ainsn.insn, p->addr);
+if (!ret)
+return -EINVAL;
 /*
 * __copy_instruction can modify the displacement of the instruction,
@@ -384,6 +388,8 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
 /* Also, displacement change doesn't affect the first byte */
 p->opcode = p->ainsn.insn[0];
+return 0;
 }
 int __kprobes arch_prepare_kprobe(struct kprobe *p)
@@ -397,8 +403,8 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
 p->ainsn.insn = get_insn_slot();
 if (!p->ainsn.insn)
 return -ENOMEM;
-arch_copy_kprobe(p);
-return 0;
+return arch_copy_kprobe(p);
 }
 void __kprobes arch_arm_kprobe(struct kprobe *p)


@@ -45,10 +45,9 @@ struct cryptomgr_param {
 } nu32;
 } attrs[CRYPTO_MAX_ATTRS];
-char larval[CRYPTO_MAX_ALG_NAME];
 char template[CRYPTO_MAX_ALG_NAME];
-struct completion *completion;
+struct crypto_larval *larval;
 u32 otype;
 u32 omask;
@@ -87,7 +86,8 @@ static int cryptomgr_probe(void *data)
 crypto_tmpl_put(tmpl);
 out:
-complete_all(param->completion);
+complete_all(&param->larval->completion);
+crypto_alg_put(&param->larval->alg);
 kfree(param);
 module_put_and_exit(0);
 }
@@ -187,18 +187,19 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval)
 param->otype = larval->alg.cra_flags;
 param->omask = larval->mask;
-memcpy(param->larval, larval->alg.cra_name, CRYPTO_MAX_ALG_NAME);
-param->completion = &larval->completion;
+crypto_alg_get(&larval->alg);
+param->larval = larval;
 thread = kthread_run(cryptomgr_probe, param, "cryptomgr_probe");
 if (IS_ERR(thread))
-goto err_free_param;
+goto err_put_larval;
 wait_for_completion_interruptible(&larval->completion);
 return NOTIFY_STOP;
+err_put_larval:
+crypto_alg_put(&larval->alg);
 err_free_param:
 kfree(param);
 err_put_module:


@@ -34,12 +34,6 @@ EXPORT_SYMBOL_GPL(crypto_alg_sem);
 BLOCKING_NOTIFIER_HEAD(crypto_chain);
 EXPORT_SYMBOL_GPL(crypto_chain);
-static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg)
-{
-atomic_inc(&alg->cra_refcnt);
-return alg;
-}
 struct crypto_alg *crypto_mod_get(struct crypto_alg *alg)
 {
 return try_module_get(alg->cra_module) ? crypto_alg_get(alg) : NULL;


@@ -103,6 +103,12 @@ int crypto_register_notifier(struct notifier_block *nb);
 int crypto_unregister_notifier(struct notifier_block *nb);
 int crypto_probing_notify(unsigned long val, void *v);
+static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg)
+{
+atomic_inc(&alg->cra_refcnt);
+return alg;
+}
 static inline void crypto_alg_put(struct crypto_alg *alg)
 {
 if (atomic_dec_and_test(&alg->cra_refcnt) && alg->cra_destroy)


@@ -66,20 +66,21 @@ struct dock_station {
 spinlock_t dd_lock;
 struct mutex hp_lock;
 struct list_head dependent_devices;
-struct list_head hotplug_devices;
 struct list_head sibling;
 struct platform_device *dock_device;
 };
 static LIST_HEAD(dock_stations);
 static int dock_station_count;
+static DEFINE_MUTEX(hotplug_lock);
 struct dock_dependent_device {
 struct list_head list;
-struct list_head hotplug_list;
 acpi_handle handle;
-const struct acpi_dock_ops *ops;
-void *context;
+const struct acpi_dock_ops *hp_ops;
+void *hp_context;
+unsigned int hp_refcount;
+void (*hp_release)(void *);
 };
 #define DOCK_DOCKING 0x00000001
@@ -111,7 +112,6 @@ add_dock_dependent_device(struct dock_station *ds, acpi_handle handle)
 dd->handle = handle;
 INIT_LIST_HEAD(&dd->list);
-INIT_LIST_HEAD(&dd->hotplug_list);
 spin_lock(&ds->dd_lock);
 list_add_tail(&dd->list, &ds->dependent_devices);
@@ -121,35 +121,90 @@ add_dock_dependent_device(struct dock_station *ds, acpi_handle handle)
 }
 /*
-* dock_add_hotplug_device - associate a hotplug handler with the dock station
-* @ds: The dock station
-* @dd: The dependent device struct
-*
-* Add the dependent device to the dock's hotplug device list
+* dock_init_hotplug - Initialize a hotplug device on a docking station.
+* @dd: Dock-dependent device.
+* @ops: Dock operations to attach to the dependent device.
+* @context: Data to pass to the @ops callbacks and @release.
+* @init: Optional initialization routine to run after setting up context.
+* @release: Optional release routine to run on removal.
 */
-static void
-dock_add_hotplug_device(struct dock_station *ds,
-struct dock_dependent_device *dd)
+static int dock_init_hotplug(struct dock_dependent_device *dd,
+const struct acpi_dock_ops *ops, void *context,
+void (*init)(void *), void (*release)(void *))
 {
-mutex_lock(&ds->hp_lock);
-list_add_tail(&dd->hotplug_list, &ds->hotplug_devices);
-mutex_unlock(&ds->hp_lock);
+int ret = 0;
+mutex_lock(&hotplug_lock);
+if (dd->hp_context) {
+ret = -EEXIST;
+} else {
+dd->hp_refcount = 1;
+dd->hp_ops = ops;
+dd->hp_context = context;
+dd->hp_release = release;
+}
+if (!WARN_ON(ret) && init)
+init(context);
+mutex_unlock(&hotplug_lock);
+return ret;
 }
 /*
-* dock_del_hotplug_device - remove a hotplug handler from the dock station
-* @ds: The dock station
-* @dd: the dependent device struct
+* dock_release_hotplug - Decrement hotplug reference counter of dock device.
+* @dd: Dock-dependent device.
 *
-* Delete the dependent device from the dock's hotplug device list
+* Decrement the reference counter of @dd and if 0, detach its hotplug
+* operations from it, reset its context pointer and run the optional release
+* routine if present.
 */
-static void
-dock_del_hotplug_device(struct dock_station *ds,
-struct dock_dependent_device *dd)
+static void dock_release_hotplug(struct dock_dependent_device *dd)
 {
-mutex_lock(&ds->hp_lock);
-list_del(&dd->hotplug_list);
-mutex_unlock(&ds->hp_lock);
+void (*release)(void *) = NULL;
+void *context = NULL;
+mutex_lock(&hotplug_lock);
+if (dd->hp_context && !--dd->hp_refcount) {
+dd->hp_ops = NULL;
+context = dd->hp_context;
+dd->hp_context = NULL;
+release = dd->hp_release;
+dd->hp_release = NULL;
+}
+if (release && context)
+release(context);
+mutex_unlock(&hotplug_lock);
+}
+static void dock_hotplug_event(struct dock_dependent_device *dd, u32 event,
+bool uevent)
+{
+acpi_notify_handler cb = NULL;
+bool run = false;
+mutex_lock(&hotplug_lock);
+if (dd->hp_context) {
+run = true;
+dd->hp_refcount++;
+if (dd->hp_ops)
+cb = uevent ? dd->hp_ops->uevent : dd->hp_ops->handler;
+}
+mutex_unlock(&hotplug_lock);
+if (!run)
+return;
+if (cb)
+cb(dd->handle, event, dd->hp_context);
+dock_release_hotplug(dd);
 }
 /*
@@ -360,9 +415,8 @@ static void hotplug_dock_devices(struct dock_station *ds, u32 event)
 /*
 * First call driver specific hotplug functions
 */
-list_for_each_entry(dd, &ds->hotplug_devices, hotplug_list)
-if (dd->ops && dd->ops->handler)
-dd->ops->handler(dd->handle, event, dd->context);
+list_for_each_entry(dd, &ds->dependent_devices, list)
+dock_hotplug_event(dd, event, false);
 /*
 * Now make sure that an acpi_device is created for each
@@ -398,9 +452,8 @@ static void dock_event(struct dock_station *ds, u32 event, int num)
 if (num == DOCK_EVENT)
 kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
-list_for_each_entry(dd, &ds->hotplug_devices, hotplug_list)
-if (dd->ops && dd->ops->uevent)
-dd->ops->uevent(dd->handle, event, dd->context);
+list_for_each_entry(dd, &ds->dependent_devices, list)
+dock_hotplug_event(dd, event, true);
 if (num != DOCK_EVENT)
 kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
@@ -570,19 +623,24 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifier);
 * @handle: the handle of the device
 * @ops: handlers to call after docking
 * @context: device specific data
+* @init: Optional initialization routine to run after registration
+* @release: Optional release routine to run on unregistration
 *
 * If a driver would like to perform a hotplug operation after a dock
 * event, they can register an acpi_notifiy_handler to be called by
 * the dock driver after _DCK is executed.
 */
-int
-register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
-void *context)
+int register_hotplug_dock_device(acpi_handle handle,
+const struct acpi_dock_ops *ops, void *context,
+void (*init)(void *), void (*release)(void *))
 {
 struct dock_dependent_device *dd;
 struct dock_station *dock_station;
 int ret = -EINVAL;
+if (WARN_ON(!context))
+return -EINVAL;
 if (!dock_station_count)
 return -ENODEV;
@@ -597,12 +655,8 @@ register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops
 * ops
 */
 dd = find_dock_dependent_device(dock_station, handle);
-if (dd) {
-dd->ops = ops;
-dd->context = context;
-dock_add_hotplug_device(dock_station, dd);
+if (dd && !dock_init_hotplug(dd, ops, context, init, release))
 ret = 0;
-}
 }
 return ret;
@@ -624,7 +678,7 @@ void unregister_hotplug_dock_device(acpi_handle handle)
 list_for_each_entry(dock_station, &dock_stations, sibling) {
 dd = find_dock_dependent_device(dock_station, handle);
 if (dd)
-dock_del_hotplug_device(dock_station, dd);
+dock_release_hotplug(dd);
 }
 }
 EXPORT_SYMBOL_GPL(unregister_hotplug_dock_device);
@@ -953,7 +1007,6 @@ static int __init dock_add(acpi_handle handle)
 mutex_init(&dock_station->hp_lock);
 spin_lock_init(&dock_station->dd_lock);
 INIT_LIST_HEAD(&dock_station->sibling);
-INIT_LIST_HEAD(&dock_station->hotplug_devices);
 ATOMIC_INIT_NOTIFIER_HEAD(&dock_notifier_list);
 INIT_LIST_HEAD(&dock_station->dependent_devices);
@@ -993,30 +1046,6 @@ err_unregister:
 return ret;
 }
-/**
-* dock_remove - free up resources related to the dock station
-*/
-static int dock_remove(struct dock_station *ds)
-{
-struct dock_dependent_device *dd, *tmp;
-struct platform_device *dock_device = ds->dock_device;
-if (!dock_station_count)
-return 0;
-/* remove dependent devices */
-list_for_each_entry_safe(dd, tmp, &ds->dependent_devices, list)
-kfree(dd);
-list_del(&ds->sibling);
-/* cleanup sysfs */
-sysfs_remove_group(&dock_device->dev.kobj, &dock_attribute_group);
-platform_device_unregister(dock_device);
-return 0;
-}
 /*
 * find_dock_and_bay - look for dock stations and bays
 * @handle: acpi handle of a device
@@ -1035,7 +1064,7 @@ find_dock_and_bay(acpi_handle handle, u32 lvl, void *context, void **rv)
 return AE_OK;
 }
-static int __init dock_init(void)
+int __init acpi_dock_init(void)
 {
 if (acpi_disabled)
 return 0;
@@ -1054,19 +1083,3 @@ static int __init dock_init(void)
 ACPI_DOCK_DRIVER_DESCRIPTION, dock_station_count);
 return 0;
 }
-static void __exit dock_exit(void)
-{
-struct dock_station *tmp, *dock_station;
-unregister_acpi_bus_notifier(&dock_acpi_notifier);
-list_for_each_entry_safe(dock_station, tmp, &dock_stations, sibling)
-dock_remove(dock_station);
-}
-/*
-* Must be called before drivers of devices in dock, otherwise we can't know
-* which devices are in a dock
-*/
-subsys_initcall(dock_init);
-module_exit(dock_exit);


@@ -40,6 +40,11 @@ void acpi_container_init(void);
 #else
 static inline void acpi_container_init(void) {}
 #endif
+#ifdef CONFIG_ACPI_DOCK
+void acpi_dock_init(void);
+#else
+static inline void acpi_dock_init(void) {}
+#endif
 #ifdef CONFIG_ACPI_HOTPLUG_MEMORY
 void acpi_memory_hotplug_init(void);
 #else


@@ -2042,6 +2042,7 @@ int __init acpi_scan_init(void)
 acpi_lpss_init();
 acpi_container_init();
 acpi_memory_hotplug_init();
+acpi_dock_init();
 mutex_lock(&acpi_scan_lock);
 /*


@@ -156,8 +156,10 @@ static void ata_acpi_handle_hotplug(struct ata_port *ap, struct ata_device *dev,
 spin_unlock_irqrestore(ap->lock, flags);
-if (wait)
+if (wait) {
 ata_port_wait_eh(ap);
+flush_work(&ap->hotplug_task.work);
+}
 }
 static void ata_acpi_dev_notify_dock(acpi_handle handle, u32 event, void *data)
@@ -214,6 +216,39 @@ static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
 .uevent = ata_acpi_ap_uevent,
 };
+void ata_acpi_hotplug_init(struct ata_host *host)
+{
+int i;
+for (i = 0; i < host->n_ports; i++) {
+struct ata_port *ap = host->ports[i];
+acpi_handle handle;
+struct ata_device *dev;
+if (!ap)
+continue;
+handle = ata_ap_acpi_handle(ap);
+if (handle) {
+/* we might be on a docking station */
+register_hotplug_dock_device(handle,
+&ata_acpi_ap_dock_ops, ap,
+NULL, NULL);
+}
+ata_for_each_dev(dev, &ap->link, ALL) {
+handle = ata_dev_acpi_handle(dev);
+if (!handle)
+continue;
+/* we might be on a docking station */
+register_hotplug_dock_device(handle,
+&ata_acpi_dev_dock_ops,
+dev, NULL, NULL);
+}
+}
+}
 /*
 * ata_acpi_dissociate - dissociate ATA host from ACPI objects
 * @host: target ATA host


@@ -6148,6 +6148,8 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
 if (rc)
 goto err_tadd;
+ata_acpi_hotplug_init(host);
 /* set cable, sata_spd_limit and report */
 for (i = 0; i < host->n_ports; i++) {
 struct ata_port *ap = host->ports[i];


@@ -122,6 +122,7 @@ extern int ata_acpi_register(void);
 extern void ata_acpi_unregister(void);
 extern void ata_acpi_bind(struct ata_device *dev);
 extern void ata_acpi_unbind(struct ata_device *dev);
+extern void ata_acpi_hotplug_init(struct ata_host *host);
 #else
 static inline void ata_acpi_dissociate(struct ata_host *host) { }
 static inline int ata_acpi_on_suspend(struct ata_port *ap) { return 0; }
@@ -134,6 +135,7 @@ static inline int ata_acpi_register(void) { return 0; }
 static inline void ata_acpi_unregister(void) { }
 static inline void ata_acpi_bind(struct ata_device *dev) { }
 static inline void ata_acpi_unbind(struct ata_device *dev) { }
+static inline void ata_acpi_hotplug_init(struct ata_host *host) {}
 #endif
 /* libata-scsi.c */


@@ -2252,13 +2252,17 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
 obj_request->pages, length,
 offset & ~PAGE_MASK, false, false);
+/*
+* set obj_request->img_request before formatting
+* the osd_request so that it gets the right snapc
+*/
+rbd_img_obj_request_add(img_request, obj_request);
 if (write_request)
 rbd_osd_req_format_write(obj_request);
 else
 rbd_osd_req_format_read(obj_request);
 obj_request->img_offset = img_offset;
-rbd_img_obj_request_add(img_request, obj_request);
 img_offset += length;
 resid -= length;
@@ -4243,6 +4247,10 @@ static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
 down_write(&rbd_dev->header_rwsem);
+ret = rbd_dev_v2_image_size(rbd_dev);
+if (ret)
+goto out;
 if (first_time) {
 ret = rbd_dev_v2_header_onetime(rbd_dev);
 if (ret)
@@ -4276,10 +4284,6 @@ static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
 "is EXPERIMENTAL!");
 }
-ret = rbd_dev_v2_image_size(rbd_dev);
-if (ret)
-goto out;
 if (rbd_dev->spec->snap_id == CEPH_NOSNAP)
 if (rbd_dev->mapping.size != rbd_dev->header.image_size)
 rbd_dev->mapping.size = rbd_dev->header.image_size;


@@ -498,6 +498,10 @@ static int btmrvl_service_main_thread(void *data)
 add_wait_queue(&thread->wait_q, &wait);
 set_current_state(TASK_INTERRUPTIBLE);
+if (kthread_should_stop()) {
+BT_DBG("main_thread: break from main thread");
+break;
+}
 if (adapter->wakeup_tries ||
 ((!adapter->int_count) &&
@@ -513,11 +517,6 @@ static int btmrvl_service_main_thread(void *data)
 BT_DBG("main_thread woke up");
-if (kthread_should_stop()) {
-BT_DBG("main_thread: break from main thread");
-break;
-}
 spin_lock_irqsave(&priv->driver_lock, flags);
 if (adapter->int_count) {
 adapter->int_count = 0;


@@ -47,6 +47,8 @@ static struct od_ops od_ops;
 static struct cpufreq_governor cpufreq_gov_ondemand;
 #endif
+static unsigned int default_powersave_bias;
 static void ondemand_powersave_bias_init_cpu(int cpu)
 {
 struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
@@ -543,7 +545,7 @@ static int od_init(struct dbs_data *dbs_data)
 tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
 tuners->ignore_nice = 0;
-tuners->powersave_bias = 0;
+tuners->powersave_bias = default_powersave_bias;
 tuners->io_is_busy = should_io_be_busy();
 dbs_data->tuners = tuners;
@@ -585,6 +587,7 @@ static void od_set_powersave_bias(unsigned int powersave_bias)
 unsigned int cpu;
 cpumask_t done;
+default_powersave_bias = powersave_bias;
 cpumask_clear(&done);
 get_online_cpus();
@@ -593,11 +596,17 @@ static void od_set_powersave_bias(unsigned int powersave_bias)
 continue;
 policy = per_cpu(od_cpu_dbs_info, cpu).cdbs.cur_policy;
-dbs_data = policy->governor_data;
-od_tuners = dbs_data->tuners;
-od_tuners->powersave_bias = powersave_bias;
+if (!policy)
+continue;
 cpumask_or(&done, &done, policy->cpus);
+if (policy->governor != &cpufreq_gov_ondemand)
+continue;
+dbs_data = policy->governor_data;
+od_tuners = dbs_data->tuners;
+od_tuners->powersave_bias = default_powersave_bias;
 }
 put_online_cpus();
 }


@@ -1094,6 +1094,9 @@ static int omap_gpio_probe(struct platform_device *pdev)
 const struct omap_gpio_platform_data *pdata;
 struct resource *res;
 struct gpio_bank *bank;
+#ifdef CONFIG_ARCH_OMAP1
+int irq_base;
+#endif
 match = of_match_device(of_match_ptr(omap_gpio_match), dev);
@@ -1135,11 +1138,28 @@ static int omap_gpio_probe(struct platform_device *pdev)
 pdata->get_context_loss_count;
 }
+#ifdef CONFIG_ARCH_OMAP1
+/*
+* REVISIT: Once we have OMAP1 supporting SPARSE_IRQ, we can drop
+* irq_alloc_descs() and irq_domain_add_legacy() and just use a
+* linear IRQ domain mapping for all OMAP platforms.
+*/
+irq_base = irq_alloc_descs(-1, 0, bank->width, 0);
+if (irq_base < 0) {
+dev_err(dev, "Couldn't allocate IRQ numbers\n");
+return -ENODEV;
+}
+bank->domain = irq_domain_add_legacy(node, bank->width, irq_base,
+0, &irq_domain_simple_ops, NULL);
+#else
 bank->domain = irq_domain_add_linear(node, bank->width,
 &irq_domain_simple_ops, NULL);
-if (!bank->domain)
+#endif
+if (!bank->domain) {
+dev_err(dev, "Couldn't register an IRQ domain\n");
 return -ENODEV;
+}
 if (bank->regs->set_dataout && bank->regs->clr_dataout)
 bank->set_dataout = _set_gpio_dataout_reg;


@@ -1697,6 +1697,8 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
 struct drm_gem_object *gem_obj, int flags);
+void i915_gem_restore_fences(struct drm_device *dev);
 /* i915_gem_context.c */
 void i915_gem_context_init(struct drm_device *dev);
 void i915_gem_context_fini(struct drm_device *dev);


@@ -1801,7 +1801,14 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
 gfp &= ~(__GFP_IO | __GFP_WAIT);
 }
+#ifdef CONFIG_SWIOTLB
+if (swiotlb_nr_tbl()) {
+st->nents++;
+sg_set_page(sg, page, PAGE_SIZE, 0);
+sg = sg_next(sg);
+continue;
+}
+#endif
 if (!i || page_to_pfn(page) != last_pfn + 1) {
 if (i)
 sg = sg_next(sg);
@@ -1812,8 +1819,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 }
 last_pfn = page_to_pfn(page);
 }
+#ifdef CONFIG_SWIOTLB
-sg_mark_end(sg);
+if (!swiotlb_nr_tbl())
+#endif
+sg_mark_end(sg);
 obj->pages = st;
 if (i915_gem_object_needs_bit17_swizzle(obj))
@@ -2117,25 +2126,15 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
 }
 }
-static void i915_gem_reset_fences(struct drm_device *dev)
+void i915_gem_restore_fences(struct drm_device *dev)
 {
 struct drm_i915_private *dev_priv = dev->dev_private;
 int i;
 for (i = 0; i < dev_priv->num_fence_regs; i++) {
 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
-if (reg->obj)
-i915_gem_object_fence_lost(reg->obj);
-i915_gem_write_fence(dev, i, NULL);
-reg->pin_count = 0;
-reg->obj = NULL;
-INIT_LIST_HEAD(&reg->lru_list);
+i915_gem_write_fence(dev, i, reg->obj);
 }
-INIT_LIST_HEAD(&dev_priv->mm.fence_list);
 }
 void i915_gem_reset(struct drm_device *dev)
@@ -2158,8 +2157,7 @@ void i915_gem_reset(struct drm_device *dev)
 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
 }
-/* The fence registers are invalidated so clear them out */
-i915_gem_reset_fences(dev);
+i915_gem_restore_fences(dev);
 }
 /*
@@ -3865,8 +3863,6 @@ i915_gem_idle(struct drm_device *dev)
 if (!drm_core_check_feature(dev, DRIVER_MODESET))
 i915_gem_evict_everything(dev);
-i915_gem_reset_fences(dev);
 /* Hack! Don't let anybody do execbuf while we don't control the chip.
 * We need to replace this with a semaphore, or something.
 * And not confound mm.suspended!
@@ -4193,7 +4189,8 @@ i915_gem_load(struct drm_device *dev)
 dev_priv->num_fence_regs = 8;
 /* Initialize fence registers to zero */
-i915_gem_reset_fences(dev);
+INIT_LIST_HEAD(&dev_priv->mm.fence_list);
+i915_gem_restore_fences(dev);
 i915_gem_detect_bit_6_swizzle(dev);
 init_waitqueue_head(&dev_priv->pending_flip_queue);


@@ -384,6 +384,7 @@ int i915_restore_state(struct drm_device *dev)
 mutex_lock(&dev->struct_mutex);
+i915_gem_restore_fences(dev);
 i915_restore_display(dev);
 if (!drm_core_check_feature(dev, DRIVER_MODESET)) {


@@ -171,6 +171,11 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
 if (user_cmd.command_size > PAGE_SIZE - sizeof(union qxl_release_info))
 return -EINVAL;
+if (!access_ok(VERIFY_READ,
+(void *)(unsigned long)user_cmd.command,
+user_cmd.command_size))
+return -EFAULT;
 ret = qxl_alloc_release_reserved(qdev,
 sizeof(union qxl_release_info) +
 user_cmd.command_size,


@@ -137,7 +137,7 @@ static const struct xpad_device {
 { 0x0738, 0x4540, "Mad Catz Beat Pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
 { 0x0738, 0x4556, "Mad Catz Lynx Wireless Controller", 0, XTYPE_XBOX },
 { 0x0738, 0x4716, "Mad Catz Wired Xbox 360 Controller", 0, XTYPE_XBOX360 },
-{ 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", XTYPE_XBOX360 },
+{ 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
 { 0x0738, 0x4738, "Mad Catz Wired Xbox 360 Controller (SFIV)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
 { 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
 { 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 },


@@ -431,6 +431,7 @@ config KEYBOARD_TEGRA
 config KEYBOARD_OPENCORES
 tristate "OpenCores Keyboard Controller"
+depends on HAS_IOMEM
 help
 Say Y here if you want to use the OpenCores Keyboard Controller
 http://www.opencores.org/project,keyboardcontroller


@@ -205,6 +205,7 @@ config SERIO_XILINX_XPS_PS2
 config SERIO_ALTERA_PS2
 tristate "Altera UP PS/2 controller"
+depends on HAS_IOMEM
 help
 Say Y here if you have Altera University Program PS/2 ports.


@@ -363,6 +363,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
 case 0x140802: /* Intuos4/5 13HD/24HD Classic Pen */
 case 0x160802: /* Cintiq 13HD Pro Pen */
 case 0x180802: /* DTH2242 Pen */
+case 0x100802: /* Intuos4/5 13HD/24HD General Pen */
 wacom->tool[idx] = BTN_TOOL_PEN;
 break;
@@ -401,6 +402,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
 case 0x10080c: /* Intuos4/5 13HD/24HD Art Pen Eraser */
 case 0x16080a: /* Cintiq 13HD Pro Pen Eraser */
 case 0x18080a: /* DTH2242 Eraser */
+case 0x10080a: /* Intuos4/5 13HD/24HD General Pen Eraser */
 wacom->tool[idx] = BTN_TOOL_RUBBER;
 break;


@@ -116,6 +116,15 @@ static int ttsp_send_command(struct cyttsp *ts, u8 cmd)
 return ttsp_write_block_data(ts, CY_REG_BASE, sizeof(cmd), &cmd);
 }
+static int cyttsp_handshake(struct cyttsp *ts)
+{
+if (ts->pdata->use_hndshk)
+return ttsp_send_command(ts,
+ts->xy_data.hst_mode ^ CY_HNDSHK_BIT);
+return 0;
+}
 static int cyttsp_load_bl_regs(struct cyttsp *ts)
 {
 memset(&ts->bl_data, 0, sizeof(ts->bl_data));
@@ -133,7 +142,7 @@ static int cyttsp_exit_bl_mode(struct cyttsp *ts)
 memcpy(bl_cmd, bl_command, sizeof(bl_command));
 if (ts->pdata->bl_keys)
 memcpy(&bl_cmd[sizeof(bl_command) - CY_NUM_BL_KEYS],
-ts->pdata->bl_keys, sizeof(bl_command));
+ts->pdata->bl_keys, CY_NUM_BL_KEYS);
 error = ttsp_write_block_data(ts, CY_REG_BASE,
 sizeof(bl_cmd), bl_cmd);
@@ -167,6 +176,10 @@ static int cyttsp_set_operational_mode(struct cyttsp *ts)
 if (error)
 return error;
+error = cyttsp_handshake(ts);
+if (error)
+return error;
 return ts->xy_data.act_dist == CY_ACT_DIST_DFLT ? -EIO : 0;
 }
@@ -188,6 +201,10 @@ static int cyttsp_set_sysinfo_mode(struct cyttsp *ts)
 if (error)
 return error;
+error = cyttsp_handshake(ts);
+if (error)
+return error;
 if (!ts->sysinfo_data.tts_verh && !ts->sysinfo_data.tts_verl)
 return -EIO;
@@ -344,12 +361,9 @@ static irqreturn_t cyttsp_irq(int irq, void *handle)
 goto out;
 /* provide flow control handshake */
-if (ts->pdata->use_hndshk) {
-error = ttsp_send_command(ts,
-ts->xy_data.hst_mode ^ CY_HNDSHK_BIT);
-if (error)
-goto out;
-}
+error = cyttsp_handshake(ts);
+if (error)
+goto out;
 if (unlikely(ts->state == CY_IDLE_STATE))
 goto out;


@@ -67,8 +67,8 @@ struct cyttsp_xydata {
 /* TTSP System Information interface definition */
 struct cyttsp_sysinfo_data {
 u8 hst_mode;
-u8 mfg_cmd;
 u8 mfg_stat;
+u8 mfg_cmd;
 u8 cid[3];
 u8 tt_undef1;
 u8 uid[8];


@@ -107,7 +107,7 @@ static struct mfd_cell tps6586x_cell[] = {
 .name = "tps6586x-gpio",
 },
 {
-.name = "tps6586x-pmic",
+.name = "tps6586x-regulator",
 },
 {
 .name = "tps6586x-rtc",


@@ -2413,7 +2413,8 @@ static void bond_miimon_commit(struct bonding *bond)
 pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex.\n",
 bond->dev->name, slave->dev->name,
-slave->speed, slave->duplex ? "full" : "half");
+slave->speed == SPEED_UNKNOWN ? 0 : slave->speed,
+slave->duplex ? "full" : "half");
 /* notify ad that the link status has changed */
 if (bond->params.mode == BOND_MODE_8023AD)


@@ -977,7 +977,7 @@ static int usb_8dev_probe(struct usb_interface *intf,
 	err = usb_8dev_cmd_version(priv, &version);
 	if (err) {
 		netdev_err(netdev, "can't get firmware version\n");
-		goto cleanup_cmd_msg_buffer;
+		goto cleanup_unregister_candev;
 	} else {
 		netdev_info(netdev,
 			"firmware: %d.%d, hardware: %d.%d\n",
@@ -989,6 +989,9 @@ static int usb_8dev_probe(struct usb_interface *intf,
 	return 0;
+cleanup_unregister_candev:
+	unregister_netdev(priv->netdev);
 cleanup_cmd_msg_buffer:
 	kfree(priv->cmd_msg_buffer);
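The fix above restores the usual probe() unwind ordering: once register_candev() has succeeded, any later failure has to jump to a label that unregisters it before the earlier allocations are freed, with each label falling through to the next older one. A minimal sketch of that pattern, with made-up helper names standing in for the driver's real steps:

/* Hypothetical stubs standing in for real acquire/release steps. */
static int acquire_a(void) { return 0; }
static int acquire_b(void) { return 0; }
static int acquire_c(void) { return 0; }
static void release_a(void) { }
static void release_b(void) { }

static int example_probe(void)
{
	int err;

	err = acquire_a();	/* e.g. allocate the command buffer */
	if (err)
		return err;
	err = acquire_b();	/* e.g. register_candev() */
	if (err)
		goto cleanup_a;
	err = acquire_c();	/* e.g. query the firmware version */
	if (err)
		goto cleanup_b;	/* must undo b as well, not just a */
	return 0;

cleanup_b:
	release_b();
cleanup_a:
	release_a();
	return err;
}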
@@ -67,4 +67,22 @@ config ATL1C
 	  To compile this driver as a module, choose M here. The module
 	  will be called atl1c.
+config ALX
+	tristate "Qualcomm Atheros AR816x/AR817x support"
+	depends on PCI
+	select CRC32
+	select NET_CORE
+	select MDIO
+	help
+	  This driver supports the Qualcomm Atheros L1F ethernet adapter,
+	  i.e. the following chipsets:
+	    1969:1091 - AR8161 Gigabit Ethernet
+	    1969:1090 - AR8162 Fast Ethernet
+	    1969:10A1 - AR8171 Gigabit Ethernet
+	    1969:10A0 - AR8172 Fast Ethernet
+	  To compile this driver as a module, choose M here. The module
+	  will be called alx.
 endif # NET_VENDOR_ATHEROS
@@ -6,3 +6,4 @@ obj-$(CONFIG_ATL1) += atlx/
 obj-$(CONFIG_ATL2) += atlx/
 obj-$(CONFIG_ATL1E) += atl1e/
 obj-$(CONFIG_ATL1C) += atl1c/
+obj-$(CONFIG_ALX) += alx/
@@ -0,0 +1,3 @@
obj-$(CONFIG_ALX) += alx.o
alx-objs := main.o ethtool.o hw.o
ccflags-y += -D__CHECK_ENDIAN__
@@ -0,0 +1,114 @@
/*
* Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
*
* This file is free software: you may copy, redistribute and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation, either version 2 of the License, or (at your
* option) any later version.
*
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* This file incorporates work covered by the following copyright and
* permission notice:
*
* Copyright (c) 2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _ALX_H_
#define _ALX_H_
#include <linux/types.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include "hw.h"
#define ALX_WATCHDOG_TIME (5 * HZ)
struct alx_buffer {
struct sk_buff *skb;
DEFINE_DMA_UNMAP_ADDR(dma);
DEFINE_DMA_UNMAP_LEN(size);
};
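DEFINE_DMA_UNMAP_ADDR()/DEFINE_DMA_UNMAP_LEN() expand to real fields only on configurations that need to remember the mapping for a later unmap, so the slots above have to be accessed through the dma_unmap_* helpers rather than directly. The sketch below shows how such a bookkeeping slot is typically filled and torn down; the function names are invented for illustration and the driver's real handling lives in main.c, which is not shown here.

/* Illustrative only: map one skb into an alx_buffer slot. */
static int example_map(struct device *dev, struct alx_buffer *buf,
		       struct sk_buff *skb)
{
	dma_addr_t addr = dma_map_single(dev, skb->data, skb->len,
					 DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	buf->skb = skb;
	dma_unmap_addr_set(buf, dma, addr);	/* store the unmap cookie */
	dma_unmap_len_set(buf, size, skb->len);
	return 0;
}

/* ...and the matching teardown. */
static void example_unmap(struct device *dev, struct alx_buffer *buf)
{
	dma_unmap_single(dev, dma_unmap_addr(buf, dma),
			 dma_unmap_len(buf, size), DMA_TO_DEVICE);
	buf->skb = NULL;
}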
struct alx_rx_queue {
struct alx_rrd *rrd;
dma_addr_t rrd_dma;
struct alx_rfd *rfd;
dma_addr_t rfd_dma;
struct alx_buffer *bufs;
u16 write_idx, read_idx;
u16 rrd_read_idx;
};
#define ALX_RX_ALLOC_THRESH 32
struct alx_tx_queue {
struct alx_txd *tpd;
dma_addr_t tpd_dma;
struct alx_buffer *bufs;
u16 write_idx, read_idx;
};
#define ALX_DEFAULT_TX_WORK 128
enum alx_device_quirks {
ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG = BIT(0),
};
struct alx_priv {
struct net_device *dev;
struct alx_hw hw;
/* all descriptor memory */
struct {
dma_addr_t dma;
void *virt;
int size;
} descmem;
/* protect int_mask updates */
spinlock_t irq_lock;
u32 int_mask;
int tx_ringsz;
int rx_ringsz;
int rxbuf_size;
struct napi_struct napi;
struct alx_tx_queue txq;
struct alx_rx_queue rxq;
struct work_struct link_check_wk;
struct work_struct reset_wk;
u16 msg_enable;
bool msi;
};
extern const struct ethtool_ops alx_ethtool_ops;
extern const char alx_drv_name[];
#endif
@@ -0,0 +1,272 @@
/*
* Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
*
* This file is free software: you may copy, redistribute and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation, either version 2 of the License, or (at your
* option) any later version.
*
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* This file incorporates work covered by the following copyright and
* permission notice:
*
* Copyright (c) 2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/pci.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/interrupt.h>
#include <asm/byteorder.h>
#include "alx.h"
#include "reg.h"
#include "hw.h"
static int alx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
struct alx_priv *alx = netdev_priv(netdev);
struct alx_hw *hw = &alx->hw;
ecmd->supported = SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half |
SUPPORTED_100baseT_Full |
SUPPORTED_Autoneg |
SUPPORTED_TP |
SUPPORTED_Pause;
if (alx_hw_giga(hw))
ecmd->supported |= SUPPORTED_1000baseT_Full;
ecmd->advertising = ADVERTISED_TP;
if (hw->adv_cfg & ADVERTISED_Autoneg)
ecmd->advertising |= hw->adv_cfg;
ecmd->port = PORT_TP;
ecmd->phy_address = 0;
if (hw->adv_cfg & ADVERTISED_Autoneg)
ecmd->autoneg = AUTONEG_ENABLE;
else
ecmd->autoneg = AUTONEG_DISABLE;
ecmd->transceiver = XCVR_INTERNAL;
if (hw->flowctrl & ALX_FC_ANEG && hw->adv_cfg & ADVERTISED_Autoneg) {
if (hw->flowctrl & ALX_FC_RX) {
ecmd->advertising |= ADVERTISED_Pause;
if (!(hw->flowctrl & ALX_FC_TX))
ecmd->advertising |= ADVERTISED_Asym_Pause;
} else if (hw->flowctrl & ALX_FC_TX) {
ecmd->advertising |= ADVERTISED_Asym_Pause;
}
}
if (hw->link_speed != SPEED_UNKNOWN) {
ethtool_cmd_speed_set(ecmd,
hw->link_speed - hw->link_speed % 10);
ecmd->duplex = hw->link_speed % 10;
} else {
ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
ecmd->duplex = DUPLEX_UNKNOWN;
}
return 0;
}
static int alx_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
struct alx_priv *alx = netdev_priv(netdev);
struct alx_hw *hw = &alx->hw;
u32 adv_cfg;
ASSERT_RTNL();
if (ecmd->autoneg == AUTONEG_ENABLE) {
if (ecmd->advertising & ADVERTISED_1000baseT_Half)
return -EINVAL;
adv_cfg = ecmd->advertising | ADVERTISED_Autoneg;
} else {
int speed = ethtool_cmd_speed(ecmd);
switch (speed + ecmd->duplex) {
case SPEED_10 + DUPLEX_HALF:
adv_cfg = ADVERTISED_10baseT_Half;
break;
case SPEED_10 + DUPLEX_FULL:
adv_cfg = ADVERTISED_10baseT_Full;
break;
case SPEED_100 + DUPLEX_HALF:
adv_cfg = ADVERTISED_100baseT_Half;
break;
case SPEED_100 + DUPLEX_FULL:
adv_cfg = ADVERTISED_100baseT_Full;
break;
default:
return -EINVAL;
}
}
hw->adv_cfg = adv_cfg;
return alx_setup_speed_duplex(hw, adv_cfg, hw->flowctrl);
}
static void alx_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct alx_priv *alx = netdev_priv(netdev);
struct alx_hw *hw = &alx->hw;
if (hw->flowctrl & ALX_FC_ANEG &&
hw->adv_cfg & ADVERTISED_Autoneg)
pause->autoneg = AUTONEG_ENABLE;
else
pause->autoneg = AUTONEG_DISABLE;
if (hw->flowctrl & ALX_FC_TX)
pause->tx_pause = 1;
else
pause->tx_pause = 0;
if (hw->flowctrl & ALX_FC_RX)
pause->rx_pause = 1;
else
pause->rx_pause = 0;
}
static int alx_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct alx_priv *alx = netdev_priv(netdev);
struct alx_hw *hw = &alx->hw;
int err = 0;
bool reconfig_phy = false;
u8 fc = 0;
if (pause->tx_pause)
fc |= ALX_FC_TX;
if (pause->rx_pause)
fc |= ALX_FC_RX;
if (pause->autoneg)
fc |= ALX_FC_ANEG;
ASSERT_RTNL();
/* restart auto-neg for auto-mode */
if (hw->adv_cfg & ADVERTISED_Autoneg) {
if (!((fc ^ hw->flowctrl) & ALX_FC_ANEG))
reconfig_phy = true;
if (fc & hw->flowctrl & ALX_FC_ANEG &&
(fc ^ hw->flowctrl) & (ALX_FC_RX | ALX_FC_TX))
reconfig_phy = true;
}
if (reconfig_phy) {
err = alx_setup_speed_duplex(hw, hw->adv_cfg, fc);
return err;
}
/* flow control on mac */
if ((fc ^ hw->flowctrl) & (ALX_FC_RX | ALX_FC_TX))
alx_cfg_mac_flowcontrol(hw, fc);
hw->flowctrl = fc;
return 0;
}
static u32 alx_get_msglevel(struct net_device *netdev)
{
struct alx_priv *alx = netdev_priv(netdev);
return alx->msg_enable;
}
static void alx_set_msglevel(struct net_device *netdev, u32 data)
{
struct alx_priv *alx = netdev_priv(netdev);
alx->msg_enable = data;
}
static void alx_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct alx_priv *alx = netdev_priv(netdev);
struct alx_hw *hw = &alx->hw;
wol->supported = WAKE_MAGIC | WAKE_PHY;
wol->wolopts = 0;
if (hw->sleep_ctrl & ALX_SLEEP_WOL_MAGIC)
wol->wolopts |= WAKE_MAGIC;
if (hw->sleep_ctrl & ALX_SLEEP_WOL_PHY)
wol->wolopts |= WAKE_PHY;
}
static int alx_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct alx_priv *alx = netdev_priv(netdev);
struct alx_hw *hw = &alx->hw;
if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE |
WAKE_UCAST | WAKE_BCAST | WAKE_MCAST))
return -EOPNOTSUPP;
hw->sleep_ctrl = 0;
if (wol->wolopts & WAKE_MAGIC)
hw->sleep_ctrl |= ALX_SLEEP_WOL_MAGIC;
if (wol->wolopts & WAKE_PHY)
hw->sleep_ctrl |= ALX_SLEEP_WOL_PHY;
device_set_wakeup_enable(&alx->hw.pdev->dev, hw->sleep_ctrl);
return 0;
}
static void alx_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct alx_priv *alx = netdev_priv(netdev);
strlcpy(drvinfo->driver, alx_drv_name, sizeof(drvinfo->driver));
strlcpy(drvinfo->bus_info, pci_name(alx->hw.pdev),
sizeof(drvinfo->bus_info));
}
const struct ethtool_ops alx_ethtool_ops = {
.get_settings = alx_get_settings,
.set_settings = alx_set_settings,
.get_pauseparam = alx_get_pauseparam,
.set_pauseparam = alx_set_pauseparam,
.get_drvinfo = alx_get_drvinfo,
.get_msglevel = alx_get_msglevel,
.set_msglevel = alx_set_msglevel,
.get_wol = alx_get_wol,
.set_wol = alx_set_wol,
.get_link = ethtool_op_get_link,
};
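alx_ethtool_ops is declared extern in alx.h and consumed by main.c, whose diff is suppressed below because of its size. For orientation, hooking such a table up is conventionally a one-line assignment in the PCI probe path; this is only a sketch, and the second ops name is an assumption rather than a quote from the suppressed file:

/* Once the net_device has been allocated in probe(): */
netdev->ethtool_ops = &alx_ethtool_ops;	/* routes ethtool requests to the handlers above */
netdev->netdev_ops  = &alx_netdev_ops;	/* assumed name of the main ops table */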
(File diff suppressed because it is too large.)
@@ -0,0 +1,499 @@
/*
* Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
*
* This file is free software: you may copy, redistribute and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation, either version 2 of the License, or (at your
* option) any later version.
*
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* This file incorporates work covered by the following copyright and
* permission notice:
*
* Copyright (c) 2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef ALX_HW_H_
#define ALX_HW_H_
#include <linux/types.h>
#include <linux/mdio.h>
#include <linux/pci.h>
#include "reg.h"
/* Transmit Packet Descriptor, contains 4 32-bit words.
*
* 31 16 0
* +----------------+----------------+
* | vlan-tag | buf length |
* +----------------+----------------+
* | Word 1 |
* +----------------+----------------+
* | Word 2: buf addr lo |
* +----------------+----------------+
* | Word 3: buf addr hi |
* +----------------+----------------+
*
* Word 2 and 3 combine to form a 64-bit buffer address
*
* Word 1 has three forms, depending on the state of bit 8/12/13:
* if bit8 =='1', the definition is just for custom checksum offload.
* if bit8 == '0' && bit12 == '1' && bit13 == '1', the *FIRST* descriptor
* for the skb is special for LSO V2, Word 2 become total skb length ,
* Word 3 is meaningless.
* other condition, the definition is for general skb or ip/tcp/udp
* checksum or LSO(TSO) offload.
*
* Here is the depiction:
*
* 0-+ 0-+
* 1 | 1 |
* 2 | 2 |
* 3 | Payload offset 3 | L4 header offset
* 4 | (7:0) 4 | (7:0)
* 5 | 5 |
* 6 | 6 |
* 7-+ 7-+
* 8 Custom csum enable = 1 8 Custom csum enable = 0
* 9 General IPv4 checksum 9 General IPv4 checksum
* 10 General TCP checksum 10 General TCP checksum
* 11 General UDP checksum 11 General UDP checksum
* 12 Large Send Segment enable 12 Large Send Segment enable
* 13 Large Send Segment type 13 Large Send Segment type
* 14 VLAN tagged 14 VLAN tagged
* 15 Insert VLAN tag 15 Insert VLAN tag
* 16 IPv4 packet 16 IPv4 packet
* 17 Ethernet frame type 17 Ethernet frame type
* 18-+ 18-+
* 19 | 19 |
* 20 | 20 |
* 21 | Custom csum offset 21 |
* 22 | (25:18) 22 |
* 23 | 23 | MSS (30:18)
* 24 | 24 |
* 25-+ 25 |
* 26-+ 26 |
* 27 | 27 |
* 28 | Reserved 28 |
* 29 | 29 |
* 30-+ 30-+
* 31 End of packet 31 End of packet
*/
struct alx_txd {
__le16 len;
__le16 vlan_tag;
__le32 word1;
union {
__le64 addr;
struct {
__le32 pkt_len;
__le32 resvd;
} l;
} adrl;
} __packed;
/* tpd word 1 */
#define TPD_CXSUMSTART_MASK 0x00FF
#define TPD_CXSUMSTART_SHIFT 0
#define TPD_L4HDROFFSET_MASK 0x00FF
#define TPD_L4HDROFFSET_SHIFT 0
#define TPD_CXSUM_EN_MASK 0x0001
#define TPD_CXSUM_EN_SHIFT 8
#define TPD_IP_XSUM_MASK 0x0001
#define TPD_IP_XSUM_SHIFT 9
#define TPD_TCP_XSUM_MASK 0x0001
#define TPD_TCP_XSUM_SHIFT 10
#define TPD_UDP_XSUM_MASK 0x0001
#define TPD_UDP_XSUM_SHIFT 11
#define TPD_LSO_EN_MASK 0x0001
#define TPD_LSO_EN_SHIFT 12
#define TPD_LSO_V2_MASK 0x0001
#define TPD_LSO_V2_SHIFT 13
#define TPD_VLTAGGED_MASK 0x0001
#define TPD_VLTAGGED_SHIFT 14
#define TPD_INS_VLTAG_MASK 0x0001
#define TPD_INS_VLTAG_SHIFT 15
#define TPD_IPV4_MASK 0x0001
#define TPD_IPV4_SHIFT 16
#define TPD_ETHTYPE_MASK 0x0001
#define TPD_ETHTYPE_SHIFT 17
#define TPD_CXSUMOFFSET_MASK 0x00FF
#define TPD_CXSUMOFFSET_SHIFT 18
#define TPD_MSS_MASK 0x1FFF
#define TPD_MSS_SHIFT 18
#define TPD_EOP_MASK 0x0001
#define TPD_EOP_SHIFT 31
#define DESC_GET(_x, _name) ((_x) >> _name##SHIFT & _name##MASK)
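DESC_GET() builds the accessor names by token pasting, so the caller passes the field prefix with its trailing underscore (e.g. TPD_MSS_) and the macro appends SHIFT/MASK. A small self-contained illustration of packing and reading back the MSS field of word1; this uses host byte order only, whereas the real descriptors are __le32 and would go through cpu_to_le32()/le32_to_cpu() in the driver proper:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the definitions above. */
#define TPD_MSS_MASK	0x1FFF
#define TPD_MSS_SHIFT	18
#define DESC_GET(_x, _name) ((_x) >> _name##SHIFT & _name##MASK)

int main(void)
{
	uint32_t word1 = 0;
	uint32_t mss = 1460;	/* example value */

	/* pack: mask the value, then shift it into position */
	word1 |= (mss & TPD_MSS_MASK) << TPD_MSS_SHIFT;

	/* unpack with the accessor; note the trailing underscore */
	printf("mss read back: %u\n", (unsigned)DESC_GET(word1, TPD_MSS_));
	return 0;
}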
/* Receive Free Descriptor */
struct alx_rfd {
__le64 addr; /* data buffer address, length is
* declared in register --- every
* buffer has the same size
*/
} __packed;
/* Receive Return Descriptor, contains 4 32-bit words.
*
* 31 16 0
* +----------------+----------------+
* | Word 0 |
* +----------------+----------------+
* | Word 1: RSS Hash value |
* +----------------+----------------+
* | Word 2 |
* +----------------+----------------+
* | Word 3 |
* +----------------+----------------+
*
* Word 0 depiction & Word 2 depiction:
*
* 0--+ 0--+
* 1 | 1 |
* 2 | 2 |
* 3 | 3 |
* 4 | 4 |
* 5 | 5 |
* 6 | 6 |
* 7 | IP payload checksum 7 | VLAN tag
* 8 | (15:0) 8 | (15:0)
* 9 | 9 |
* 10 | 10 |
* 11 | 11 |
* 12 | 12 |
* 13 | 13 |
* 14 | 14 |
* 15-+ 15-+
* 16-+ 16-+
* 17 | Number of RFDs 17 |
* 18 | (19:16) 18 |
* 19-+ 19 | Protocol ID
* 20-+ 20 | (23:16)
* 21 | 21 |
* 22 | 22 |
* 23 | 23-+
* 24 | 24 | Reserved
* 25 | Start index of RFD-ring 25-+
* 26 | (31:20) 26 | RSS Q-num (27:25)
* 27 | 27-+
* 28 | 28-+
* 29 | 29 | RSS Hash algorithm
* 30 | 30 | (31:28)
* 31-+ 31-+
*
* Word 3 depiction:
*
* 0--+
* 1 |
* 2 |
* 3 |
* 4 |
* 5 |
* 6 |
* 7 | Packet length (include FCS)
* 8 | (13:0)
* 9 |
* 10 |
* 11 |
* 12 |
* 13-+
* 14 L4 Header checksum error
* 15 IPv4 checksum error
* 16 VLAN tagged
* 17-+
* 18 | Protocol ID (19:17)
* 19-+
* 20 Receive error summary
* 21 FCS(CRC) error
* 22 Frame alignment error
* 23 Truncated packet
* 24 Runt packet
* 25 Incomplete packet due to insufficient rx-desc
* 26 Broadcast packet
* 27 Multicast packet
* 28 Ethernet type (EII or 802.3)
* 29 FIFO overflow
* 30 Length error (for 802.3, length field mismatch with actual len)
* 31 Updated, indicate to driver that this RRD is refreshed.
*/
struct alx_rrd {
__le32 word0;
__le32 rss_hash;
__le32 word2;
__le32 word3;
} __packed;
/* rrd word 0 */
#define RRD_XSUM_MASK 0xFFFF
#define RRD_XSUM_SHIFT 0
#define RRD_NOR_MASK 0x000F
#define RRD_NOR_SHIFT 16
#define RRD_SI_MASK 0x0FFF
#define RRD_SI_SHIFT 20
/* rrd word 2 */
#define RRD_VLTAG_MASK 0xFFFF
#define RRD_VLTAG_SHIFT 0
#define RRD_PID_MASK 0x00FF
#define RRD_PID_SHIFT 16
/* non-ip packet */
#define RRD_PID_NONIP 0
/* ipv4(only) */
#define RRD_PID_IPV4 1
/* tcp/ipv6 */
#define RRD_PID_IPV6TCP 2
/* tcp/ipv4 */
#define RRD_PID_IPV4TCP 3
/* udp/ipv6 */
#define RRD_PID_IPV6UDP 4
/* udp/ipv4 */
#define RRD_PID_IPV4UDP 5
/* ipv6(only) */
#define RRD_PID_IPV6 6
/* LLDP packet */
#define RRD_PID_LLDP 7
/* 1588 packet */
#define RRD_PID_1588 8
#define RRD_RSSQ_MASK 0x0007
#define RRD_RSSQ_SHIFT 25
#define RRD_RSSALG_MASK 0x000F
#define RRD_RSSALG_SHIFT 28
#define RRD_RSSALG_TCPV6 0x1
#define RRD_RSSALG_IPV6 0x2
#define RRD_RSSALG_TCPV4 0x4
#define RRD_RSSALG_IPV4 0x8
/* rrd word 3 */
#define RRD_PKTLEN_MASK 0x3FFF
#define RRD_PKTLEN_SHIFT 0
#define RRD_ERR_L4_MASK 0x0001
#define RRD_ERR_L4_SHIFT 14
#define RRD_ERR_IPV4_MASK 0x0001
#define RRD_ERR_IPV4_SHIFT 15
#define RRD_VLTAGGED_MASK 0x0001
#define RRD_VLTAGGED_SHIFT 16
#define RRD_OLD_PID_MASK 0x0007
#define RRD_OLD_PID_SHIFT 17
#define RRD_ERR_RES_MASK 0x0001
#define RRD_ERR_RES_SHIFT 20
#define RRD_ERR_FCS_MASK 0x0001
#define RRD_ERR_FCS_SHIFT 21
#define RRD_ERR_FAE_MASK 0x0001
#define RRD_ERR_FAE_SHIFT 22
#define RRD_ERR_TRUNC_MASK 0x0001
#define RRD_ERR_TRUNC_SHIFT 23
#define RRD_ERR_RUNT_MASK 0x0001
#define RRD_ERR_RUNT_SHIFT 24
#define RRD_ERR_ICMP_MASK 0x0001
#define RRD_ERR_ICMP_SHIFT 25
#define RRD_BCAST_MASK 0x0001
#define RRD_BCAST_SHIFT 26
#define RRD_MCAST_MASK 0x0001
#define RRD_MCAST_SHIFT 27
#define RRD_ETHTYPE_MASK 0x0001
#define RRD_ETHTYPE_SHIFT 28
#define RRD_ERR_FIFOV_MASK 0x0001
#define RRD_ERR_FIFOV_SHIFT 29
#define RRD_ERR_LEN_MASK 0x0001
#define RRD_ERR_LEN_SHIFT 30
#define RRD_UPDATED_MASK 0x0001
#define RRD_UPDATED_SHIFT 31
#define ALX_MAX_SETUP_LNK_CYCLE 50
/* for FlowControl */
#define ALX_FC_RX 0x01
#define ALX_FC_TX 0x02
#define ALX_FC_ANEG 0x04
/* for sleep control */
#define ALX_SLEEP_WOL_PHY 0x00000001
#define ALX_SLEEP_WOL_MAGIC 0x00000002
#define ALX_SLEEP_CIFS 0x00000004
#define ALX_SLEEP_ACTIVE (ALX_SLEEP_WOL_PHY | \
ALX_SLEEP_WOL_MAGIC | \
ALX_SLEEP_CIFS)
/* for RSS hash type */
#define ALX_RSS_HASH_TYPE_IPV4 0x1
#define ALX_RSS_HASH_TYPE_IPV4_TCP 0x2
#define ALX_RSS_HASH_TYPE_IPV6 0x4
#define ALX_RSS_HASH_TYPE_IPV6_TCP 0x8
#define ALX_RSS_HASH_TYPE_ALL (ALX_RSS_HASH_TYPE_IPV4 | \
ALX_RSS_HASH_TYPE_IPV4_TCP | \
ALX_RSS_HASH_TYPE_IPV6 | \
ALX_RSS_HASH_TYPE_IPV6_TCP)
#define ALX_DEF_RXBUF_SIZE 1536
#define ALX_MAX_JUMBO_PKT_SIZE (9*1024)
#define ALX_MAX_TSO_PKT_SIZE (7*1024)
#define ALX_MAX_FRAME_SIZE ALX_MAX_JUMBO_PKT_SIZE
#define ALX_MIN_FRAME_SIZE 68
#define ALX_RAW_MTU(_mtu) (_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)
#define ALX_MAX_RX_QUEUES 8
#define ALX_MAX_TX_QUEUES 4
#define ALX_MAX_HANDLED_INTRS 5
#define ALX_ISR_MISC (ALX_ISR_PCIE_LNKDOWN | \
ALX_ISR_DMAW | \
ALX_ISR_DMAR | \
ALX_ISR_SMB | \
ALX_ISR_MANU | \
ALX_ISR_TIMER)
#define ALX_ISR_FATAL (ALX_ISR_PCIE_LNKDOWN | \
ALX_ISR_DMAW | ALX_ISR_DMAR)
#define ALX_ISR_ALERT (ALX_ISR_RXF_OV | \
ALX_ISR_TXF_UR | \
ALX_ISR_RFD_UR)
#define ALX_ISR_ALL_QUEUES (ALX_ISR_TX_Q0 | \
ALX_ISR_TX_Q1 | \
ALX_ISR_TX_Q2 | \
ALX_ISR_TX_Q3 | \
ALX_ISR_RX_Q0 | \
ALX_ISR_RX_Q1 | \
ALX_ISR_RX_Q2 | \
ALX_ISR_RX_Q3 | \
ALX_ISR_RX_Q4 | \
ALX_ISR_RX_Q5 | \
ALX_ISR_RX_Q6 | \
ALX_ISR_RX_Q7)
/* maximum interrupt vectors for msix */
#define ALX_MAX_MSIX_INTRS 16
#define ALX_GET_FIELD(_data, _field) \
(((_data) >> _field ## _SHIFT) & _field ## _MASK)
#define ALX_SET_FIELD(_data, _field, _value) do { \
(_data) &= ~(_field ## _MASK << _field ## _SHIFT); \
(_data) |= ((_value) & _field ## _MASK) << _field ## _SHIFT;\
} while (0)
struct alx_hw {
struct pci_dev *pdev;
u8 __iomem *hw_addr;
/* current & permanent mac addr */
u8 mac_addr[ETH_ALEN];
u8 perm_addr[ETH_ALEN];
u16 mtu;
u16 imt;
u8 dma_chnl;
u8 max_dma_chnl;
/* tpd threshold to trig INT */
u32 ith_tpd;
u32 rx_ctrl;
u32 mc_hash[2];
u32 smb_timer;
/* SPEED_* + DUPLEX_*, SPEED_UNKNOWN if link is down */
int link_speed;
/* auto-neg advertisement or force mode config */
u32 adv_cfg;
u8 flowctrl;
u32 sleep_ctrl;
spinlock_t mdio_lock;
struct mdio_if_info mdio;
u16 phy_id[2];
/* PHY link patch flag */
bool lnk_patch;
};
static inline int alx_hw_revision(struct alx_hw *hw)
{
return hw->pdev->revision >> ALX_PCI_REVID_SHIFT;
}
static inline bool alx_hw_with_cr(struct alx_hw *hw)
{
return hw->pdev->revision & 1;
}
static inline bool alx_hw_giga(struct alx_hw *hw)
{
return hw->pdev->device & 1;
}
static inline void alx_write_mem8(struct alx_hw *hw, u32 reg, u8 val)
{
writeb(val, hw->hw_addr + reg);
}
static inline void alx_write_mem16(struct alx_hw *hw, u32 reg, u16 val)
{
writew(val, hw->hw_addr + reg);
}
static inline u16 alx_read_mem16(struct alx_hw *hw, u32 reg)
{
return readw(hw->hw_addr + reg);
}
static inline void alx_write_mem32(struct alx_hw *hw, u32 reg, u32 val)
{
writel(val, hw->hw_addr + reg);
}
static inline u32 alx_read_mem32(struct alx_hw *hw, u32 reg)
{
return readl(hw->hw_addr + reg);
}
static inline void alx_post_write(struct alx_hw *hw)
{
readl(hw->hw_addr);
}
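Together with the ALX_GET_FIELD()/ALX_SET_FIELD() helpers defined earlier in this header, the accessors above give the usual read-modify-write idiom for multi-bit register fields. A sketch only, assuming the register and field definitions from reg.h (included above) are in scope; the field and value are chosen purely for illustration:

/* Illustrative only: rewrite the DMA read-delay field of ALX_DMA. */
static inline void example_set_dma_rdly(struct alx_hw *hw, u32 cnt)
{
	u32 val = alx_read_mem32(hw, ALX_DMA);

	ALX_SET_FIELD(val, ALX_DMA_RDLY_CNT, cnt);	/* clear the field, then set it */
	alx_write_mem32(hw, ALX_DMA, val);
	alx_post_write(hw);				/* flush the posted write */
}

ALX_DMA_RDLY_CNT_MASK and ALX_DMA_RDLY_CNT_SHIFT are the reg.h definitions that the macro pastes together.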
int alx_get_perm_macaddr(struct alx_hw *hw, u8 *addr);
void alx_reset_phy(struct alx_hw *hw);
void alx_reset_pcie(struct alx_hw *hw);
void alx_enable_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en);
int alx_setup_speed_duplex(struct alx_hw *hw, u32 ethadv, u8 flowctrl);
void alx_post_phy_link(struct alx_hw *hw);
int alx_pre_suspend(struct alx_hw *hw, int speed);
int alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data);
int alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data);
int alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata);
int alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data);
int alx_get_phy_link(struct alx_hw *hw, int *speed);
int alx_clear_phy_intr(struct alx_hw *hw);
int alx_config_wol(struct alx_hw *hw);
void alx_cfg_mac_flowcontrol(struct alx_hw *hw, u8 fc);
void alx_start_mac(struct alx_hw *hw);
int alx_reset_mac(struct alx_hw *hw);
void alx_set_macaddr(struct alx_hw *hw, const u8 *addr);
bool alx_phy_configured(struct alx_hw *hw);
void alx_configure_basic(struct alx_hw *hw);
void alx_disable_rss(struct alx_hw *hw);
int alx_select_powersaving_speed(struct alx_hw *hw, int *speed);
bool alx_get_phy_info(struct alx_hw *hw);
#endif
(File diff suppressed because it is too large.)
@@ -0,0 +1,810 @@
/*
* Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
*
* This file is free software: you may copy, redistribute and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation, either version 2 of the License, or (at your
* option) any later version.
*
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* This file incorporates work covered by the following copyright and
* permission notice:
*
* Copyright (c) 2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef ALX_REG_H
#define ALX_REG_H
#define ALX_DEV_ID_AR8161 0x1091
#define ALX_DEV_ID_E2200 0xe091
#define ALX_DEV_ID_AR8162 0x1090
#define ALX_DEV_ID_AR8171 0x10A1
#define ALX_DEV_ID_AR8172 0x10A0
/* rev definition,
* bit(0): with xD support
* bit(1): with Card Reader function
* bit(7:2): real revision
*/
#define ALX_PCI_REVID_SHIFT 3
#define ALX_REV_A0 0
#define ALX_REV_A1 1
#define ALX_REV_B0 2
#define ALX_REV_C0 3
#define ALX_DEV_CTRL 0x0060
#define ALX_DEV_CTRL_MAXRRS_MIN 2
#define ALX_MSIX_MASK 0x0090
#define ALX_UE_SVRT 0x010C
#define ALX_UE_SVRT_FCPROTERR BIT(13)
#define ALX_UE_SVRT_DLPROTERR BIT(4)
/* eeprom & flash load register */
#define ALX_EFLD 0x0204
#define ALX_EFLD_F_EXIST BIT(10)
#define ALX_EFLD_E_EXIST BIT(9)
#define ALX_EFLD_STAT BIT(5)
#define ALX_EFLD_START BIT(0)
/* eFuse load register */
#define ALX_SLD 0x0218
#define ALX_SLD_STAT BIT(12)
#define ALX_SLD_START BIT(11)
#define ALX_SLD_MAX_TO 100
#define ALX_PDLL_TRNS1 0x1104
#define ALX_PDLL_TRNS1_D3PLLOFF_EN BIT(11)
#define ALX_PMCTRL 0x12F8
#define ALX_PMCTRL_HOTRST_WTEN BIT(31)
/* bit30: L0s/L1 controlled by MAC based on throughput(setting in 15A0) */
#define ALX_PMCTRL_ASPM_FCEN BIT(30)
#define ALX_PMCTRL_SADLY_EN BIT(29)
#define ALX_PMCTRL_LCKDET_TIMER_MASK 0xF
#define ALX_PMCTRL_LCKDET_TIMER_SHIFT 24
#define ALX_PMCTRL_LCKDET_TIMER_DEF 0xC
/* bit[23:20] if pm_request_l1 time > @, then enter L0s not L1 */
#define ALX_PMCTRL_L1REQ_TO_MASK 0xF
#define ALX_PMCTRL_L1REQ_TO_SHIFT 20
#define ALX_PMCTRL_L1REG_TO_DEF 0xF
#define ALX_PMCTRL_TXL1_AFTER_L0S BIT(19)
#define ALX_PMCTRL_L1_TIMER_MASK 0x7
#define ALX_PMCTRL_L1_TIMER_SHIFT 16
#define ALX_PMCTRL_L1_TIMER_16US 4
#define ALX_PMCTRL_RCVR_WT_1US BIT(15)
/* bit13: enable pcie clk switch in L1 state */
#define ALX_PMCTRL_L1_CLKSW_EN BIT(13)
#define ALX_PMCTRL_L0S_EN BIT(12)
#define ALX_PMCTRL_RXL1_AFTER_L0S BIT(11)
#define ALX_PMCTRL_L1_BUFSRX_EN BIT(7)
/* bit6: power down serdes RX */
#define ALX_PMCTRL_L1_SRDSRX_PWD BIT(6)
#define ALX_PMCTRL_L1_SRDSPLL_EN BIT(5)
#define ALX_PMCTRL_L1_SRDS_EN BIT(4)
#define ALX_PMCTRL_L1_EN BIT(3)
/*******************************************************/
/* following registers are mapped only to memory space */
/*******************************************************/
#define ALX_MASTER 0x1400
/* bit12: 1:alwys select pclk from serdes, not sw to 25M */
#define ALX_MASTER_PCLKSEL_SRDS BIT(12)
/* bit11: irq moduration for rx */
#define ALX_MASTER_IRQMOD2_EN BIT(11)
/* bit10: irq moduration for tx/rx */
#define ALX_MASTER_IRQMOD1_EN BIT(10)
#define ALX_MASTER_SYSALVTIMER_EN BIT(7)
#define ALX_MASTER_OOB_DIS BIT(6)
/* bit5: wakeup without pcie clk */
#define ALX_MASTER_WAKEN_25M BIT(5)
/* bit0: MAC & DMA reset */
#define ALX_MASTER_DMA_MAC_RST BIT(0)
#define ALX_DMA_MAC_RST_TO 50
#define ALX_IRQ_MODU_TIMER 0x1408
#define ALX_IRQ_MODU_TIMER1_MASK 0xFFFF
#define ALX_IRQ_MODU_TIMER1_SHIFT 0
#define ALX_PHY_CTRL 0x140C
#define ALX_PHY_CTRL_100AB_EN BIT(17)
/* bit14: affect MAC & PHY, go to low power sts */
#define ALX_PHY_CTRL_POWER_DOWN BIT(14)
/* bit13: 1:pll always ON, 0:can switch in lpw */
#define ALX_PHY_CTRL_PLL_ON BIT(13)
#define ALX_PHY_CTRL_RST_ANALOG BIT(12)
#define ALX_PHY_CTRL_HIB_PULSE BIT(11)
#define ALX_PHY_CTRL_HIB_EN BIT(10)
#define ALX_PHY_CTRL_IDDQ BIT(7)
#define ALX_PHY_CTRL_GATE_25M BIT(5)
#define ALX_PHY_CTRL_LED_MODE BIT(2)
/* bit0: out of dsp RST state */
#define ALX_PHY_CTRL_DSPRST_OUT BIT(0)
#define ALX_PHY_CTRL_DSPRST_TO 80
#define ALX_PHY_CTRL_CLS (ALX_PHY_CTRL_LED_MODE | \
ALX_PHY_CTRL_100AB_EN | \
ALX_PHY_CTRL_PLL_ON)
#define ALX_MAC_STS 0x1410
#define ALX_MAC_STS_TXQ_BUSY BIT(3)
#define ALX_MAC_STS_RXQ_BUSY BIT(2)
#define ALX_MAC_STS_TXMAC_BUSY BIT(1)
#define ALX_MAC_STS_RXMAC_BUSY BIT(0)
#define ALX_MAC_STS_IDLE (ALX_MAC_STS_TXQ_BUSY | \
ALX_MAC_STS_RXQ_BUSY | \
ALX_MAC_STS_TXMAC_BUSY | \
ALX_MAC_STS_RXMAC_BUSY)
#define ALX_MDIO 0x1414
#define ALX_MDIO_MODE_EXT BIT(30)
#define ALX_MDIO_BUSY BIT(27)
#define ALX_MDIO_CLK_SEL_MASK 0x7
#define ALX_MDIO_CLK_SEL_SHIFT 24
#define ALX_MDIO_CLK_SEL_25MD4 0
#define ALX_MDIO_CLK_SEL_25MD128 7
#define ALX_MDIO_START BIT(23)
#define ALX_MDIO_SPRES_PRMBL BIT(22)
/* bit21: 1:read,0:write */
#define ALX_MDIO_OP_READ BIT(21)
#define ALX_MDIO_REG_MASK 0x1F
#define ALX_MDIO_REG_SHIFT 16
#define ALX_MDIO_DATA_MASK 0xFFFF
#define ALX_MDIO_DATA_SHIFT 0
#define ALX_MDIO_MAX_AC_TO 120
#define ALX_MDIO_EXTN 0x1448
#define ALX_MDIO_EXTN_DEVAD_MASK 0x1F
#define ALX_MDIO_EXTN_DEVAD_SHIFT 16
#define ALX_MDIO_EXTN_REG_MASK 0xFFFF
#define ALX_MDIO_EXTN_REG_SHIFT 0
#define ALX_SERDES 0x1424
#define ALX_SERDES_PHYCLK_SLWDWN BIT(18)
#define ALX_SERDES_MACCLK_SLWDWN BIT(17)
#define ALX_LPI_CTRL 0x1440
#define ALX_LPI_CTRL_EN BIT(0)
/* for B0+, bit[13..] for C0+ */
#define ALX_HRTBT_EXT_CTRL 0x1AD0
#define L1F_HRTBT_EXT_CTRL_PERIOD_HIGH_MASK 0x3F
#define L1F_HRTBT_EXT_CTRL_PERIOD_HIGH_SHIFT 24
#define L1F_HRTBT_EXT_CTRL_SWOI_STARTUP_PKT_EN BIT(23)
#define L1F_HRTBT_EXT_CTRL_IOAC_2_FRAGMENTED BIT(22)
#define L1F_HRTBT_EXT_CTRL_IOAC_1_FRAGMENTED BIT(21)
#define L1F_HRTBT_EXT_CTRL_IOAC_1_KEEPALIVE_EN BIT(20)
#define L1F_HRTBT_EXT_CTRL_IOAC_1_HAS_VLAN BIT(19)
#define L1F_HRTBT_EXT_CTRL_IOAC_1_IS_8023 BIT(18)
#define L1F_HRTBT_EXT_CTRL_IOAC_1_IS_IPV6 BIT(17)
#define L1F_HRTBT_EXT_CTRL_IOAC_2_KEEPALIVE_EN BIT(16)
#define L1F_HRTBT_EXT_CTRL_IOAC_2_HAS_VLAN BIT(15)
#define L1F_HRTBT_EXT_CTRL_IOAC_2_IS_8023 BIT(14)
#define L1F_HRTBT_EXT_CTRL_IOAC_2_IS_IPV6 BIT(13)
#define ALX_HRTBT_EXT_CTRL_NS_EN BIT(12)
#define ALX_HRTBT_EXT_CTRL_FRAG_LEN_MASK 0xFF
#define ALX_HRTBT_EXT_CTRL_FRAG_LEN_SHIFT 4
#define ALX_HRTBT_EXT_CTRL_IS_8023 BIT(3)
#define ALX_HRTBT_EXT_CTRL_IS_IPV6 BIT(2)
#define ALX_HRTBT_EXT_CTRL_WAKEUP_EN BIT(1)
#define ALX_HRTBT_EXT_CTRL_ARP_EN BIT(0)
#define ALX_HRTBT_REM_IPV4_ADDR 0x1AD4
#define ALX_HRTBT_HOST_IPV4_ADDR 0x1478
#define ALX_HRTBT_REM_IPV6_ADDR3 0x1AD8
#define ALX_HRTBT_REM_IPV6_ADDR2 0x1ADC
#define ALX_HRTBT_REM_IPV6_ADDR1 0x1AE0
#define ALX_HRTBT_REM_IPV6_ADDR0 0x1AE4
/* 1B8C ~ 1B94 for C0+ */
#define ALX_SWOI_ACER_CTRL 0x1B8C
#define ALX_SWOI_ORIG_ACK_NAK_EN BIT(20)
#define ALX_SWOI_ORIG_ACK_NAK_PKT_LEN_MASK 0XFF
#define ALX_SWOI_ORIG_ACK_NAK_PKT_LEN_SHIFT 12
#define ALX_SWOI_ORIG_ACK_ADDR_MASK 0XFFF
#define ALX_SWOI_ORIG_ACK_ADDR_SHIFT 0
#define ALX_SWOI_IOAC_CTRL_2 0x1B90
#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_FRAG_LEN_MASK 0xFF
#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_FRAG_LEN_SHIFT 24
#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_PKT_LEN_MASK 0xFFF
#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_PKT_LEN_SHIFT 12
#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_HDR_ADDR_MASK 0xFFF
#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_HDR_ADDR_SHIFT 0
#define ALX_SWOI_IOAC_CTRL_3 0x1B94
#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_FRAG_LEN_MASK 0xFF
#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_FRAG_LEN_SHIFT 24
#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_PKT_LEN_MASK 0xFFF
#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_PKT_LEN_SHIFT 12
#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_HDR_ADDR_MASK 0xFFF
#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_HDR_ADDR_SHIFT 0
/* for B0 */
#define ALX_IDLE_DECISN_TIMER 0x1474
/* 1ms */
#define ALX_IDLE_DECISN_TIMER_DEF 0x400
#define ALX_MAC_CTRL 0x1480
#define ALX_MAC_CTRL_FAST_PAUSE BIT(31)
#define ALX_MAC_CTRL_WOLSPED_SWEN BIT(30)
/* bit29: 1:legacy(hi5b), 0:marvl(lo5b)*/
#define ALX_MAC_CTRL_MHASH_ALG_HI5B BIT(29)
#define ALX_MAC_CTRL_BRD_EN BIT(26)
#define ALX_MAC_CTRL_MULTIALL_EN BIT(25)
#define ALX_MAC_CTRL_SPEED_MASK 0x3
#define ALX_MAC_CTRL_SPEED_SHIFT 20
#define ALX_MAC_CTRL_SPEED_10_100 1
#define ALX_MAC_CTRL_SPEED_1000 2
#define ALX_MAC_CTRL_PROMISC_EN BIT(15)
#define ALX_MAC_CTRL_VLANSTRIP BIT(14)
#define ALX_MAC_CTRL_PRMBLEN_MASK 0xF
#define ALX_MAC_CTRL_PRMBLEN_SHIFT 10
#define ALX_MAC_CTRL_PCRCE BIT(7)
#define ALX_MAC_CTRL_CRCE BIT(6)
#define ALX_MAC_CTRL_FULLD BIT(5)
#define ALX_MAC_CTRL_RXFC_EN BIT(3)
#define ALX_MAC_CTRL_TXFC_EN BIT(2)
#define ALX_MAC_CTRL_RX_EN BIT(1)
#define ALX_MAC_CTRL_TX_EN BIT(0)
#define ALX_STAD0 0x1488
#define ALX_STAD1 0x148C
#define ALX_HASH_TBL0 0x1490
#define ALX_HASH_TBL1 0x1494
#define ALX_MTU 0x149C
#define ALX_MTU_JUMBO_TH 1514
#define ALX_MTU_STD_ALGN 1536
#define ALX_SRAM5 0x1524
#define ALX_SRAM_RXF_LEN_MASK 0xFFF
#define ALX_SRAM_RXF_LEN_SHIFT 0
#define ALX_SRAM_RXF_LEN_8K (8*1024)
#define ALX_SRAM9 0x1534
#define ALX_SRAM_LOAD_PTR BIT(0)
#define ALX_RX_BASE_ADDR_HI 0x1540
#define ALX_TX_BASE_ADDR_HI 0x1544
#define ALX_RFD_ADDR_LO 0x1550
#define ALX_RFD_RING_SZ 0x1560
#define ALX_RFD_BUF_SZ 0x1564
#define ALX_RRD_ADDR_LO 0x1568
#define ALX_RRD_RING_SZ 0x1578
/* pri3: highest, pri0: lowest */
#define ALX_TPD_PRI3_ADDR_LO 0x14E4
#define ALX_TPD_PRI2_ADDR_LO 0x14E0
#define ALX_TPD_PRI1_ADDR_LO 0x157C
#define ALX_TPD_PRI0_ADDR_LO 0x1580
/* producer index is 16bit */
#define ALX_TPD_PRI3_PIDX 0x1618
#define ALX_TPD_PRI2_PIDX 0x161A
#define ALX_TPD_PRI1_PIDX 0x15F0
#define ALX_TPD_PRI0_PIDX 0x15F2
/* consumer index is 16bit */
#define ALX_TPD_PRI3_CIDX 0x161C
#define ALX_TPD_PRI2_CIDX 0x161E
#define ALX_TPD_PRI1_CIDX 0x15F4
#define ALX_TPD_PRI0_CIDX 0x15F6
#define ALX_TPD_RING_SZ 0x1584
#define ALX_TXQ0 0x1590
#define ALX_TXQ0_TXF_BURST_PREF_MASK 0xFFFF
#define ALX_TXQ0_TXF_BURST_PREF_SHIFT 16
#define ALX_TXQ_TXF_BURST_PREF_DEF 0x200
#define ALX_TXQ0_LSO_8023_EN BIT(7)
#define ALX_TXQ0_MODE_ENHANCE BIT(6)
#define ALX_TXQ0_EN BIT(5)
#define ALX_TXQ0_SUPT_IPOPT BIT(4)
#define ALX_TXQ0_TPD_BURSTPREF_MASK 0xF
#define ALX_TXQ0_TPD_BURSTPREF_SHIFT 0
#define ALX_TXQ_TPD_BURSTPREF_DEF 5
#define ALX_TXQ1 0x1594
/* bit11: drop large packet, len > (rfd buf) */
#define ALX_TXQ1_ERRLGPKT_DROP_EN BIT(11)
#define ALX_TXQ1_JUMBO_TSO_TH (7*1024)
#define ALX_RXQ0 0x15A0
#define ALX_RXQ0_EN BIT(31)
#define ALX_RXQ0_RSS_HASH_EN BIT(29)
#define ALX_RXQ0_RSS_MODE_MASK 0x3
#define ALX_RXQ0_RSS_MODE_SHIFT 26
#define ALX_RXQ0_RSS_MODE_DIS 0
#define ALX_RXQ0_RSS_MODE_MQMI 3
#define ALX_RXQ0_NUM_RFD_PREF_MASK 0x3F
#define ALX_RXQ0_NUM_RFD_PREF_SHIFT 20
#define ALX_RXQ0_NUM_RFD_PREF_DEF 8
#define ALX_RXQ0_IDT_TBL_SIZE_MASK 0x1FF
#define ALX_RXQ0_IDT_TBL_SIZE_SHIFT 8
#define ALX_RXQ0_IDT_TBL_SIZE_DEF 0x100
#define ALX_RXQ0_IDT_TBL_SIZE_NORMAL 128
#define ALX_RXQ0_IPV6_PARSE_EN BIT(7)
#define ALX_RXQ0_RSS_HSTYP_MASK 0xF
#define ALX_RXQ0_RSS_HSTYP_SHIFT 2
#define ALX_RXQ0_RSS_HSTYP_IPV6_TCP_EN BIT(5)
#define ALX_RXQ0_RSS_HSTYP_IPV6_EN BIT(4)
#define ALX_RXQ0_RSS_HSTYP_IPV4_TCP_EN BIT(3)
#define ALX_RXQ0_RSS_HSTYP_IPV4_EN BIT(2)
#define ALX_RXQ0_RSS_HSTYP_ALL (ALX_RXQ0_RSS_HSTYP_IPV6_TCP_EN | \
ALX_RXQ0_RSS_HSTYP_IPV4_TCP_EN | \
ALX_RXQ0_RSS_HSTYP_IPV6_EN | \
ALX_RXQ0_RSS_HSTYP_IPV4_EN)
#define ALX_RXQ0_ASPM_THRESH_MASK 0x3
#define ALX_RXQ0_ASPM_THRESH_SHIFT 0
#define ALX_RXQ0_ASPM_THRESH_100M 3
#define ALX_RXQ2 0x15A8
#define ALX_RXQ2_RXF_XOFF_THRESH_MASK 0xFFF
#define ALX_RXQ2_RXF_XOFF_THRESH_SHIFT 16
#define ALX_RXQ2_RXF_XON_THRESH_MASK 0xFFF
#define ALX_RXQ2_RXF_XON_THRESH_SHIFT 0
/* Size = tx-packet(1522) + IPG(12) + SOF(8) + 64(Pause) + IPG(12) + SOF(8) +
* rx-packet(1522) + delay-of-link(64)
* = 3212.
*/
#define ALX_RXQ2_RXF_FLOW_CTRL_RSVD 3212
#define ALX_DMA 0x15C0
#define ALX_DMA_RCHNL_SEL_MASK 0x3
#define ALX_DMA_RCHNL_SEL_SHIFT 26
#define ALX_DMA_WDLY_CNT_MASK 0xF
#define ALX_DMA_WDLY_CNT_SHIFT 16
#define ALX_DMA_WDLY_CNT_DEF 4
#define ALX_DMA_RDLY_CNT_MASK 0x1F
#define ALX_DMA_RDLY_CNT_SHIFT 11
#define ALX_DMA_RDLY_CNT_DEF 15
/* bit10: 0:tpd with pri, 1: data */
#define ALX_DMA_RREQ_PRI_DATA BIT(10)
#define ALX_DMA_RREQ_BLEN_MASK 0x7
#define ALX_DMA_RREQ_BLEN_SHIFT 4
#define ALX_DMA_RORDER_MODE_MASK 0x7
#define ALX_DMA_RORDER_MODE_SHIFT 0
#define ALX_DMA_RORDER_MODE_OUT 4
#define ALX_WOL0 0x14A0
#define ALX_WOL0_PME_LINK BIT(5)
#define ALX_WOL0_LINK_EN BIT(4)
#define ALX_WOL0_PME_MAGIC_EN BIT(3)
#define ALX_WOL0_MAGIC_EN BIT(2)
#define ALX_RFD_PIDX 0x15E0
#define ALX_RFD_CIDX 0x15F8
/* MIB */
#define ALX_MIB_BASE 0x1700
#define ALX_MIB_RX_OK (ALX_MIB_BASE + 0)
#define ALX_MIB_RX_ERRADDR (ALX_MIB_BASE + 92)
#define ALX_MIB_TX_OK (ALX_MIB_BASE + 96)
#define ALX_MIB_TX_MCCNT (ALX_MIB_BASE + 192)
#define ALX_RX_STATS_BIN ALX_MIB_RX_OK
#define ALX_RX_STATS_END ALX_MIB_RX_ERRADDR
#define ALX_TX_STATS_BIN ALX_MIB_TX_OK
#define ALX_TX_STATS_END ALX_MIB_TX_MCCNT
#define ALX_ISR 0x1600
#define ALX_ISR_DIS BIT(31)
#define ALX_ISR_RX_Q7 BIT(30)
#define ALX_ISR_RX_Q6 BIT(29)
#define ALX_ISR_RX_Q5 BIT(28)
#define ALX_ISR_RX_Q4 BIT(27)
#define ALX_ISR_PCIE_LNKDOWN BIT(26)
#define ALX_ISR_RX_Q3 BIT(19)
#define ALX_ISR_RX_Q2 BIT(18)
#define ALX_ISR_RX_Q1 BIT(17)
#define ALX_ISR_RX_Q0 BIT(16)
#define ALX_ISR_TX_Q0 BIT(15)
#define ALX_ISR_PHY BIT(12)
#define ALX_ISR_DMAW BIT(10)
#define ALX_ISR_DMAR BIT(9)
#define ALX_ISR_TXF_UR BIT(8)
#define ALX_ISR_TX_Q3 BIT(7)
#define ALX_ISR_TX_Q2 BIT(6)
#define ALX_ISR_TX_Q1 BIT(5)
#define ALX_ISR_RFD_UR BIT(4)
#define ALX_ISR_RXF_OV BIT(3)
#define ALX_ISR_MANU BIT(2)
#define ALX_ISR_TIMER BIT(1)
#define ALX_ISR_SMB BIT(0)
#define ALX_IMR 0x1604
/* re-send assert msg if SW no response */
#define ALX_INT_RETRIG 0x1608
/* 40ms */
#define ALX_INT_RETRIG_TO 20000
#define ALX_SMB_TIMER 0x15C4
#define ALX_TINT_TPD_THRSHLD 0x15C8
#define ALX_TINT_TIMER 0x15CC
#define ALX_CLK_GATE 0x1814
#define ALX_CLK_GATE_RXMAC BIT(5)
#define ALX_CLK_GATE_TXMAC BIT(4)
#define ALX_CLK_GATE_RXQ BIT(3)
#define ALX_CLK_GATE_TXQ BIT(2)
#define ALX_CLK_GATE_DMAR BIT(1)
#define ALX_CLK_GATE_DMAW BIT(0)
#define ALX_CLK_GATE_ALL (ALX_CLK_GATE_RXMAC | \
ALX_CLK_GATE_TXMAC | \
ALX_CLK_GATE_RXQ | \
ALX_CLK_GATE_TXQ | \
ALX_CLK_GATE_DMAR | \
ALX_CLK_GATE_DMAW)
/* interop between drivers */
#define ALX_DRV 0x1804
#define ALX_DRV_PHY_AUTO BIT(28)
#define ALX_DRV_PHY_1000 BIT(27)
#define ALX_DRV_PHY_100 BIT(26)
#define ALX_DRV_PHY_10 BIT(25)
#define ALX_DRV_PHY_DUPLEX BIT(24)
/* bit23: adv Pause */
#define ALX_DRV_PHY_PAUSE BIT(23)
/* bit22: adv Asym Pause */
#define ALX_DRV_PHY_MASK 0xFF
#define ALX_DRV_PHY_SHIFT 21
#define ALX_DRV_PHY_UNKNOWN 0
/* flag of phy inited */
#define ALX_PHY_INITED 0x003F
/* reg 1830 ~ 186C for C0+, 16 bit map patterns and wake packet detection */
#define ALX_WOL_CTRL2 0x1830
#define ALX_WOL_CTRL2_DATA_STORE BIT(3)
#define ALX_WOL_CTRL2_PTRN_EVT BIT(2)
#define ALX_WOL_CTRL2_PME_PTRN_EN BIT(1)
#define ALX_WOL_CTRL2_PTRN_EN BIT(0)
#define ALX_WOL_CTRL3 0x1834
#define ALX_WOL_CTRL3_PTRN_ADDR_MASK 0xFFFFF
#define ALX_WOL_CTRL3_PTRN_ADDR_SHIFT 0
#define ALX_WOL_CTRL4 0x1838
#define ALX_WOL_CTRL4_PT15_MATCH BIT(31)
#define ALX_WOL_CTRL4_PT14_MATCH BIT(30)
#define ALX_WOL_CTRL4_PT13_MATCH BIT(29)
#define ALX_WOL_CTRL4_PT12_MATCH BIT(28)
#define ALX_WOL_CTRL4_PT11_MATCH BIT(27)
#define ALX_WOL_CTRL4_PT10_MATCH BIT(26)
#define ALX_WOL_CTRL4_PT9_MATCH BIT(25)
#define ALX_WOL_CTRL4_PT8_MATCH BIT(24)
#define ALX_WOL_CTRL4_PT7_MATCH BIT(23)
#define ALX_WOL_CTRL4_PT6_MATCH BIT(22)
#define ALX_WOL_CTRL4_PT5_MATCH BIT(21)
#define ALX_WOL_CTRL4_PT4_MATCH BIT(20)
#define ALX_WOL_CTRL4_PT3_MATCH BIT(19)
#define ALX_WOL_CTRL4_PT2_MATCH BIT(18)
#define ALX_WOL_CTRL4_PT1_MATCH BIT(17)
#define ALX_WOL_CTRL4_PT0_MATCH BIT(16)
#define ALX_WOL_CTRL4_PT15_EN BIT(15)
#define ALX_WOL_CTRL4_PT14_EN BIT(14)
#define ALX_WOL_CTRL4_PT13_EN BIT(13)
#define ALX_WOL_CTRL4_PT12_EN BIT(12)
#define ALX_WOL_CTRL4_PT11_EN BIT(11)
#define ALX_WOL_CTRL4_PT10_EN BIT(10)
#define ALX_WOL_CTRL4_PT9_EN BIT(9)
#define ALX_WOL_CTRL4_PT8_EN BIT(8)
#define ALX_WOL_CTRL4_PT7_EN BIT(7)
#define ALX_WOL_CTRL4_PT6_EN BIT(6)
#define ALX_WOL_CTRL4_PT5_EN BIT(5)
#define ALX_WOL_CTRL4_PT4_EN BIT(4)
#define ALX_WOL_CTRL4_PT3_EN BIT(3)
#define ALX_WOL_CTRL4_PT2_EN BIT(2)
#define ALX_WOL_CTRL4_PT1_EN BIT(1)
#define ALX_WOL_CTRL4_PT0_EN BIT(0)
#define ALX_WOL_CTRL5 0x183C
#define ALX_WOL_CTRL5_PT3_LEN_MASK 0xFF
#define ALX_WOL_CTRL5_PT3_LEN_SHIFT 24
#define ALX_WOL_CTRL5_PT2_LEN_MASK 0xFF
#define ALX_WOL_CTRL5_PT2_LEN_SHIFT 16
#define ALX_WOL_CTRL5_PT1_LEN_MASK 0xFF
#define ALX_WOL_CTRL5_PT1_LEN_SHIFT 8
#define ALX_WOL_CTRL5_PT0_LEN_MASK 0xFF
#define ALX_WOL_CTRL5_PT0_LEN_SHIFT 0
#define ALX_WOL_CTRL6 0x1840
#define ALX_WOL_CTRL5_PT7_LEN_MASK 0xFF
#define ALX_WOL_CTRL5_PT7_LEN_SHIFT 24
#define ALX_WOL_CTRL5_PT6_LEN_MASK 0xFF
#define ALX_WOL_CTRL5_PT6_LEN_SHIFT 16
#define ALX_WOL_CTRL5_PT5_LEN_MASK 0xFF
#define ALX_WOL_CTRL5_PT5_LEN_SHIFT 8
#define ALX_WOL_CTRL5_PT4_LEN_MASK 0xFF
#define ALX_WOL_CTRL5_PT4_LEN_SHIFT 0
#define ALX_WOL_CTRL7 0x1844
#define ALX_WOL_CTRL5_PT11_LEN_MASK 0xFF
#define ALX_WOL_CTRL5_PT11_LEN_SHIFT 24
#define ALX_WOL_CTRL5_PT10_LEN_MASK 0xFF
#define ALX_WOL_CTRL5_PT10_LEN_SHIFT 16
#define ALX_WOL_CTRL5_PT9_LEN_MASK 0xFF
#define ALX_WOL_CTRL5_PT9_LEN_SHIFT 8
#define ALX_WOL_CTRL5_PT8_LEN_MASK 0xFF
#define ALX_WOL_CTRL5_PT8_LEN_SHIFT 0
#define ALX_WOL_CTRL8 0x1848
#define ALX_WOL_CTRL5_PT15_LEN_MASK 0xFF
#define ALX_WOL_CTRL5_PT15_LEN_SHIFT 24
#define ALX_WOL_CTRL5_PT14_LEN_MASK 0xFF
#define ALX_WOL_CTRL5_PT14_LEN_SHIFT 16
#define ALX_WOL_CTRL5_PT13_LEN_MASK 0xFF
#define ALX_WOL_CTRL5_PT13_LEN_SHIFT 8
#define ALX_WOL_CTRL5_PT12_LEN_MASK 0xFF
#define ALX_WOL_CTRL5_PT12_LEN_SHIFT 0
#define ALX_ACER_FIXED_PTN0 0x1850
#define ALX_ACER_FIXED_PTN0_MASK 0xFFFFFFFF
#define ALX_ACER_FIXED_PTN0_SHIFT 0
#define ALX_ACER_FIXED_PTN1 0x1854
#define ALX_ACER_FIXED_PTN1_MASK 0xFFFF
#define ALX_ACER_FIXED_PTN1_SHIFT 0
#define ALX_ACER_RANDOM_NUM0 0x1858
#define ALX_ACER_RANDOM_NUM0_MASK 0xFFFFFFFF
#define ALX_ACER_RANDOM_NUM0_SHIFT 0
#define ALX_ACER_RANDOM_NUM1 0x185C
#define ALX_ACER_RANDOM_NUM1_MASK 0xFFFFFFFF
#define ALX_ACER_RANDOM_NUM1_SHIFT 0
#define ALX_ACER_RANDOM_NUM2 0x1860
#define ALX_ACER_RANDOM_NUM2_MASK 0xFFFFFFFF
#define ALX_ACER_RANDOM_NUM2_SHIFT 0
#define ALX_ACER_RANDOM_NUM3 0x1864
#define ALX_ACER_RANDOM_NUM3_MASK 0xFFFFFFFF
#define ALX_ACER_RANDOM_NUM3_SHIFT 0
#define ALX_ACER_MAGIC 0x1868
#define ALX_ACER_MAGIC_EN BIT(31)
#define ALX_ACER_MAGIC_PME_EN BIT(30)
#define ALX_ACER_MAGIC_MATCH BIT(29)
#define ALX_ACER_MAGIC_FF_CHECK BIT(10)
#define ALX_ACER_MAGIC_RAN_LEN_MASK 0x1F
#define ALX_ACER_MAGIC_RAN_LEN_SHIFT 5
#define ALX_ACER_MAGIC_FIX_LEN_MASK 0x1F
#define ALX_ACER_MAGIC_FIX_LEN_SHIFT 0
#define ALX_ACER_TIMER 0x186C
#define ALX_ACER_TIMER_EN BIT(31)
#define ALX_ACER_TIMER_PME_EN BIT(30)
#define ALX_ACER_TIMER_MATCH BIT(29)
#define ALX_ACER_TIMER_THRES_MASK 0x1FFFF
#define ALX_ACER_TIMER_THRES_SHIFT 0
#define ALX_ACER_TIMER_THRES_DEF 1
/* RSS definitions */
#define ALX_RSS_KEY0 0x14B0
#define ALX_RSS_KEY1 0x14B4
#define ALX_RSS_KEY2 0x14B8
#define ALX_RSS_KEY3 0x14BC
#define ALX_RSS_KEY4 0x14C0
#define ALX_RSS_KEY5 0x14C4
#define ALX_RSS_KEY6 0x14C8
#define ALX_RSS_KEY7 0x14CC
#define ALX_RSS_KEY8 0x14D0
#define ALX_RSS_KEY9 0x14D4
#define ALX_RSS_IDT_TBL0 0x1B00
#define ALX_MSI_MAP_TBL1 0x15D0
#define ALX_MSI_MAP_TBL1_TXQ1_SHIFT 20
#define ALX_MSI_MAP_TBL1_TXQ0_SHIFT 16
#define ALX_MSI_MAP_TBL1_RXQ3_SHIFT 12
#define ALX_MSI_MAP_TBL1_RXQ2_SHIFT 8
#define ALX_MSI_MAP_TBL1_RXQ1_SHIFT 4
#define ALX_MSI_MAP_TBL1_RXQ0_SHIFT 0
#define ALX_MSI_MAP_TBL2 0x15D8
#define ALX_MSI_MAP_TBL2_TXQ3_SHIFT 20
#define ALX_MSI_MAP_TBL2_TXQ2_SHIFT 16
#define ALX_MSI_MAP_TBL2_RXQ7_SHIFT 12
#define ALX_MSI_MAP_TBL2_RXQ6_SHIFT 8
#define ALX_MSI_MAP_TBL2_RXQ5_SHIFT 4
#define ALX_MSI_MAP_TBL2_RXQ4_SHIFT 0
#define ALX_MSI_ID_MAP 0x15D4
#define ALX_MSI_RETRANS_TIMER 0x1920
/* bit16: 1:line,0:standard */
#define ALX_MSI_MASK_SEL_LINE BIT(16)
#define ALX_MSI_RETRANS_TM_MASK 0xFFFF
#define ALX_MSI_RETRANS_TM_SHIFT 0
/* CR DMA ctrl */
/* TX QoS */
#define ALX_WRR 0x1938
#define ALX_WRR_PRI_MASK 0x3
#define ALX_WRR_PRI_SHIFT 29
#define ALX_WRR_PRI_RESTRICT_NONE 3
#define ALX_WRR_PRI3_MASK 0x1F
#define ALX_WRR_PRI3_SHIFT 24
#define ALX_WRR_PRI2_MASK 0x1F
#define ALX_WRR_PRI2_SHIFT 16
#define ALX_WRR_PRI1_MASK 0x1F
#define ALX_WRR_PRI1_SHIFT 8
#define ALX_WRR_PRI0_MASK 0x1F
#define ALX_WRR_PRI0_SHIFT 0
#define ALX_HQTPD 0x193C
#define ALX_HQTPD_BURST_EN BIT(31)
#define ALX_HQTPD_Q3_NUMPREF_MASK 0xF
#define ALX_HQTPD_Q3_NUMPREF_SHIFT 8
#define ALX_HQTPD_Q2_NUMPREF_MASK 0xF
#define ALX_HQTPD_Q2_NUMPREF_SHIFT 4
#define ALX_HQTPD_Q1_NUMPREF_MASK 0xF
#define ALX_HQTPD_Q1_NUMPREF_SHIFT 0
#define ALX_MISC 0x19C0
#define ALX_MISC_PSW_OCP_MASK 0x7
#define ALX_MISC_PSW_OCP_SHIFT 21
#define ALX_MISC_PSW_OCP_DEF 0x7
#define ALX_MISC_ISO_EN BIT(12)
#define ALX_MISC_INTNLOSC_OPEN BIT(3)
#define ALX_MSIC2 0x19C8
#define ALX_MSIC2_CALB_START BIT(0)
#define ALX_MISC3 0x19CC
/* bit1: 1:Software control 25M */
#define ALX_MISC3_25M_BY_SW BIT(1)
/* bit0: 25M switch to intnl OSC */
#define ALX_MISC3_25M_NOTO_INTNL BIT(0)
/* MSIX tbl in memory space */
#define ALX_MSIX_ENTRY_BASE 0x2000
/********************* PHY regs definition ***************************/
/* PHY Specific Status Register */
#define ALX_MII_GIGA_PSSR 0x11
#define ALX_GIGA_PSSR_SPD_DPLX_RESOLVED 0x0800
#define ALX_GIGA_PSSR_DPLX 0x2000
#define ALX_GIGA_PSSR_SPEED 0xC000
#define ALX_GIGA_PSSR_10MBS 0x0000
#define ALX_GIGA_PSSR_100MBS 0x4000
#define ALX_GIGA_PSSR_1000MBS 0x8000
/* PHY Interrupt Enable Register */
#define ALX_MII_IER 0x12
#define ALX_IER_LINK_UP 0x0400
#define ALX_IER_LINK_DOWN 0x0800
/* PHY Interrupt Status Register */
#define ALX_MII_ISR 0x13
#define ALX_MII_DBG_ADDR 0x1D
#define ALX_MII_DBG_DATA 0x1E
/***************************** debug port *************************************/
#define ALX_MIIDBG_ANACTRL 0x00
#define ALX_ANACTRL_DEF 0x02EF
#define ALX_MIIDBG_SYSMODCTRL 0x04
/* en half bias */
#define ALX_SYSMODCTRL_IECHOADJ_DEF 0xBB8B
#define ALX_MIIDBG_SRDSYSMOD 0x05
#define ALX_SRDSYSMOD_DEEMP_EN 0x0040
#define ALX_SRDSYSMOD_DEF 0x2C46
#define ALX_MIIDBG_HIBNEG 0x0B
#define ALX_HIBNEG_PSHIB_EN 0x8000
#define ALX_HIBNEG_HIB_PSE 0x1000
#define ALX_HIBNEG_DEF 0xBC40
#define ALX_HIBNEG_NOHIB (ALX_HIBNEG_DEF & \
~(ALX_HIBNEG_PSHIB_EN | ALX_HIBNEG_HIB_PSE))
#define ALX_MIIDBG_TST10BTCFG 0x12
#define ALX_TST10BTCFG_DEF 0x4C04
#define ALX_MIIDBG_AZ_ANADECT 0x15
#define ALX_AZ_ANADECT_DEF 0x3220
#define ALX_AZ_ANADECT_LONG 0x3210
#define ALX_MIIDBG_MSE16DB 0x18
#define ALX_MSE16DB_UP 0x05EA
#define ALX_MSE16DB_DOWN 0x02EA
#define ALX_MIIDBG_MSE20DB 0x1C
#define ALX_MSE20DB_TH_MASK 0x7F
#define ALX_MSE20DB_TH_SHIFT 2
#define ALX_MSE20DB_TH_DEF 0x2E
#define ALX_MSE20DB_TH_HI 0x54
#define ALX_MIIDBG_AGC 0x23
#define ALX_AGC_2_VGA_MASK 0x3FU
#define ALX_AGC_2_VGA_SHIFT 8
#define ALX_AGC_LONG1G_LIMT 40
#define ALX_AGC_LONG100M_LIMT 44
#define ALX_MIIDBG_LEGCYPS 0x29
#define ALX_LEGCYPS_EN 0x8000
#define ALX_LEGCYPS_DEF 0x129D
#define ALX_MIIDBG_TST100BTCFG 0x36
#define ALX_TST100BTCFG_DEF 0xE12C
#define ALX_MIIDBG_GREENCFG 0x3B
#define ALX_GREENCFG_DEF 0x7078
#define ALX_MIIDBG_GREENCFG2 0x3D
#define ALX_GREENCFG2_BP_GREEN 0x8000
#define ALX_GREENCFG2_GATE_DFSE_EN 0x0080
/******* dev 3 *********/
#define ALX_MIIEXT_PCS 3
#define ALX_MIIEXT_CLDCTRL3 0x8003
#define ALX_CLDCTRL3_BP_CABLE1TH_DET_GT 0x8000
#define ALX_MIIEXT_CLDCTRL5 0x8005
#define ALX_CLDCTRL5_BP_VD_HLFBIAS 0x4000
#define ALX_MIIEXT_CLDCTRL6 0x8006
#define ALX_CLDCTRL6_CAB_LEN_MASK 0xFF
#define ALX_CLDCTRL6_CAB_LEN_SHIFT 0
#define ALX_CLDCTRL6_CAB_LEN_SHORT1G 116
#define ALX_CLDCTRL6_CAB_LEN_SHORT100M 152
#define ALX_MIIEXT_VDRVBIAS 0x8062
#define ALX_VDRVBIAS_DEF 0x3
/********* dev 7 **********/
#define ALX_MIIEXT_ANEG 7
#define ALX_MIIEXT_LOCAL_EEEADV 0x3C
#define ALX_LOCAL_EEEADV_1000BT 0x0004
#define ALX_LOCAL_EEEADV_100BT 0x0002
#define ALX_MIIEXT_AFE 0x801A
#define ALX_AFE_10BT_100M_TH 0x0040
#define ALX_MIIEXT_S3DIG10 0x8023
/* bit0: 1:bypass 10BT rx fifo, 0:original 10BT rx */
#define ALX_MIIEXT_S3DIG10_SL 0x0001
#define ALX_MIIEXT_S3DIG10_DEF 0
#define ALX_MIIEXT_NLP78 0x8027
#define ALX_MIIEXT_NLP78_120M_DEF 0x8A05
#endif
@@ -744,6 +744,9 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
 		status = tg3_ape_read32(tp, gnt + off);
 		if (status == bit)
 			break;
+		if (pci_channel_offline(tp->pdev))
+			break;
 		udelay(10);
 	}
@@ -1635,6 +1638,9 @@ static void tg3_wait_for_event_ack(struct tg3 *tp)
 	for (i = 0; i < delay_cnt; i++) {
 		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
 			break;
+		if (pci_channel_offline(tp->pdev))
+			break;
 		udelay(8);
 	}
 }
@@ -1813,6 +1819,9 @@ static int tg3_poll_fw(struct tg3 *tp)
 		for (i = 0; i < 200; i++) {
 			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
 				return 0;
+			if (pci_channel_offline(tp->pdev))
+				return -ENODEV;
 			udelay(100);
 		}
 		return -ENODEV;
@@ -1823,6 +1832,15 @@ static int tg3_poll_fw(struct tg3 *tp)
 		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
 		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
 			break;
+		if (pci_channel_offline(tp->pdev)) {
+			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
+				tg3_flag_set(tp, NO_FWARE_REPORTED);
+				netdev_info(tp->dev, "No firmware running\n");
+			}
+			break;
+		}
 		udelay(10);
 	}
@@ -3520,6 +3538,8 @@ static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
 		tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
 		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
 			break;
+		if (pci_channel_offline(tp->pdev))
+			return -EBUSY;
 	}
 	return (i == iters) ? -EBUSY : 0;
@@ -8589,6 +8609,14 @@ static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
 	tw32_f(ofs, val);
 	for (i = 0; i < MAX_WAIT_CNT; i++) {
+		if (pci_channel_offline(tp->pdev)) {
+			dev_err(&tp->pdev->dev,
+				"tg3_stop_block device offline, "
+				"ofs=%lx enable_bit=%x\n",
+				ofs, enable_bit);
+			return -ENODEV;
+		}
 		udelay(100);
 		val = tr32(ofs);
 		if ((val & enable_bit) == 0)
@@ -8612,6 +8640,13 @@ static int tg3_abort_hw(struct tg3 *tp, bool silent)
 	tg3_disable_ints(tp);
+	if (pci_channel_offline(tp->pdev)) {
+		tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
+		tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
+		err = -ENODEV;
+		goto err_no_dev;
+	}
 	tp->rx_mode &= ~RX_MODE_ENABLE;
 	tw32_f(MAC_RX_MODE, tp->rx_mode);
 	udelay(10);
@@ -8660,6 +8695,7 @@ static int tg3_abort_hw(struct tg3 *tp, bool silent)
 	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
 	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
+err_no_dev:
 	for (i = 0; i < tp->irq_cnt; i++) {
 		struct tg3_napi *tnapi = &tp->napi[i];
 		if (tnapi->hw_status)
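Each tg3 hunk above adds the same guard: a bounded register-poll loop should also give up once the PCI channel is reported offline (for example after an EEH/AER error), instead of spinning out the whole timeout on reads that can no longer succeed. A condensed sketch of the resulting loop shape, with simplified names rather than tg3's actual registers:

/* Illustrative shape of an offline-aware poll loop. */
static int example_poll(struct pci_dev *pdev, void __iomem *reg, u32 done_bit)
{
	int i;

	for (i = 0; i < 200; i++) {
		if (readl(reg) & done_bit)
			return 0;		/* hardware finished */
		if (pci_channel_offline(pdev))
			return -ENODEV;		/* device is gone, stop waiting */
		udelay(100);
	}
	return -ETIMEDOUT;
}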
@@ -516,6 +516,7 @@ fec_restart(struct net_device *ndev, int duplex)
 	/* Set MII speed */
 	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
+#if !defined(CONFIG_M5272)
 	/* set RX checksum */
 	val = readl(fep->hwp + FEC_RACC);
 	if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
@@ -523,6 +524,7 @@ fec_restart(struct net_device *ndev, int duplex)
 	else
 		val &= ~FEC_RACC_OPTIONS;
 	writel(val, fep->hwp + FEC_RACC);
+#endif
 	/*
 	 * The phy interface and speed need to get configured
@@ -575,6 +577,7 @@ fec_restart(struct net_device *ndev, int duplex)
 #endif
 	}
+#if !defined(CONFIG_M5272)
 	/* enable pause frame*/
 	if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
 	    ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
@@ -592,6 +595,7 @@ fec_restart(struct net_device *ndev, int duplex)
 	} else {
 		rcntl &= ~FEC_ENET_FCE;
 	}
+#endif /* !defined(CONFIG_M5272) */
 	writel(rcntl, fep->hwp + FEC_R_CNTRL);
@@ -1205,7 +1209,9 @@ static int fec_enet_mii_probe(struct net_device *ndev)
 	/* mask with MAC supported features */
 	if (id_entry->driver_data & FEC_QUIRK_HAS_GBIT) {
 		phy_dev->supported &= PHY_GBIT_FEATURES;
+#if !defined(CONFIG_M5272)
 		phy_dev->supported |= SUPPORTED_Pause;
+#endif
 	}
 	else
 		phy_dev->supported &= PHY_BASIC_FEATURES;
@@ -1390,6 +1396,8 @@ static int fec_enet_get_ts_info(struct net_device *ndev,
 	}
 }
+#if !defined(CONFIG_M5272)
 static void fec_enet_get_pauseparam(struct net_device *ndev,
 				    struct ethtool_pauseparam *pause)
 {
@@ -1436,9 +1444,13 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
 	return 0;
 }
+#endif /* !defined(CONFIG_M5272) */
 static const struct ethtool_ops fec_enet_ethtool_ops = {
+#if !defined(CONFIG_M5272)
 	.get_pauseparam = fec_enet_get_pauseparam,
 	.set_pauseparam = fec_enet_set_pauseparam,
+#endif
 	.get_settings = fec_enet_get_settings,
 	.set_settings = fec_enet_set_settings,
 	.get_drvinfo = fec_enet_get_drvinfo,
@@ -1874,10 +1886,12 @@ fec_probe(struct platform_device *pdev)
 	/* setup board info structure */
 	fep = netdev_priv(ndev);
+#if !defined(CONFIG_M5272)
 	/* default enable pause frame auto negotiation */
 	if (pdev->id_entry &&
 	    (pdev->id_entry->driver_data & FEC_QUIRK_HAS_GBIT))
 		fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
+#endif
 	fep->hwp = devm_request_and_ioremap(&pdev->dev, r);
 	fep->pdev = pdev;
@@ -1757,7 +1757,7 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index)
 	memset(rxq->rx_desc_area, 0, size);
 	rxq->rx_desc_area_size = size;
-	rxq->rx_skb = kmalloc_array(rxq->rx_ring_size, sizeof(*rxq->rx_skb),
+	rxq->rx_skb = kcalloc(rxq->rx_ring_size, sizeof(*rxq->rx_skb),
 			      GFP_KERNEL);
 	if (rxq->rx_skb == NULL)
 		goto out_free;
@@ -1015,7 +1015,7 @@ static int rxq_init(struct net_device *dev)
 	int rx_desc_num = pep->rx_ring_size;
 	/* Allocate RX skb rings */
-	pep->rx_skb = kmalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size,
+	pep->rx_skb = kzalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size,
 			      GFP_KERNEL);
 	if (!pep->rx_skb)
 		return -ENOMEM;
@@ -1076,7 +1076,7 @@ static int txq_init(struct net_device *dev)
 	int size = 0, i = 0;
 	int tx_desc_num = pep->tx_ring_size;
-	pep->tx_skb = kmalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size,
+	pep->tx_skb = kzalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size,
 			      GFP_KERNEL);
 	if (!pep->tx_skb)
 		return -ENOMEM;


@@ -632,6 +632,9 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
dev->caps.cqe_size = 32;
}
+dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
+mlx4_warn(dev, "Timestamping is not supported in slave mode.\n");
slave_adjust_steering_mode(dev, &dev_cap, &hca_param);
return 0;


@@ -46,17 +46,25 @@
union mgmt_port_ring_entry {
u64 d64;
struct {
-u64 reserved_62_63:2;
-/* Length of the buffer/packet in bytes */
-u64 len:14;
-/* For TX, signals that the packet should be timestamped */
-u64 tstamp:1;
-/* The RX error code */
-u64 code:7;
#define RING_ENTRY_CODE_DONE 0xf
#define RING_ENTRY_CODE_MORE 0x10
+#ifdef __BIG_ENDIAN_BITFIELD
+u64 reserved_62_63:2;
+/* Length of the buffer/packet in bytes */
+u64 len:14;
+/* For TX, signals that the packet should be timestamped */
+u64 tstamp:1;
+/* The RX error code */
+u64 code:7;
/* Physical address of the buffer */
u64 addr:40;
+#else
+u64 addr:40;
+u64 code:7;
+u64 tstamp:1;
+u64 len:14;
+u64 reserved_62_63:2;
+#endif
} s;
};
@@ -1141,10 +1149,13 @@ static int octeon_mgmt_open(struct net_device *netdev)
/* For compensation state to lock. */
ndelay(1040 * NS_PER_PHY_CLK);
-/* Some Ethernet switches cannot handle standard
-* Interframe Gap, increase to 16 bytes.
+/* Default Interframe Gaps are too small. Recommended
+* workaround is.
+*
+* AGL_GMX_TX_IFG[IFG1]=14
+* AGL_GMX_TX_IFG[IFG2]=10
*/
-cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0x88);
+cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0xae);
}
octeon_mgmt_rx_fill_ring(netdev);


@@ -642,7 +642,7 @@ void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
qlcnic_83xx_config_intrpt(adapter, 0);
}
/* Allow dma queues to drain after context reset */
-msleep(20);
+mdelay(20);
}
}


@@ -380,8 +380,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
.eesipr_value = 0x01ff009f,
.tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
-.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
-EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
+.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
+EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
+EESR_ECI,
.tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
.apr = 1,
@@ -427,8 +428,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f,
.tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
-.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
-EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
+.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
+EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
+EESR_ECI,
.tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
.apr = 1,
@@ -478,8 +480,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
.rmcr_value = 0x00000001,
.tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
-.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
-EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
+.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
+EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
+EESR_ECI,
.tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
.apr = 1,
@@ -592,9 +595,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = {
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
.tx_check = EESR_TC1 | EESR_FTC,
-.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
-EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
-EESR_ECI,
+.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
+EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
+EESR_TDE | EESR_ECI,
.tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
EESR_TFE,
.fdr_value = 0x0000072f,
@@ -674,9 +677,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
.tx_check = EESR_TC1 | EESR_FTC,
-.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
-EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
-EESR_ECI,
+.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
+EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
+EESR_TDE | EESR_ECI,
.tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
EESR_TFE,
@@ -811,9 +814,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
.tx_check = EESR_TC1 | EESR_FTC,
-.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
-EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
-EESR_ECI,
+.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
+EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
+EESR_TDE | EESR_ECI,
.tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
EESR_TFE,
@@ -1549,11 +1552,12 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
ignore_link:
if (intr_status & EESR_TWB) {
-/* Write buck end. unused write back interrupt */
-if (intr_status & EESR_TABT) /* Transmit Abort int */
+/* Unused write back interrupt */
+if (intr_status & EESR_TABT) { /* Transmit Abort int */
ndev->stats.tx_aborted_errors++;
if (netif_msg_tx_err(mdp))
dev_err(&ndev->dev, "Transmit Abort\n");
+}
}
if (intr_status & EESR_RABT) {


@@ -253,7 +253,7 @@ enum EESR_BIT {
#define DEFAULT_TX_CHECK (EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | \
EESR_RTO)
-#define DEFAULT_EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | \
+#define DEFAULT_EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | \
EESR_RDE | EESR_RFRMER | EESR_ADE | \
EESR_TFE | EESR_TDE | EESR_ECI)
#define DEFAULT_TX_ERROR_CHECK (EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | \


@@ -2139,7 +2139,7 @@ show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
return sprintf(buf, "%d\n", efx->phy_type);
}
-static DEVICE_ATTR(phy_type, 0644, show_phy_type, NULL);
+static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
static int efx_register_netdev(struct efx_nic *efx)
{


@@ -297,8 +297,8 @@ struct dma_features {
#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */
/* Default LPI timers */
-#define STMMAC_DEFAULT_LIT_LS_TIMER 0x3E8
-#define STMMAC_DEFAULT_TWT_LS_TIMER 0x0
+#define STMMAC_DEFAULT_LIT_LS 0x3E8
+#define STMMAC_DEFAULT_TWT_LS 0x0
#define STMMAC_CHAIN_MODE 0x1
#define STMMAC_RING_MODE 0x2


@@ -130,7 +130,7 @@ static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
-#define STMMAC_LPI_TIMER(x) (jiffies + msecs_to_jiffies(x))
+#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
/* By default the driver will use the ring mode to manage tx and rx descriptors
* but passing this value so user can force to use the chain instead of the ring
@@ -288,7 +288,7 @@ static void stmmac_eee_ctrl_timer(unsigned long arg)
struct stmmac_priv *priv = (struct stmmac_priv *)arg;
stmmac_enable_eee_mode(priv);
-mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_TIMER(eee_timer));
+mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
}
/**
@@ -304,22 +304,34 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
{
bool ret = false;
+/* Using PCS we cannot dial with the phy registers at this stage
+* so we do not support extra feature like EEE.
+*/
+if ((priv->pcs == STMMAC_PCS_RGMII) || (priv->pcs == STMMAC_PCS_TBI) ||
+(priv->pcs == STMMAC_PCS_RTBI))
+goto out;
/* MAC core supports the EEE feature. */
if (priv->dma_cap.eee) {
/* Check if the PHY supports EEE */
if (phy_init_eee(priv->phydev, 1))
goto out;
-priv->eee_active = 1;
-init_timer(&priv->eee_ctrl_timer);
-priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer;
-priv->eee_ctrl_timer.data = (unsigned long)priv;
-priv->eee_ctrl_timer.expires = STMMAC_LPI_TIMER(eee_timer);
-add_timer(&priv->eee_ctrl_timer);
+if (!priv->eee_active) {
+priv->eee_active = 1;
+init_timer(&priv->eee_ctrl_timer);
+priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer;
+priv->eee_ctrl_timer.data = (unsigned long)priv;
+priv->eee_ctrl_timer.expires = STMMAC_LPI_T(eee_timer);
+add_timer(&priv->eee_ctrl_timer);
priv->hw->mac->set_eee_timer(priv->ioaddr,
-STMMAC_DEFAULT_LIT_LS_TIMER,
+STMMAC_DEFAULT_LIT_LS,
priv->tx_lpi_timer);
+} else
+/* Set HW EEE according to the speed */
+priv->hw->mac->set_eee_pls(priv->ioaddr,
+priv->phydev->link);
pr_info("stmmac: Energy-Efficient Ethernet initialized\n");
@@ -329,20 +341,6 @@ out:
return ret;
}
-/**
-* stmmac_eee_adjust: adjust HW EEE according to the speed
-* @priv: driver private structure
-* Description:
-* When the EEE has been already initialised we have to
-* modify the PLS bit in the LPI ctrl & status reg according
-* to the PHY link status. For this reason.
-*/
-static void stmmac_eee_adjust(struct stmmac_priv *priv)
-{
-if (priv->eee_enabled)
-priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link);
-}
/* stmmac_get_tx_hwtstamp: get HW TX timestamps
* @priv: driver private structure
* @entry : descriptor index to be used.
@@ -769,7 +767,10 @@ static void stmmac_adjust_link(struct net_device *dev)
if (new_state && netif_msg_link(priv))
phy_print_status(phydev);
-stmmac_eee_adjust(priv);
+/* At this stage, it could be needed to setup the EEE or adjust some
+* MAC related HW registers.
+*/
+priv->eee_enabled = stmmac_eee_init(priv);
spin_unlock_irqrestore(&priv->lock, flags);
@@ -1277,7 +1278,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
stmmac_enable_eee_mode(priv);
-mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_TIMER(eee_timer));
+mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
}
spin_unlock(&priv->tx_lock);
}
@@ -1671,14 +1672,9 @@ static int stmmac_open(struct net_device *dev)
if (priv->phydev)
phy_start(priv->phydev);
-priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS_TIMER;
-/* Using PCS we cannot dial with the phy registers at this stage
-* so we do not support extra feature like EEE.
-*/
-if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
-priv->pcs != STMMAC_PCS_RTBI)
-priv->eee_enabled = stmmac_eee_init(priv);
+priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
+priv->eee_enabled = stmmac_eee_init(priv);
stmmac_init_tx_coalesce(priv);


@@ -1679,7 +1679,7 @@ static int cpsw_probe(struct platform_device *pdev)
priv->rx_packet_max = max(rx_packet_max, 128);
priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL);
priv->irq_enabled = true;
-if (!ndev) {
+if (!priv->cpts) {
pr_err("error allocating cpts\n");
goto clean_ndev_ret;
}
@@ -1973,9 +1973,12 @@ static int cpsw_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct net_device *ndev = platform_get_drvdata(pdev);
+struct cpsw_priv *priv = netdev_priv(ndev);
if (netif_running(ndev))
cpsw_ndo_stop(ndev);
+soft_reset("sliver 0", &priv->slaves[0].sliver->soft_reset);
+soft_reset("sliver 1", &priv->slaves[1].sliver->soft_reset);
pm_runtime_put_sync(&pdev->dev);
return 0;


@@ -705,6 +705,13 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
}
buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
+ret = dma_mapping_error(ctlr->dev, buffer);
+if (ret) {
+cpdma_desc_free(ctlr->pool, desc, 1);
+ret = -EINVAL;
+goto unlock_ret;
+}
mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
cpdma_desc_to_port(chan, mode, directed);


@@ -285,7 +285,9 @@ int netvsc_recv_callback(struct hv_device *device_obj,
skb->protocol = eth_type_trans(skb, net);
skb->ip_summed = CHECKSUM_NONE;
-__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), packet->vlan_tci);
+if (packet->vlan_tci & VLAN_TAG_PRESENT)
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+packet->vlan_tci);
net->stats.rx_packets++;
net->stats.rx_bytes += packet->total_data_buflen;


@@ -524,8 +524,10 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
return -EMSGSIZE;
num_pages = get_user_pages_fast(base, size, 0, &page[i]);
if (num_pages != size) {
-for (i = 0; i < num_pages; i++)
-put_page(page[i]);
+int j;
+for (j = 0; j < num_pages; j++)
+put_page(page[i + j]);
return -EFAULT;
}
truesize = size * PAGE_SIZE;


@@ -1010,8 +1010,10 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
return -EMSGSIZE;
num_pages = get_user_pages_fast(base, size, 0, &page[i]);
if (num_pages != size) {
-for (i = 0; i < num_pages; i++)
-put_page(page[i]);
+int j;
+for (j = 0; j < num_pages; j++)
+put_page(page[i + j]);
return -EFAULT;
}
truesize = size * PAGE_SIZE;


@@ -590,7 +590,13 @@ static const struct usb_device_id products[] = {
{QMI_GOBI1K_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */
{QMI_GOBI1K_DEVICE(0x04da, 0x250d)}, /* Panasonic Gobi Modem device */
{QMI_GOBI1K_DEVICE(0x413c, 0x8172)}, /* Dell Gobi Modem device */
-{QMI_GOBI1K_DEVICE(0x1410, 0xa001)}, /* Novatel Gobi Modem device */
+{QMI_GOBI1K_DEVICE(0x1410, 0xa001)}, /* Novatel/Verizon USB-1000 */
+{QMI_GOBI1K_DEVICE(0x1410, 0xa002)}, /* Novatel Gobi Modem device */
+{QMI_GOBI1K_DEVICE(0x1410, 0xa003)}, /* Novatel Gobi Modem device */
+{QMI_GOBI1K_DEVICE(0x1410, 0xa004)}, /* Novatel Gobi Modem device */
+{QMI_GOBI1K_DEVICE(0x1410, 0xa005)}, /* Novatel Gobi Modem device */
+{QMI_GOBI1K_DEVICE(0x1410, 0xa006)}, /* Novatel Gobi Modem device */
+{QMI_GOBI1K_DEVICE(0x1410, 0xa007)}, /* Novatel Gobi Modem device */
{QMI_GOBI1K_DEVICE(0x0b05, 0x1776)}, /* Asus Gobi Modem device */
{QMI_GOBI1K_DEVICE(0x19d2, 0xfff3)}, /* ONDA Gobi Modem device */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9001)}, /* Generic Gobi Modem device */


@@ -565,18 +565,22 @@ skip:
/* Watch incoming packets to learn mapping between Ethernet address
* and Tunnel endpoint.
+* Return true if packet is bogus and should be droppped.
*/
-static void vxlan_snoop(struct net_device *dev,
+static bool vxlan_snoop(struct net_device *dev,
__be32 src_ip, const u8 *src_mac)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_fdb *f;
-int err;
f = vxlan_find_mac(vxlan, src_mac);
if (likely(f)) {
if (likely(f->remote.remote_ip == src_ip))
-return;
+return false;
+/* Don't migrate static entries, drop packets */
+if (f->state & NUD_NOARP)
+return true;
if (net_ratelimit())
netdev_info(dev,
@@ -588,14 +592,19 @@ static void vxlan_snoop(struct net_device *dev,
} else {
/* learned new entry */
spin_lock(&vxlan->hash_lock);
-err = vxlan_fdb_create(vxlan, src_mac, src_ip,
-NUD_REACHABLE,
-NLM_F_EXCL|NLM_F_CREATE,
-vxlan->dst_port,
-vxlan->default_dst.remote_vni,
-0, NTF_SELF);
+/* close off race between vxlan_flush and incoming packets */
+if (netif_running(dev))
+vxlan_fdb_create(vxlan, src_mac, src_ip,
+NUD_REACHABLE,
+NLM_F_EXCL|NLM_F_CREATE,
+vxlan->dst_port,
+vxlan->default_dst.remote_vni,
+0, NTF_SELF);
spin_unlock(&vxlan->hash_lock);
}
+return false;
}
@@ -727,8 +736,9 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
vxlan->dev->dev_addr) == 0)
goto drop;
-if (vxlan->flags & VXLAN_F_LEARN)
-vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source);
+if ((vxlan->flags & VXLAN_F_LEARN) &&
+vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source))
+goto drop;
__skb_tunnel_rx(skb, vxlan->dev);
skb_reset_network_header(skb);
@@ -1151,9 +1161,11 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
struct sk_buff *skb1;
skb1 = skb_clone(skb, GFP_ATOMIC);
-rc1 = vxlan_xmit_one(skb1, dev, rdst, did_rsc);
-if (rc == NETDEV_TX_OK)
-rc = rc1;
+if (skb1) {
+rc1 = vxlan_xmit_one(skb1, dev, rdst, did_rsc);
+if (rc == NETDEV_TX_OK)
+rc = rc1;
+}
}
rc1 = vxlan_xmit_one(skb, dev, rdst0, did_rsc);


@@ -384,21 +384,37 @@ static int dlci_del(struct dlci_add *dlci)
struct frad_local *flp;
struct net_device *master, *slave;
int err;
+bool found = false;
+rtnl_lock();
/* validate slave device */
master = __dev_get_by_name(&init_net, dlci->devname);
-if (!master)
-return -ENODEV;
+if (!master) {
+err = -ENODEV;
+goto out;
+}
+list_for_each_entry(dlp, &dlci_devs, list) {
+if (dlp->master == master) {
+found = true;
+break;
+}
+}
+if (!found) {
+err = -ENODEV;
+goto out;
+}
if (netif_running(master)) {
-return -EBUSY;
+err = -EBUSY;
+goto out;
}
dlp = netdev_priv(master);
slave = dlp->slave;
flp = netdev_priv(slave);
-rtnl_lock();
err = (*flp->deassoc)(slave, master);
if (!err) {
list_del(&dlp->list);
@@ -407,8 +423,8 @@ static int dlci_del(struct dlci_add *dlci)
dev_put(slave);
}
+out:
rtnl_unlock();
return err;
}


@@ -1174,7 +1174,7 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
mutex_lock(&priv->htc_pm_lock);
priv->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE);
-if (priv->ps_idle)
+if (!priv->ps_idle)
chip_reset = true;
mutex_unlock(&priv->htc_pm_lock);


@@ -1570,6 +1570,8 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
return;
+rcu_read_lock();
ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
@@ -1608,8 +1610,10 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
if (ac == last_ac ||
txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
-return;
+break;
}
+rcu_read_unlock();
}
/***********/


@@ -930,6 +930,10 @@ fail:
brcmf_fws_del_interface(ifp);
brcmf_fws_deinit(drvr);
}
+if (drvr->iflist[0]) {
+free_netdev(ifp->ndev);
+drvr->iflist[0] = NULL;
+}
if (p2p_ifp) {
free_netdev(p2p_ifp->ndev);
drvr->iflist[1] = NULL;


@@ -3074,21 +3074,8 @@ static void brcms_b_antsel_set(struct brcms_hardware *wlc_hw, u32 antsel_avail)
*/
static bool brcms_c_ps_allowed(struct brcms_c_info *wlc)
{
-/* disallow PS when one of the following global conditions meets */
-if (!wlc->pub->associated)
-return false;
-/* disallow PS when one of these meets when not scanning */
-if (wlc->filter_flags & FIF_PROMISC_IN_BSS)
-return false;
-if (wlc->bsscfg->type == BRCMS_TYPE_AP)
-return false;
-if (wlc->bsscfg->type == BRCMS_TYPE_ADHOC)
-return false;
-return true;
+/* not supporting PS so always return false for now */
+return false;
}
static void brcms_c_statsupd(struct brcms_c_info *wlc)


@@ -816,6 +816,7 @@ out:
rs_sta->last_txrate_idx = idx;
info->control.rates[0].idx = rs_sta->last_txrate_idx;
}
+info->control.rates[0].count = 1;
D_RATE("leave: %d\n", idx);
}


@@ -2268,7 +2268,7 @@ il4965_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
info->control.rates[0].flags = 0;
}
info->control.rates[0].idx = rate_idx;
+info->control.rates[0].count = 1;
}
static void *


@@ -2799,7 +2799,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
info->control.rates[0].flags = 0;
}
info->control.rates[0].idx = rate_idx;
+info->control.rates[0].count = 1;
}
static void *rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,


@@ -1378,7 +1378,7 @@ static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
struct iwl_chain_noise_data *data = &priv->chain_noise_data;
int ret;
-if (!(priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED))
+if (priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED)
return;
if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&


@@ -1000,10 +1000,12 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
*/
if (load_module) {
err = request_module("%s", op->name);
+#ifdef CONFIG_IWLWIFI_OPMODE_MODULAR
if (err)
IWL_ERR(drv,
"failed to load module %s (error %d), is dynamic loading enabled?\n",
op->name, err);
+#endif
}
return;


@@ -2546,6 +2546,7 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
info->control.rates[0].flags = 0;
}
info->control.rates[0].idx = rate_idx;
+info->control.rates[0].count = 1;
}
static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,


@@ -180,7 +180,8 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
return;
} else if (ieee80211_is_back_req(fc)) {
-tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
+tx_cmd->tx_flags |=
+cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR);
}
/* HT rate doesn't make sense for a non data frame */


@@ -3027,19 +3027,26 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
* TODO: we do not use +6 dBm option to do not increase power beyond
* regulatory limit, however this could be utilized for devices with
* CAPABILITY_POWER_LIMIT.
+*
+* TODO: add different temperature compensation code for RT3290 & RT5390
+* to allow to use BBP_R1 for those chips.
*/
-rt2800_bbp_read(rt2x00dev, 1, &r1);
-if (delta <= -12) {
-power_ctrl = 2;
-delta += 12;
-} else if (delta <= -6) {
-power_ctrl = 1;
-delta += 6;
-} else {
-power_ctrl = 0;
+if (!rt2x00_rt(rt2x00dev, RT3290) &&
+!rt2x00_rt(rt2x00dev, RT5390)) {
+rt2800_bbp_read(rt2x00dev, 1, &r1);
+if (delta <= -12) {
+power_ctrl = 2;
+delta += 12;
+} else if (delta <= -6) {
+power_ctrl = 1;
+delta += 6;
+} else {
+power_ctrl = 0;
+}
+rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, power_ctrl);
+rt2800_bbp_write(rt2x00dev, 1, r1);
}
-rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, power_ctrl);
-rt2800_bbp_write(rt2x00dev, 1, r1);
offset = TX_PWR_CFG_0;
for (i = 0; i < EEPROM_TXPOWER_BYRATE_SIZE; i += 2) {


@@ -61,6 +61,7 @@ static DEFINE_MUTEX(bridge_mutex);
static void handle_hotplug_event_bridge (acpi_handle, u32, void *);
static void acpiphp_sanitize_bus(struct pci_bus *bus);
static void acpiphp_set_hpp_values(struct pci_bus *bus);
+static void hotplug_event_func(acpi_handle handle, u32 type, void *context);
static void handle_hotplug_event_func(acpi_handle handle, u32 type, void *context);
static void free_bridge(struct kref *kref);
@@ -147,7 +148,7 @@ static int post_dock_fixups(struct notifier_block *nb, unsigned long val,
static const struct acpi_dock_ops acpiphp_dock_ops = {
-.handler = handle_hotplug_event_func,
+.handler = hotplug_event_func,
};
/* Check whether the PCI device is managed by native PCIe hotplug driver */
@@ -179,6 +180,20 @@ static bool device_is_managed_by_native_pciehp(struct pci_dev *pdev)
return true;
}
+static void acpiphp_dock_init(void *data)
+{
+struct acpiphp_func *func = data;
+get_bridge(func->slot->bridge);
+}
+static void acpiphp_dock_release(void *data)
+{
+struct acpiphp_func *func = data;
+put_bridge(func->slot->bridge);
+}
/* callback routine to register each ACPI PCI slot object */
static acpi_status
register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
@@ -298,7 +313,8 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
*/
newfunc->flags &= ~FUNC_HAS_EJ0;
if (register_hotplug_dock_device(handle,
-&acpiphp_dock_ops, newfunc))
+&acpiphp_dock_ops, newfunc,
+acpiphp_dock_init, acpiphp_dock_release))
dbg("failed to register dock device\n");
/* we need to be notified when dock events happen
@@ -670,6 +686,7 @@ static int __ref enable_device(struct acpiphp_slot *slot)
struct pci_bus *bus = slot->bridge->pci_bus;
struct acpiphp_func *func;
int num, max, pass;
+LIST_HEAD(add_list);
if (slot->flags & SLOT_ENABLED)
goto err_exit;
@@ -694,13 +711,15 @@ static int __ref enable_device(struct acpiphp_slot *slot)
max = pci_scan_bridge(bus, dev, max, pass);
if (pass && dev->subordinate) {
check_hotplug_bridge(slot, dev);
-pci_bus_size_bridges(dev->subordinate);
+pcibios_resource_survey_bus(dev->subordinate);
+__pci_bus_size_bridges(dev->subordinate,
+&add_list);
}
}
}
}
-pci_bus_assign_resources(bus);
+__pci_bus_assign_resources(bus, &add_list, NULL);
acpiphp_sanitize_bus(bus);
acpiphp_set_hpp_values(bus);
acpiphp_set_acpi_region(slot);
@@ -1065,22 +1084,12 @@ static void handle_hotplug_event_bridge(acpi_handle handle, u32 type,
alloc_acpi_hp_work(handle, type, context, _handle_hotplug_event_bridge);
}
-static void _handle_hotplug_event_func(struct work_struct *work)
+static void hotplug_event_func(acpi_handle handle, u32 type, void *context)
{
-struct acpiphp_func *func;
+struct acpiphp_func *func = context;
char objname[64];
struct acpi_buffer buffer = { .length = sizeof(objname),
.pointer = objname };
-struct acpi_hp_work *hp_work;
-acpi_handle handle;
-u32 type;
-hp_work = container_of(work, struct acpi_hp_work, work);
-handle = hp_work->handle;
-type = hp_work->type;
-func = (struct acpiphp_func *)hp_work->context;
-acpi_scan_lock_acquire();
acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
@@ -1113,6 +1122,18 @@ static void _handle_hotplug_event_func(struct work_struct *work)
warn("notify_handler: unknown event type 0x%x for %s\n", type, objname);
break;
}
+}
+static void _handle_hotplug_event_func(struct work_struct *work)
+{
+struct acpi_hp_work *hp_work;
+struct acpiphp_func *func;
+hp_work = container_of(work, struct acpi_hp_work, work);
+func = hp_work->context;
+acpi_scan_lock_acquire();
+hotplug_event_func(hp_work->handle, hp_work->type, func);
acpi_scan_lock_release();
kfree(hp_work); /* allocated in handle_hotplug_event_func */


@@ -202,6 +202,11 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
struct resource *res, unsigned int reg);
int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type);
void pci_configure_ari(struct pci_dev *dev);
+void __ref __pci_bus_size_bridges(struct pci_bus *bus,
+struct list_head *realloc_head);
+void __ref __pci_bus_assign_resources(const struct pci_bus *bus,
+struct list_head *realloc_head,
+struct list_head *fail_head);
/**
* pci_ari_enabled - query ARI forwarding status


@@ -1044,7 +1044,7 @@ handle_done:
;
}
-static void __ref __pci_bus_size_bridges(struct pci_bus *bus,
+void __ref __pci_bus_size_bridges(struct pci_bus *bus,
struct list_head *realloc_head)
{
struct pci_dev *dev;
@@ -1115,9 +1115,9 @@ void __ref pci_bus_size_bridges(struct pci_bus *bus)
}
EXPORT_SYMBOL(pci_bus_size_bridges);
-static void __ref __pci_bus_assign_resources(const struct pci_bus *bus,
+void __ref __pci_bus_assign_resources(const struct pci_bus *bus,
struct list_head *realloc_head,
struct list_head *fail_head)
{
struct pci_bus *b;
struct pci_dev *dev;


@@ -439,7 +439,7 @@ static int tps6586x_regulator_remove(struct platform_device *pdev)
static struct platform_driver tps6586x_regulator_driver = {
.driver = {
-.name = "tps6586x-pmic",
+.name = "tps6586x-regulator",
.owner = THIS_MODULE,
},
.probe = tps6586x_regulator_probe,


@@ -1656,9 +1656,12 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN &&
fcoe->realdev->features & NETIF_F_HW_VLAN_CTAG_TX) {
-skb->vlan_tci = VLAN_TAG_PRESENT |
-vlan_dev_vlan_id(fcoe->netdev);
+/* must set skb->dev before calling vlan_put_tag */
skb->dev = fcoe->realdev;
+skb = __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+vlan_dev_vlan_id(fcoe->netdev));
+if (!skb)
+return -ENOMEM;
} else
skb->dev = fcoe->netdev;


@@ -1548,9 +1548,6 @@ static struct fcoe_fcf *fcoe_ctlr_select(struct fcoe_ctlr *fip)
{
struct fcoe_fcf *fcf;
struct fcoe_fcf *best = fip->sel_fcf;
-struct fcoe_fcf *first;
-first = list_first_entry(&fip->fcfs, struct fcoe_fcf, list);
list_for_each_entry(fcf, &fip->fcfs, list) {
LIBFCOE_FIP_DBG(fip, "consider FCF fab %16.16llx "
@@ -1568,17 +1565,15 @@ static struct fcoe_fcf *fcoe_ctlr_select(struct fcoe_ctlr *fip)
"" : "un");
continue;
}
-if (fcf->fabric_name != first->fabric_name ||
-fcf->vfid != first->vfid ||
-fcf->fc_map != first->fc_map) {
+if (!best || fcf->pri < best->pri || best->flogi_sent)
+best = fcf;
+if (fcf->fabric_name != best->fabric_name ||
+fcf->vfid != best->vfid ||
+fcf->fc_map != best->fc_map) {
LIBFCOE_FIP_DBG(fip, "Conflicting fabric, VFID, "
"or FC-MAP\n");
return NULL;
}
-if (fcf->flogi_sent)
-continue;
-if (!best || fcf->pri < best->pri || best->flogi_sent)
-best = fcf;
}
fip->sel_fcf = best;
if (best) {


@@ -8980,19 +8980,6 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
if (!ioa_cfg->res_entries)
goto out;
-if (ioa_cfg->sis64) {
-ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) *
-BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
-ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) *
-BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
-ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
-BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
-if (!ioa_cfg->target_ids || !ioa_cfg->array_ids
-|| !ioa_cfg->vset_ids)
-goto out_free_res_entries;
-}
for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
@@ -9089,9 +9076,6 @@ out_free_vpd_cbs:
ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
out_free_res_entries:
kfree(ioa_cfg->res_entries);
-kfree(ioa_cfg->target_ids);
-kfree(ioa_cfg->array_ids);
-kfree(ioa_cfg->vset_ids);
goto out;
}


@@ -1440,9 +1440,9 @@ struct ipr_ioa_cfg {
/*
* Bitmaps for SIS64 generated target values
*/
-unsigned long *target_ids;
-unsigned long *array_ids;
-unsigned long *vset_ids;
+unsigned long target_ids[BITS_TO_LONGS(IPR_MAX_SIS64_DEVS)];
+unsigned long array_ids[BITS_TO_LONGS(IPR_MAX_SIS64_DEVS)];
+unsigned long vset_ids[BITS_TO_LONGS(IPR_MAX_SIS64_DEVS)];
u16 type; /* CCIN of the card */


@@ -463,13 +463,7 @@ static void fc_exch_delete(struct fc_exch *ep)
fc_exch_release(ep); /* drop hold for exch in mp */
}
-/**
-* fc_seq_send() - Send a frame using existing sequence/exchange pair
-* @lport: The local port that the exchange will be sent on
-* @sp: The sequence to be sent
-* @fp: The frame to be sent on the exchange
-*/
-static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
+static int fc_seq_send_locked(struct fc_lport *lport, struct fc_seq *sp,
struct fc_frame *fp)
{
struct fc_exch *ep;
@@ -479,7 +473,7 @@ static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
u8 fh_type = fh->fh_type;
ep = fc_seq_exch(sp);
-WARN_ON((ep->esb_stat & ESB_ST_SEQ_INIT) != ESB_ST_SEQ_INIT);
+WARN_ON(!(ep->esb_stat & ESB_ST_SEQ_INIT));
f_ctl = ntoh24(fh->fh_f_ctl);
fc_exch_setup_hdr(ep, fp, f_ctl);
@@ -502,17 +496,34 @@ static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
error = lport->tt.frame_send(lport, fp);
if (fh_type == FC_TYPE_BLS)
-return error;
+goto out;
/*
* Update the exchange and sequence flags,
* assuming all frames for the sequence have been sent.
* We can only be called to send once for each sequence.
*/
-spin_lock_bh(&ep->ex_lock);
ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ; /* not first seq */
if (f_ctl & FC_FC_SEQ_INIT)
ep->esb_stat &= ~ESB_ST_SEQ_INIT;
+out:
+return error;
+}
+/**
+* fc_seq_send() - Send a frame using existing sequence/exchange pair
+* @lport: The local port that the exchange will be sent on
+* @sp: The sequence to be sent
+* @fp: The frame to be sent on the exchange
+*/
+static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
+struct fc_frame *fp)
+{
+struct fc_exch *ep;
+int error;
+ep = fc_seq_exch(sp);
+spin_lock_bh(&ep->ex_lock);
+error = fc_seq_send_locked(lport, sp, fp);
spin_unlock_bh(&ep->ex_lock);
return error;
}
@@ -629,7 +640,7 @@ static int fc_exch_abort_locked(struct fc_exch *ep,
if (fp) {
fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid,
FC_TYPE_BLS, FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
-error = fc_seq_send(ep->lp, sp, fp);
+error = fc_seq_send_locked(ep->lp, sp, fp);
} else
error = -ENOBUFS;
return error;
@@ -1132,7 +1143,7 @@ static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp,
f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
f_ctl |= ep->f_ctl;
fc_fill_fc_hdr(fp, rctl, ep->did, ep->sid, fh_type, f_ctl, 0);
-fc_seq_send(ep->lp, sp, fp);
+fc_seq_send_locked(ep->lp, sp, fp);
}
/**
@@ -1307,8 +1318,8 @@ static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
ap->ba_low_seq_cnt = htons(sp->cnt);
}
sp = fc_seq_start_next_locked(sp);
-spin_unlock_bh(&ep->ex_lock);
fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS);
+spin_unlock_bh(&ep->ex_lock);
fc_frame_free(rx_fp);
return;

Some files were not shown because too many files have changed in this diff.