/*

  Broadcom B43 wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <m@bues.ch>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/

#include "b43.h"
#include "dma.h"
#include "main.h"
#include "debugfs.h"
#include "xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>

#include <asm/div64.h>


/* Required number of TX DMA slots per TX frame.
 * This currently is 2, because we put the header and the ieee80211 frame
 * into separate slots. */
#define TX_SLOTS_PER_FRAME	2
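
/* Build the low word, high word or address-extension part of a DMA address,
 * folding the SSB address-translation bits into the low or the high word
 * depending on dma->translation_in_low. */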
static u32 b43_dma_address(struct b43_dma *dma, dma_addr_t dmaaddr,
			   enum b43_addrtype addrtype)
{
	u32 uninitialized_var(addr);

	switch (addrtype) {
	case B43_DMA_ADDR_LOW:
		addr = lower_32_bits(dmaaddr);
		if (dma->translation_in_low) {
			addr &= ~SSB_DMA_TRANSLATION_MASK;
			addr |= dma->translation;
		}
		break;
	case B43_DMA_ADDR_HIGH:
		addr = upper_32_bits(dmaaddr);
		if (!dma->translation_in_low) {
			addr &= ~SSB_DMA_TRANSLATION_MASK;
			addr |= dma->translation;
		}
		break;
	case B43_DMA_ADDR_EXT:
		if (dma->translation_in_low)
			addr = lower_32_bits(dmaaddr);
		else
			addr = upper_32_bits(dmaaddr);
		addr &= SSB_DMA_TRANSLATION_MASK;
		addr >>= SSB_DMA_TRANSLATION_SHIFT;
		break;
	}

	return addr;
}

/* 32bit DMA ops. */
static
struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc32 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}
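
/* Fill out a 32-bit hardware descriptor: byte count, table-end,
 * frame start/end and IRQ flags in the control word, plus the low
 * address word and the address-extension bits from b43_dma_address(). */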
static void op32_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc32 *descbase = ring->descbase;
	int slot;
	u32 ctl;
	u32 addr;
	u32 addrext;

	slot = (int)(&(desc->dma32) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addr = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_LOW);
	addrext = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_EXT);

	ctl = bufsize & B43_DMA32_DCTL_BYTECNT;
	if (slot == ring->nr_slots - 1)
		ctl |= B43_DMA32_DCTL_DTABLEEND;
	if (start)
		ctl |= B43_DMA32_DCTL_FRAMESTART;
	if (end)
		ctl |= B43_DMA32_DCTL_FRAMEEND;
	if (irq)
		ctl |= B43_DMA32_DCTL_IRQ;
	ctl |= (addrext << B43_DMA32_DCTL_ADDREXT_SHIFT)
	    & B43_DMA32_DCTL_ADDREXT_MASK;

	desc->dma32.control = cpu_to_le32(ctl);
	desc->dma32.address = cpu_to_le32(addr);
}

static void op32_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static void op32_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      | B43_DMA32_TXSUSPEND);
}

static void op32_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      & ~B43_DMA32_TXSUSPEND);
}

static int op32_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA32_RXSTATUS);
	val &= B43_DMA32_RXDPTR;

	return (val / sizeof(struct b43_dmadesc32));
}

static void op32_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static const struct b43_dma_ops dma32_ops = {
	.idx2desc = op32_idx2desc,
	.fill_descriptor = op32_fill_descriptor,
	.poke_tx = op32_poke_tx,
	.tx_suspend = op32_tx_suspend,
	.tx_resume = op32_tx_resume,
	.get_current_rxslot = op32_get_current_rxslot,
	.set_current_rxslot = op32_set_current_rxslot,
};

/* 64bit DMA ops. */
static
struct b43_dmadesc_generic *op64_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc64 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}

static void op64_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc64 *descbase = ring->descbase;
	int slot;
	u32 ctl0 = 0, ctl1 = 0;
	u32 addrlo, addrhi;
	u32 addrext;

	slot = (int)(&(desc->dma64) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addrlo = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_LOW);
	addrhi = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_HIGH);
	addrext = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_EXT);

	if (slot == ring->nr_slots - 1)
		ctl0 |= B43_DMA64_DCTL0_DTABLEEND;
	if (start)
		ctl0 |= B43_DMA64_DCTL0_FRAMESTART;
	if (end)
		ctl0 |= B43_DMA64_DCTL0_FRAMEEND;
	if (irq)
		ctl0 |= B43_DMA64_DCTL0_IRQ;
	ctl1 |= bufsize & B43_DMA64_DCTL1_BYTECNT;
	ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT)
	    & B43_DMA64_DCTL1_ADDREXT_MASK;

	desc->dma64.control0 = cpu_to_le32(ctl0);
	desc->dma64.control1 = cpu_to_le32(ctl1);
	desc->dma64.address_low = cpu_to_le32(addrlo);
	desc->dma64.address_high = cpu_to_le32(addrhi);
}

static void op64_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static void op64_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      | B43_DMA64_TXSUSPEND);
}

static void op64_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      & ~B43_DMA64_TXSUSPEND);
}

static int op64_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA64_RXSTATUS);
	val &= B43_DMA64_RXSTATDPTR;

	return (val / sizeof(struct b43_dmadesc64));
}

static void op64_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static const struct b43_dma_ops dma64_ops = {
	.idx2desc = op64_idx2desc,
	.fill_descriptor = op64_fill_descriptor,
	.poke_tx = op64_poke_tx,
	.tx_suspend = op64_tx_suspend,
	.tx_resume = op64_tx_resume,
	.get_current_rxslot = op64_get_current_rxslot,
	.set_current_rxslot = op64_set_current_rxslot,
};
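
/* Ring index helpers. The descriptor ring is circular: next_slot() and
 * prev_slot() wrap around at the table boundaries, and free_slots()
 * reports how many descriptors are still unused. */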
static inline int free_slots(struct b43_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}

static inline int prev_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}
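
/* Debug statistics: remember the highest ring occupancy seen so far.
 * Compiled to an empty stub on non-debug builds. */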
#ifdef CONFIG_B43_DEBUG
static void update_max_used_slots(struct b43_dmaring *ring,
				  int current_used_slots)
{
	if (current_used_slots <= ring->max_used_slots)
		return;
	ring->max_used_slots = current_used_slots;
	if (b43_debug(ring->dev, B43_DBG_DMAVERBOSE)) {
		b43dbg(ring->dev->wl,
		       "max_used_slots increased to %d on %s ring %d\n",
		       ring->max_used_slots,
		       ring->tx ? "TX" : "RX", ring->index);
	}
}
#else
static inline
void update_max_used_slots(struct b43_dmaring *ring, int current_used_slots)
{
}
#endif /* DEBUG */

/* Request a slot for usage. */
static inline int request_slot(struct b43_dmaring *ring)
{
	int slot;

	B43_WARN_ON(!ring->tx);
	B43_WARN_ON(ring->stopped);
	B43_WARN_ON(free_slots(ring) == 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	update_max_used_slots(ring, ring->used_slots);

	return slot;
}

static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx)
{
	static const u16 map64[] = {
		B43_MMIO_DMA64_BASE0,
		B43_MMIO_DMA64_BASE1,
		B43_MMIO_DMA64_BASE2,
		B43_MMIO_DMA64_BASE3,
		B43_MMIO_DMA64_BASE4,
		B43_MMIO_DMA64_BASE5,
	};
	static const u16 map32[] = {
		B43_MMIO_DMA32_BASE0,
		B43_MMIO_DMA32_BASE1,
		B43_MMIO_DMA32_BASE2,
		B43_MMIO_DMA32_BASE3,
		B43_MMIO_DMA32_BASE4,
		B43_MMIO_DMA32_BASE5,
	};

	if (type == B43_DMA_64BIT) {
		B43_WARN_ON(!(controller_idx >= 0 &&
			      controller_idx < ARRAY_SIZE(map64)));
		return map64[controller_idx];
	}
	B43_WARN_ON(!(controller_idx >= 0 &&
		      controller_idx < ARRAY_SIZE(map32)));
	return map32[controller_idx];
}
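
/* Streaming-DMA helpers: map/unmap and sync a single descriptor buffer
 * for the device, choosing the transfer direction from the ring type. */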
static inline
dma_addr_t map_descbuffer(struct b43_dmaring *ring,
			  unsigned char *buf, size_t len, int tx)
{
	dma_addr_t dmaaddr;

	if (tx) {
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					 buf, len, DMA_TO_DEVICE);
	} else {
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					 buf, len, DMA_FROM_DEVICE);
	}

	return dmaaddr;
}

static inline
void unmap_descbuffer(struct b43_dmaring *ring,
		      dma_addr_t addr, size_t len, int tx)
{
	if (tx) {
		dma_unmap_single(ring->dev->dev->dma_dev,
				 addr, len, DMA_TO_DEVICE);
	} else {
		dma_unmap_single(ring->dev->dev->dma_dev,
				 addr, len, DMA_FROM_DEVICE);
	}
}

static inline
void sync_descbuffer_for_cpu(struct b43_dmaring *ring,
			     dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
				addr, len, DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_device(struct b43_dmaring *ring,
				dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	dma_sync_single_for_device(ring->dev->dev->dma_dev,
				   addr, len, DMA_FROM_DEVICE);
}

static inline
void free_descriptor_buffer(struct b43_dmaring *ring,
			    struct b43_dmadesc_meta *meta)
{
	if (meta->skb) {
		if (ring->tx)
			ieee80211_free_txskb(ring->dev->wl->hw, meta->skb);
		else
			dev_kfree_skb_any(meta->skb);
		meta->skb = NULL;
	}
}

static int alloc_ringmemory(struct b43_dmaring *ring)
{
	/* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
	 * alignment and 8K buffers for 64-bit DMA with 8K alignment.
	 * In practice we could use smaller buffers for the latter, but the
	 * alignment is really important because of the hardware bug. If bit
	 * 0x00001000 is used in DMA address, some hardware (like BCM4331)
	 * copies that bit into B43_DMA64_RXSTATUS and we get false values from
	 * B43_DMA64_RXSTATDPTR. Let's just use 8K buffers even if we don't use
	 * more than 256 slots for ring.
	 */
	u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ?
				B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE;

	ring->descbase = dma_zalloc_coherent(ring->dev->dev->dma_dev,
					     ring_mem_size, &(ring->dmabase),
					     GFP_KERNEL);
	if (!ring->descbase)
		return -ENOMEM;

	return 0;
}

static void free_ringmemory(struct b43_dmaring *ring)
{
	u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ?
				B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE;
	dma_free_coherent(ring->dev->dev->dma_dev, ring_mem_size,
			  ring->descbase, ring->dmabase);
}

/* Reset the RX DMA channel */
static int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXSTATUS :
						   B43_DMA32_RXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_RXSTAT;
			if (value == B43_DMA64_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_RXSTATE;
			if (value == B43_DMA32_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA RX reset timed out\n");
		return -ENODEV;
	}

	return 0;
}

/* Reset the TX DMA channel */
static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED ||
			    value == B43_DMA64_TXSTAT_IDLEWAIT ||
			    value == B43_DMA64_TXSTAT_STOPPED)
				break;
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED ||
			    value == B43_DMA32_TXSTAT_IDLEWAIT ||
			    value == B43_DMA32_TXSTAT_STOPPED)
				break;
		}
		msleep(1);
	}
	offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA TX reset timed out\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	msleep(1);

	return 0;
}

/* Check if a DMA mapping address is invalid. */
static bool b43_dma_mapping_error(struct b43_dmaring *ring,
				  dma_addr_t addr,
				  size_t buffersize, bool dma_to_device)
{
	if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
		return 1;

	switch (ring->type) {
	case B43_DMA_30BIT:
		if ((u64)addr + buffersize > (1ULL << 30))
			goto address_error;
		break;
	case B43_DMA_32BIT:
		if ((u64)addr + buffersize > (1ULL << 32))
			goto address_error;
		break;
	case B43_DMA_64BIT:
		/* Currently we can't have addresses beyond
		 * 64bit in the kernel. */
		break;
	}

	/* The address is OK. */
	return 0;

address_error:
	/* We can't support this address. Unmap it again. */
	unmap_descbuffer(ring, addr, buffersize, dma_to_device);

	return 1;
}
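
/* RX buffers are "poisoned" with an all-ones pattern before they are handed
 * to the device. Seeing the pattern still intact on completion indicates the
 * hardware never wrote a frame into the buffer (a DMA failure). */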
static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb)
{
	unsigned char *f = skb->data + ring->frameoffset;

	return ((f[0] & f[1] & f[2] & f[3] & f[4] & f[5] & f[6] & f[7]) == 0xFF);
}

static void b43_poison_rx_buffer(struct b43_dmaring *ring, struct sk_buff *skb)
{
	struct b43_rxhdr_fw4 *rxhdr;
	unsigned char *frame;

	/* This poisons the RX buffer to detect DMA failures. */

	rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
	rxhdr->frame_len = 0;

	B43_WARN_ON(ring->rx_buffersize < ring->frameoffset + sizeof(struct b43_plcp_hdr6) + 2);
	frame = skb->data + ring->frameoffset;
	memset(frame, 0xFF, sizeof(struct b43_plcp_hdr6) + 2 /* padding */);
}
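
/* Allocate, poison and map one RX skb for a descriptor slot, retrying the
 * allocation from ZONE_DMA if the first mapping falls outside the DMA
 * engine's addressable range. */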
static int setup_rx_descbuffer(struct b43_dmaring *ring,
			       struct b43_dmadesc_generic *desc,
			       struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
{
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	B43_WARN_ON(ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	b43_poison_rx_buffer(ring, skb);
	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;

		dev_kfree_skb_any(skb);

		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		b43_poison_rx_buffer(ring, skb);
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
		if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
			b43err(ring->dev->wl, "RX DMA buffer allocation failed\n");
			dev_kfree_skb_any(skb);
			return -EIO;
		}
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	ring->ops->fill_descriptor(ring, desc, dmaaddr,
				   ring->rx_buffersize, 0, 0, 0);

	return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43_dmaring *ring)
{
	int i, err = -ENOMEM;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			b43err(ring->dev->wl,
			       "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	mb();
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;
	bool parity = ring->dev->dma.parity;
	u32 addrlo;
	u32 addrhi;

	if (ring->tx) {
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);
			addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
			addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);
			addrhi = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_HIGH);

			value = B43_DMA64_TXENABLE;
			value |= (addrext << B43_DMA64_TXADDREXT_SHIFT)
			    & B43_DMA64_TXADDREXT_MASK;
			if (!parity)
				value |= B43_DMA64_TXPARITYDISABLE;
			b43_dma_write(ring, B43_DMA64_TXCTL, value);
			b43_dma_write(ring, B43_DMA64_TXRINGLO, addrlo);
			b43_dma_write(ring, B43_DMA64_TXRINGHI, addrhi);
		} else {
			u32 ringbase = (u32) (ring->dmabase);
			addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
			addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);

			value = B43_DMA32_TXENABLE;
			value |= (addrext << B43_DMA32_TXADDREXT_SHIFT)
			    & B43_DMA32_TXADDREXT_MASK;
			if (!parity)
				value |= B43_DMA32_TXPARITYDISABLE;
			b43_dma_write(ring, B43_DMA32_TXCTL, value);
			b43_dma_write(ring, B43_DMA32_TXRING, addrlo);
		}
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);
			addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
			addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);
			addrhi = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_HIGH);

			value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT);
			value |= B43_DMA64_RXENABLE;
			value |= (addrext << B43_DMA64_RXADDREXT_SHIFT)
			    & B43_DMA64_RXADDREXT_MASK;
			if (!parity)
				value |= B43_DMA64_RXPARITYDISABLE;
			b43_dma_write(ring, B43_DMA64_RXCTL, value);
			b43_dma_write(ring, B43_DMA64_RXRINGLO, addrlo);
			b43_dma_write(ring, B43_DMA64_RXRINGHI, addrhi);
			b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc64));
		} else {
			u32 ringbase = (u32) (ring->dmabase);
			addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
			addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);

			value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT);
			value |= B43_DMA32_RXENABLE;
			value |= (addrext << B43_DMA32_RXADDREXT_SHIFT)
			    & B43_DMA32_RXADDREXT_MASK;
			if (!parity)
				value |= B43_DMA32_RXPARITYDISABLE;
			b43_dma_write(ring, B43_DMA32_RXCTL, value);
			b43_dma_write(ring, B43_DMA32_RXRING, addrlo);
			b43_dma_write(ring, B43_DMA32_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc32));
		}
	}

out:
	return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43_dmaring *ring)
{
	if (ring->tx) {
		b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_TXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_TXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_TXRING, 0);
	} else {
		b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_RXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_RXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_RXRING, 0);
	}
}
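
/* Unmap and free every remaining descriptor buffer. On TX rings, slots that
 * were never used (or were already reclaimed) still carry the poison pointer
 * or a NULL skb and are skipped. */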
static void free_all_descbuffers(struct b43_dmaring *ring)
{
	struct b43_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		/* get meta - ignore returned value */
		ring->ops->idx2desc(ring, i, &meta);

		if (!meta->skb || b43_dma_ptr_is_poisoned(meta->skb)) {
			B43_WARN_ON(!ring->tx);
			continue;
		}
		if (ring->tx) {
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		}
		free_descriptor_buffer(ring, meta);
	}
}
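
/* Probe the widest DMA mask the core supports: prefer the 64-bit engine
 * when the bus reports one; otherwise write the 32-bit address-extension
 * bits and read them back - if they don't stick, only 30-bit DMA works. */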
static u64 supported_dma_mask(struct b43_wldev *dev)
{
	u32 tmp;
	u16 mmio_base;

	switch (dev->dev->bus_type) {
#ifdef CONFIG_B43_BCMA
	case B43_BUS_BCMA:
		tmp = bcma_aread32(dev->dev->bdev, BCMA_IOST);
		if (tmp & BCMA_IOST_DMA64)
			return DMA_BIT_MASK(64);
		break;
#endif
#ifdef CONFIG_B43_SSB
	case B43_BUS_SSB:
		tmp = ssb_read32(dev->dev->sdev, SSB_TMSHIGH);
		if (tmp & SSB_TMSHIGH_DMA64)
			return DMA_BIT_MASK(64);
		break;
#endif
	}

	mmio_base = b43_dmacontroller_base(0, 0);
	b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK);
	tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL);
	if (tmp & B43_DMA32_TXADDREXT_MASK)
		return DMA_BIT_MASK(32);

	return DMA_BIT_MASK(30);
}

static enum b43_dmatype dma_mask_to_engine_type(u64 dmamask)
{
	if (dmamask == DMA_BIT_MASK(30))
		return B43_DMA_30BIT;
	if (dmamask == DMA_BIT_MASK(32))
		return B43_DMA_32BIT;
	if (dmamask == DMA_BIT_MASK(64))
		return B43_DMA_64BIT;
	B43_WARN_ON(1);
	return B43_DMA_30BIT;
}

/* Main initialization function. */
static
struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
				      int controller_index,
				      int for_tx,
				      enum b43_dmatype type)
{
	struct b43_dmaring *ring;
	int i, err;
	dma_addr_t dma_test;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;

	ring->nr_slots = B43_RXRING_SLOTS;
	if (for_tx)
		ring->nr_slots = B43_TXRING_SLOTS;

	ring->meta = kcalloc(ring->nr_slots, sizeof(struct b43_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;
	for (i = 0; i < ring->nr_slots; i++)
		ring->meta[i].skb = B43_DMA_PTR_POISON;

	ring->type = type;
	ring->dev = dev;
	ring->mmio_base = b43_dmacontroller_base(type, controller_index);
	ring->index = controller_index;
	if (type == B43_DMA_64BIT)
		ring->ops = &dma64_ops;
	else
		ring->ops = &dma32_ops;
	if (for_tx) {
		ring->tx = true;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			switch (dev->fw.hdr_format) {
			case B43_FW_HDR_598:
				ring->rx_buffersize = B43_DMA0_RX_FW598_BUFSIZE;
				ring->frameoffset = B43_DMA0_RX_FW598_FO;
				break;
			case B43_FW_HDR_410:
			case B43_FW_HDR_351:
				ring->rx_buffersize = B43_DMA0_RX_FW351_BUFSIZE;
				ring->frameoffset = B43_DMA0_RX_FW351_FO;
				break;
			}
		} else
			B43_WARN_ON(1);
	}
#ifdef CONFIG_B43_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	if (for_tx) {
		/* Assumption: B43_TXRING_SLOTS can be divided by TX_SLOTS_PER_FRAME */
		BUILD_BUG_ON(B43_TXRING_SLOTS % TX_SLOTS_PER_FRAME != 0);

		ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
					    b43_txhdr_size(dev),
					    GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = dma_map_single(dev->dev->dma_dev,
					  ring->txhdr_cache,
					  b43_txhdr_size(dev),
					  DMA_TO_DEVICE);

		if (b43_dma_mapping_error(ring, dma_test,
					  b43_txhdr_size(dev), 1)) {
			/* ugh realloc */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
						    b43_txhdr_size(dev),
						    GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;

			dma_test = dma_map_single(dev->dev->dma_dev,
						  ring->txhdr_cache,
						  b43_txhdr_size(dev),
						  DMA_TO_DEVICE);

			if (b43_dma_mapping_error(ring, dma_test,
						  b43_txhdr_size(dev), 1)) {

				b43err(dev->wl,
				       "TXHDR DMA allocation failed\n");
				goto err_kfree_txhdr_cache;
			}
		}

		dma_unmap_single(dev->dev->dma_dev,
				 dma_test, b43_txhdr_size(dev),
				 DMA_TO_DEVICE);
	}

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}

#define divide(a, b)	({	\
	typeof(a) __a = a;	\
	do_div(__a, b);		\
	__a;			\
  })

#define modulo(a, b)	({	\
	typeof(a) __a = a;	\
	do_div(__a, b);		\
  })

/* Main cleanup function. */
static void b43_destroy_dmaring(struct b43_dmaring *ring,
				const char *ringname)
{
	if (!ring)
		return;

#ifdef CONFIG_B43_DEBUG
	{
		/* Print some statistics. */
		u64 failed_packets = ring->nr_failed_tx_packets;
		u64 succeed_packets = ring->nr_succeed_tx_packets;
		u64 nr_packets = failed_packets + succeed_packets;
		u64 permille_failed = 0, average_tries = 0;

		if (nr_packets)
			permille_failed = divide(failed_packets * 1000, nr_packets);
		if (nr_packets)
			average_tries = divide(ring->nr_total_packet_tries * 100, nr_packets);

		b43dbg(ring->dev->wl, "DMA-%u %s: "
		       "Used slots %d/%d, Failed frames %llu/%llu = %llu.%01llu%%, "
		       "Average tries %llu.%02llu\n",
		       (unsigned int)(ring->type), ringname,
		       ring->max_used_slots,
		       ring->nr_slots,
		       (unsigned long long)failed_packets,
		       (unsigned long long)nr_packets,
		       (unsigned long long)divide(permille_failed, 10),
		       (unsigned long long)modulo(permille_failed, 10),
		       (unsigned long long)divide(average_tries, 100),
		       (unsigned long long)modulo(average_tries, 100));
	}
#endif /* DEBUG */

	/* Device IRQs are disabled prior entering this function,
	 * so no need to take care of concurrency with rx handler stuff.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->txhdr_cache);
	kfree(ring->meta);
	kfree(ring);
}

#define destroy_ring(dma, ring) do {				\
	b43_destroy_dmaring((dma)->ring, __stringify(ring));	\
	(dma)->ring = NULL;					\
    } while (0)

void b43_dma_free(struct b43_wldev *dev)
{
	struct b43_dma *dma;

	if (b43_using_pio_transfers(dev))
		return;
	dma = &dev->dma;

	destroy_ring(dma, rx_ring);
	destroy_ring(dma, tx_ring_AC_BK);
	destroy_ring(dma, tx_ring_AC_BE);
	destroy_ring(dma, tx_ring_AC_VI);
	destroy_ring(dma, tx_ring_AC_VO);
	destroy_ring(dma, tx_ring_mcast);
}

static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
{
	u64 orig_mask = mask;
	bool fallback = false;
	int err;

	/* Try to set the DMA mask. If it fails, try falling back to a
	 * lower mask, as we can always also support a lower one. */
	while (1) {
		err = dma_set_mask_and_coherent(dev->dev->dma_dev, mask);
		if (!err)
			break;
		if (mask == DMA_BIT_MASK(64)) {
			mask = DMA_BIT_MASK(32);
			fallback = true;
			continue;
		}
		if (mask == DMA_BIT_MASK(32)) {
			mask = DMA_BIT_MASK(30);
			fallback = true;
			continue;
		}
		b43err(dev->wl, "The machine/kernel does not support "
		       "the required %u-bit DMA mask\n",
		       (unsigned int)dma_mask_to_engine_type(orig_mask));
		return -EOPNOTSUPP;
	}
	if (fallback) {
		b43info(dev->wl, "DMA mask fallback from %u-bit to %u-bit\n",
			(unsigned int)dma_mask_to_engine_type(orig_mask),
			(unsigned int)dma_mask_to_engine_type(mask));
	}

	return 0;
}

/* Some hardware with 64-bit DMA seems to be bugged and looks for translation
 * bit in low address word instead of high one.
 */
static bool b43_dma_translation_in_low_word(struct b43_wldev *dev,
					    enum b43_dmatype type)
{
	if (type != B43_DMA_64BIT)
		return 1;

#ifdef CONFIG_B43_SSB
	if (dev->dev->bus_type == B43_BUS_SSB &&
	    dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI &&
	    !(pci_is_pcie(dev->dev->sdev->bus->host_pci) &&
	      ssb_read32(dev->dev->sdev, SSB_TMSHIGH) & SSB_TMSHIGH_DMA64))
			return 1;
#endif
	return 0;
}
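
/* Top-level DMA bring-up: pick the engine type from the supported mask, set
 * up the bus address translation, then allocate one TX ring per 802.11
 * access category, the multicast TX ring and the RX ring. */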
int b43_dma_init(struct b43_wldev *dev)
{
	struct b43_dma *dma = &dev->dma;
	int err;
	u64 dmamask;
	enum b43_dmatype type;

	dmamask = supported_dma_mask(dev);
	type = dma_mask_to_engine_type(dmamask);
	err = b43_dma_set_mask(dev, dmamask);
	if (err)
		return err;

	switch (dev->dev->bus_type) {
#ifdef CONFIG_B43_BCMA
	case B43_BUS_BCMA:
		dma->translation = bcma_core_dma_translation(dev->dev->bdev);
		break;
#endif
#ifdef CONFIG_B43_SSB
	case B43_BUS_SSB:
		dma->translation = ssb_dma_translation(dev->dev->sdev);
		break;
#endif
	}
	dma->translation_in_low = b43_dma_translation_in_low_word(dev, type);

	dma->parity = true;
#ifdef CONFIG_B43_BCMA
	/* TODO: find out which SSB devices need disabling parity */
	if (dev->dev->bus_type == B43_BUS_BCMA)
		dma->parity = false;
#endif

	err = -ENOMEM;
	/* setup TX DMA channels. */
	dma->tx_ring_AC_BK = b43_setup_dmaring(dev, 0, 1, type);
	if (!dma->tx_ring_AC_BK)
		goto out;

	dma->tx_ring_AC_BE = b43_setup_dmaring(dev, 1, 1, type);
	if (!dma->tx_ring_AC_BE)
		goto err_destroy_bk;

	dma->tx_ring_AC_VI = b43_setup_dmaring(dev, 2, 1, type);
	if (!dma->tx_ring_AC_VI)
		goto err_destroy_be;

	dma->tx_ring_AC_VO = b43_setup_dmaring(dev, 3, 1, type);
	if (!dma->tx_ring_AC_VO)
		goto err_destroy_vi;

	dma->tx_ring_mcast = b43_setup_dmaring(dev, 4, 1, type);
	if (!dma->tx_ring_mcast)
		goto err_destroy_vo;

	/* setup RX DMA channel. */
	dma->rx_ring = b43_setup_dmaring(dev, 0, 0, type);
	if (!dma->rx_ring)
		goto err_destroy_mcast;

	/* No support for the TX status DMA ring. */
	B43_WARN_ON(dev->dev->core_rev < 5);

	b43dbg(dev->wl, "%u-bit DMA initialized\n",
	       (unsigned int)type);
	err = 0;
out:
	return err;

err_destroy_mcast:
	destroy_ring(dma, tx_ring_mcast);
err_destroy_vo:
	destroy_ring(dma, tx_ring_AC_VO);
err_destroy_vi:
	destroy_ring(dma, tx_ring_AC_VI);
err_destroy_be:
	destroy_ring(dma, tx_ring_AC_BE);
err_destroy_bk:
	destroy_ring(dma, tx_ring_AC_BK);
	return err;
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43_dmaring *ring, int slot)
{
	u16 cookie;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in RX path.
	 * It can also not be 0xFFFF because that is special
	 * for multicast frames.
	 */
	cookie = (((u16)ring->index + 1) << 12);
	B43_WARN_ON(slot & ~0x0FFF);
	cookie |= (u16)slot;

	return cookie;
}

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
{
	struct b43_dma *dma = &dev->dma;
	struct b43_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0x1000:
		ring = dma->tx_ring_AC_BK;
		break;
	case 0x2000:
		ring = dma->tx_ring_AC_BE;
		break;
	case 0x3000:
		ring = dma->tx_ring_AC_VI;
		break;
	case 0x4000:
		ring = dma->tx_ring_AC_VO;
		break;
	case 0x5000:
		ring = dma->tx_ring_mcast;
		break;
	}
	*slot = (cookie & 0x0FFF);
	if (unlikely(!ring || *slot < 0 || *slot >= ring->nr_slots)) {
		b43dbg(dev->wl, "TX-status contains "
		       "invalid cookie: 0x%04X\n", cookie);
		return NULL;
	}

	return ring;
}
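
/* Queue one frame on a TX ring. Two descriptors are consumed per frame (see
 * TX_SLOTS_PER_FRAME): one for the driver-built TX header, one for the
 * 802.11 payload. If the payload mapping is outside the engine's address
 * range, the data is copied into a GFP_DMA bounce buffer first. */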
static int dma_tx_fragment(struct b43_dmaring *ring,
			   struct sk_buff *skb)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct b43_private_tx_info *priv_info = b43_get_priv_tx_info(info);
	u8 *header;
	int slot, old_top_slot, old_used_slots;
	int err;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_dmadesc_meta *meta_hdr;
	u16 cookie;
	size_t hdrsize = b43_txhdr_size(ring->dev);

	/* Important note: If the number of used DMA slots per TX frame
	 * is changed here, the TX_SLOTS_PER_FRAME definition at the top of
	 * the file has to be updated, too!
	 */

	old_top_slot = ring->current_slot;
	old_used_slots = ring->used_slots;

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[(slot / TX_SLOTS_PER_FRAME) * hdrsize]);
	cookie = generate_cookie(ring, slot);
	err = b43_generate_txhdr(ring->dev, header,
				 skb, info, cookie);
	if (unlikely(err)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return err;
	}

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					   hdrsize, 1);
	if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize, 1)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return -EIO;
	}
	ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     hdrsize, 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	meta->skb = skb;
	meta->is_last_fragment = true;
	priv_info->bouncebuffer = NULL;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
		priv_info->bouncebuffer = kmemdup(skb->data, skb->len,
						  GFP_ATOMIC | GFP_DMA);
		if (!priv_info->bouncebuffer) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		meta->dmaaddr = map_descbuffer(ring, priv_info->bouncebuffer, skb->len, 1);
		if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
			kfree(priv_info->bouncebuffer);
			priv_info->bouncebuffer = NULL;
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -EIO;
			goto out_unmap_hdr;
		}
	}

	ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);

	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* Tell the firmware about the cookie of the last
		 * mcast frame, so it can clear the more-data bit in it. */
		b43_shm_write16(ring->dev, B43_SHM_SHARED,
				B43_SHM_SH_MCASTCOOKIE, cookie);
	}
	/* Now transfer the whole frame. */
	wmb();
	ops->poke_tx(ring, next_slot(ring, slot));
	return 0;

out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 hdrsize, 1);
	return err;
}

static inline int should_inject_overflow(struct b43_dmaring *ring)
{
#ifdef CONFIG_B43_DEBUG
	if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			b43dbg(ring->dev->wl,
			       "Injecting TX ring overflow on "
			       "DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CONFIG_B43_DEBUG */
	return 0;
}
|
|
|
|
|
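
/* Since HZ jiffies correspond to one second, the injection above fires at
 * most once per second per ring while the B43_DBG_DMAOVERFLOW debug flag
 * is set, which rate-limits the artificial overflows to something the
 * queue-stop/wake paths can sensibly be exercised with. */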

/* Static mapping of mac80211's queues (priorities) to b43 DMA rings. */
static struct b43_dmaring *select_ring_by_priority(struct b43_wldev *dev,
						   u8 queue_prio)
{
	struct b43_dmaring *ring;

	if (dev->qos_enabled) {
		/* 0 = highest priority */
		switch (queue_prio) {
		default:
			B43_WARN_ON(1);
			/* fallthrough */
		case 0:
			ring = dev->dma.tx_ring_AC_VO;
			break;
		case 1:
			ring = dev->dma.tx_ring_AC_VI;
			break;
		case 2:
			ring = dev->dma.tx_ring_AC_BE;
			break;
		case 3:
			ring = dev->dma.tx_ring_AC_BK;
			break;
		}
	} else
		ring = dev->dma.tx_ring_AC_BE;

	return ring;
}
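
/* The mapping above mirrors mac80211's fixed queue numbering, where queue 0
 * carries the highest-priority traffic: 0 = AC_VO (voice), 1 = AC_VI
 * (video), 2 = AC_BE (best effort), 3 = AC_BK (background). With QoS
 * disabled, all traffic is funneled through the best-effort ring. */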

int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
{
	struct b43_dmaring *ring;
	struct ieee80211_hdr *hdr;
	int err = 0;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	hdr = (struct ieee80211_hdr *)skb->data;
	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* The multicast ring will be sent after the DTIM. */
		ring = dev->dma.tx_ring_mcast;
		/* Set the more-data bit. Ucode will clear it on
		 * the last frame for us. */
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else {
		/* Decide by priority where to put this frame. */
		ring = select_ring_by_priority(
			dev, skb_get_queue_mapping(skb));
	}

	B43_WARN_ON(!ring->tx);

	if (unlikely(ring->stopped)) {
		/* We get here only because of a bug in mac80211.
		 * Because of a race, one packet may be queued after
		 * the queue is stopped, thus we got called when we shouldn't.
		 * For now, just refuse the transmit. */
		if (b43_debug(dev, B43_DBG_DMAVERBOSE))
			b43err(dev->wl, "Packet after queue stopped\n");
		err = -ENOSPC;
		goto out;
	}

	if (unlikely(WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME))) {
		/* If we get here, we have a real error with the queue
		 * full, but queues not stopped. */
		b43err(dev->wl, "DMA queue overflow\n");
		err = -ENOSPC;
		goto out;
	}

	/* Assign the queue number to the ring (if not already done before)
	 * so TX status handling can use it. The queue to ring mapping is
	 * static, so we don't need to store it per frame. */
	ring->queue_prio = skb_get_queue_mapping(skb);

	err = dma_tx_fragment(ring, skb);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		ieee80211_free_txskb(dev->wl->hw, skb);
		err = 0;
		goto out;
	}
	if (unlikely(err)) {
		b43err(dev->wl, "DMA tx mapping failure\n");
		goto out;
	}
	if ((free_slots(ring) < TX_SLOTS_PER_FRAME) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		unsigned int skb_mapping = skb_get_queue_mapping(skb);

		ieee80211_stop_queue(dev->wl->hw, skb_mapping);
		dev->wl->tx_queue_stopped[skb_mapping] = 1;
		ring->stopped = true;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
		}
	}
out:
	return err;
}
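
/* Flow control: once fewer than TX_SLOTS_PER_FRAME free slots remain, the
 * corresponding mac80211 queue is stopped and tx_queue_stopped is flagged.
 * b43_dma_handle_txstatus() below reverses both once the ring has drained
 * far enough to accept another frame. */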

void b43_dma_handle_txstatus(struct b43_wldev *dev,
			     const struct b43_txstatus *status)
{
	const struct b43_dma_ops *ops;
	struct b43_dmaring *ring;
	struct b43_dmadesc_meta *meta;
	static const struct b43_txstatus fake; /* filled with 0 */
	const struct b43_txstatus *txstat;
	int slot, firstused;
	bool frame_succeed;
	int skip;
	static u8 err_out1, err_out2;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;
	B43_WARN_ON(!ring->tx);

	/* Sanity check: TX packets are processed in-order on one ring.
	 * Check if the slot deduced from the cookie really is the first
	 * used slot. */
	firstused = ring->current_slot - ring->used_slots + 1;
	if (firstused < 0)
		firstused = ring->nr_slots + firstused;

	skip = 0;
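
	/* Worked example of the wrap-around arithmetic above, for a
	 * hypothetical ring with nr_slots = 256: if current_slot = 3 and
	 * used_slots = 10, then firstused = 3 - 10 + 1 = -6, which wraps
	 * to 256 + (-6) = 250. Slots 250..255 and 0..3 are the ones still
	 * in flight, and slot 250 holds the oldest frame. */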
	if (unlikely(slot != firstused)) {
		/* This is possibly a firmware bug and will result in
		 * malfunction, memory leaks and/or a stall of the DMA
		 * functionality.
		 */
		if (slot == next_slot(ring, next_slot(ring, firstused))) {
			/* If a single header/data pair was missed, skip over
			 * the first two slots in an attempt to recover.
			 */
			slot = firstused;
			skip = 2;
			if (!err_out1) {
				/* Report the error once. */
				b43dbg(dev->wl,
				       "Skip on DMA ring %d slot %d.\n",
				       ring->index, slot);
				err_out1 = 1;
			}
		} else {
			/* More than a single header/data pair were missed.
			 * Report this error once.
			 */
			if (!err_out2)
				b43dbg(dev->wl,
				       "Out of order TX status report on DMA ring %d. Expected %d, but got %d\n",
				       ring->index, firstused, slot);
			err_out2 = 1;
			return;
		}
	}
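
	/* Note that err_out1 and err_out2 are static, so each of the two
	 * warnings above is emitted at most once over the lifetime of the
	 * driver, not once per ring or per occurrence. */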

	ops = ring->ops;
	while (1) {
		B43_WARN_ON(slot < 0 || slot >= ring->nr_slots);
		/* get meta - ignore returned value */
		ops->idx2desc(ring, slot, &meta);

		if (b43_dma_ptr_is_poisoned(meta->skb)) {
			b43dbg(dev->wl, "Poisoned TX slot %d (first=%d) "
			       "on ring %d\n",
			       slot, firstused, ring->index);
			break;
		}

		if (meta->skb) {
			struct b43_private_tx_info *priv_info =
				b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));

			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
			kfree(priv_info->bouncebuffer);
			priv_info->bouncebuffer = NULL;
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 b43_txhdr_size(dev), 1);
		}
|
2007-09-19 03:39:42 +08:00
|
|
|
|
|
|
|
if (meta->is_last_fragment) {
|
2008-05-15 18:55:29 +08:00
|
|
|
struct ieee80211_tx_info *info;
|
|
|
|
|
2009-11-20 05:24:29 +08:00
|
|
|
if (unlikely(!meta->skb)) {
|
2013-03-21 03:02:31 +08:00
|
|
|
/* This is a scatter-gather fragment of a frame,
|
|
|
|
* so the skb pointer must not be NULL.
|
|
|
|
*/
|
2009-11-20 05:24:29 +08:00
|
|
|
b43dbg(dev->wl, "TX status unexpected NULL skb "
|
|
|
|
"at slot %d (first=%d) on ring %d\n",
|
|
|
|
slot, firstused, ring->index);
|
|
|
|
break;
|
|
|
|
}
|
2008-05-15 18:55:29 +08:00
|
|
|
|
|
|
|
info = IEEE80211_SKB_CB(meta->skb);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Call back to inform the ieee80211 subsystem about
|
2013-03-21 03:02:31 +08:00
|
|
|
* the status of the transmission. When skipping over
|
|
|
|
* a missed TX status report, use a status structure
|
|
|
|
* filled with zeros to indicate that the frame was not
|
|
|
|
* sent (frame_count 0) and not acknowledged
|
2007-09-19 03:39:42 +08:00
|
|
|
*/
|
2013-03-21 03:02:31 +08:00
|
|
|
if (unlikely(skip))
|
|
|
|
txstat = &fake;
|
|
|
|
else
|
|
|
|
txstat = status;
|
|
|
|
|
|
|
|
frame_succeed = b43_fill_txstatus_report(dev, info,
|
|
|
|
txstat);
|
2008-03-30 04:01:16 +08:00
|
|
|
#ifdef CONFIG_B43_DEBUG
|
|
|
|
if (frame_succeed)
|
|
|
|
ring->nr_succeed_tx_packets++;
|
|
|
|
else
|
|
|
|
ring->nr_failed_tx_packets++;
|
|
|
|
ring->nr_total_packet_tries += status->frame_count;
|
|
|
|
#endif /* DEBUG */
|
2009-09-11 02:22:02 +08:00
|
|
|
ieee80211_tx_status(dev->wl->hw, meta->skb);
|
2008-05-15 18:55:29 +08:00
|
|
|
|
2009-11-20 05:24:29 +08:00
|
|
|
/* skb will be freed by ieee80211_tx_status().
|
|
|
|
* Poison our pointer. */
|
|
|
|
meta->skb = B43_DMA_PTR_POISON;
|
2007-09-19 03:39:42 +08:00
|
|
|
} else {
|
|
|
|
/* No need to call free_descriptor_buffer here, as
|
|
|
|
* this is only the txhdr, which is not allocated.
|
|
|
|
*/
|
2009-11-20 05:24:29 +08:00
|
|
|
if (unlikely(meta->skb)) {
|
|
|
|
b43dbg(dev->wl, "TX status unexpected non-NULL skb "
|
|
|
|
"at slot %d (first=%d) on ring %d\n",
|
|
|
|
slot, firstused, ring->index);
|
|
|
|
break;
|
|
|
|
}
|
2007-09-19 03:39:42 +08:00
|
|
|
}

		/* Everything is unmapped and freed, so the slot is not
		 * used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment && !skip) {
			/* This is the last scatter-gather
			 * fragment of the frame. We are done. */
			break;
		}
		slot = next_slot(ring, slot);
		if (skip > 0)
			--skip;
	}

	if (ring->stopped) {
		B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
		ring->stopped = false;
	}

	if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
		dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
	} else {
		/* If the driver queue is running, wake the corresponding
		 * mac80211 queue. */
		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
		}
	}
	/* Add work to the queue. */
	ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work);
}
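
/* The tx_work item queued above gives the driver a chance to retry frames
 * that were buffered internally while the mac80211 queues were stopped
 * (the handler is set up in main.c). */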

static void dma_rx(struct b43_dmaring *ring, int *slot)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_rxhdr_fw4 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = ops->idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			dmaaddr = meta->dmaaddr;
			goto drop_recycle_buffer;
		}
	}
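
	/* The poll above exists because the device can signal RX before the
	 * frame_len field has actually landed in host memory; re-reading a
	 * few times (up to ~12 microseconds in total) usually catches the
	 * late write. If the length still reads zero, the buffer is
	 * recycled unused. */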
	if (unlikely(b43_rx_buffer_is_poisoned(ring, skb))) {
		/* Something went wrong with the DMA.
		 * The device did not touch the buffer and did not
		 * overwrite the poison. */
		b43dbg(ring->dev->wl, "DMA RX: Dropping poisoned buffer.\n");
		dmaaddr = meta->dmaaddr;
		goto drop_recycle_buffer;
	}
	if (unlikely(len + ring->frameoffset > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = ops->idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			b43_poison_rx_buffer(ring, meta->skb);
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43err(ring->dev->wl, "DMA RX buffer too small "
		       "(len: %u, buffer: %u, nr-dropped: %d)\n",
		       len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
		goto drop_recycle_buffer;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
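	/* The received buffer starts with ring->frameoffset bytes of
	 * firmware RX header, followed by len bytes of 802.11 frame:
	 * grow the skb to cover both, then strip the header off the
	 * front before handing the frame up. */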
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	b43_rx(ring->dev, skb, rxhdr);
drop:
	return;

drop_recycle_buffer:
	/* Poison and recycle the RX buffer. */
	b43_poison_rx_buffer(ring, skb);
	sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
}

void b43_dma_handle_rx_overflow(struct b43_dmaring *ring)
{
	int current_slot, previous_slot;

	B43_WARN_ON(ring->tx);

	/* The device has filled all buffers. Drop all packets and let
	 * TCP decrease its speed.
	 * Decrementing the RX index by one lets the device see every
	 * slot as free again.
	 */
	/*
	 * TODO: How to increase rx_drop in mac80211?
	 */
	current_slot = ring->ops->get_current_rxslot(ring);
	previous_slot = prev_slot(ring, current_slot);
	ring->ops->set_current_rxslot(ring, previous_slot);
}

void b43_dma_rx(struct b43_dmaring *ring)
{
	const struct b43_dma_ops *ops = ring->ops;
	int slot, current_slot;
	int used_slots = 0;

	B43_WARN_ON(ring->tx);
	current_slot = ops->get_current_rxslot(ring);
	B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots));

	slot = ring->current_slot;
	for (; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
		update_max_used_slots(ring, ++used_slots);
	}
	wmb();
	ops->set_current_rxslot(ring, slot);
	ring->current_slot = slot;
}
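
/* b43_dma_rx() walks every descriptor between the driver's own cursor and
 * the slot the hardware reports as current, processing one frame per
 * iteration. The wmb() makes sure the recycled buffers are visible in
 * memory before set_current_rxslot() returns those slots to the device. */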

static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring)
{
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_suspend(ring);
}

static void b43_dma_tx_resume_ring(struct b43_dmaring *ring)
{
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_resume(ring);
}

void b43_dma_tx_suspend(struct b43_wldev *dev)
{
	b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BK);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_mcast);
}

void b43_dma_tx_resume(struct b43_wldev *dev)
{
	b43_dma_tx_resume_ring(dev->dma.tx_ring_mcast);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BK);
	b43_power_saving_ctl_bits(dev, 0);
}
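
/* Suspend forces the device awake first and then stops the rings from
 * lowest (AC_BK) to highest priority; resume walks the same list in the
 * reverse order and only drops the power-saving override at the very end,
 * once every ring is running again. */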

static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type,
			   u16 mmio_base, bool enable)
{
	u32 ctl;

	if (type == B43_DMA_64BIT) {
		ctl = b43_read32(dev, mmio_base + B43_DMA64_RXCTL);
		ctl &= ~B43_DMA64_RXDIRECTFIFO;
		if (enable)
			ctl |= B43_DMA64_RXDIRECTFIFO;
		b43_write32(dev, mmio_base + B43_DMA64_RXCTL, ctl);
	} else {
		ctl = b43_read32(dev, mmio_base + B43_DMA32_RXCTL);
		ctl &= ~B43_DMA32_RXDIRECTFIFO;
		if (enable)
			ctl |= B43_DMA32_RXDIRECTFIFO;
		b43_write32(dev, mmio_base + B43_DMA32_RXCTL, ctl);
	}
}

/* Enable/Disable Direct FIFO Receive Mode (PIO) on a RX engine.
 * This is called from PIO code, so DMA structures are not available. */
void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
			    unsigned int engine_index, bool enable)
{
	enum b43_dmatype type;
	u16 mmio_base;

	type = dma_mask_to_engine_type(supported_dma_mask(dev));

	mmio_base = b43_dmacontroller_base(type, engine_index);
	direct_fifo_rx(dev, type, mmio_base, enable);
}