2008-12-21 08:57:45 +08:00
|
|
|
/*
|
|
|
|
* Intel Wireless WiMAX Connection 2400m
|
|
|
|
* Glue with the networking stack
|
|
|
|
*
|
|
|
|
*
|
|
|
|
* Copyright (C) 2007 Intel Corporation <linux-wimax@intel.com>
|
|
|
|
* Yanir Lubetkin <yanirx.lubetkin@intel.com>
|
|
|
|
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU General Public License version
|
|
|
|
* 2 as published by the Free Software Foundation.
|
|
|
|
*
|
|
|
|
* This program is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
* GNU General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU General Public License
|
|
|
|
* along with this program; if not, write to the Free Software
|
|
|
|
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
|
|
|
* 02110-1301, USA.
|
|
|
|
*
|
|
|
|
*
|
|
|
|
* This implements an ethernet device for the i2400m.
|
|
|
|
*
|
|
|
|
* We fake being an ethernet device to simplify the support from user
|
|
|
|
* space and from the other side. The world is (sadly) configured to
|
|
|
|
* take in only Ethernet devices...
|
|
|
|
*
|
2009-03-01 07:42:52 +08:00
|
|
|
* Because of this, when using firmwares <= v1.3, there is an
|
|
|
|
* copy-each-rxed-packet overhead on the RX path. Each IP packet has
|
|
|
|
* to be reallocated to add an ethernet header (as there is no space
|
|
|
|
* in what we get from the device). This is a known drawback and
|
|
|
|
* firmwares >= 1.4 add header space that can be used to insert the
|
|
|
|
* ethernet header without having to reallocate and copy.
|
2008-12-21 08:57:45 +08:00
|
|
|
*
|
|
|
|
* TX error handling is tricky; because we have to FIFO/queue the
|
|
|
|
* buffers for transmission (as the hardware likes it aggregated), we
|
|
|
|
* just give the skb to the TX subsystem and by the time it is
|
|
|
|
* transmitted, we have long forgotten about it. So we just don't care
|
|
|
|
* too much about it.
|
|
|
|
*
|
|
|
|
* Note that when the device is in idle mode with the basestation, we
|
|
|
|
* need to negotiate coming back up online. That involves negotiation
|
|
|
|
* and possible user space interaction. Thus, we defer to a workqueue
|
|
|
|
* to do all that. By default, we only queue a single packet and drop
|
|
|
|
* the rest, as potentially the time to go back from idle to normal is
|
|
|
|
* long.
|
|
|
|
*
|
|
|
|
* ROADMAP
|
|
|
|
*
|
|
|
|
* i2400m_open Called on ifconfig up
|
|
|
|
* i2400m_stop Called on ifconfig down
|
|
|
|
*
|
|
|
|
* i2400m_hard_start_xmit Called by the network stack to send a packet
|
|
|
|
* i2400m_net_wake_tx Wake up device from basestation-IDLE & TX
|
|
|
|
* i2400m_wake_tx_work
|
|
|
|
* i2400m_cmd_exit_idle
|
|
|
|
* i2400m_tx
|
|
|
|
* i2400m_net_tx TX a data frame
|
|
|
|
* i2400m_tx
|
|
|
|
*
|
|
|
|
* i2400m_change_mtu Called on ifconfig mtu XXX
|
|
|
|
*
|
|
|
|
* i2400m_tx_timeout Called when the device times out
|
|
|
|
*
|
|
|
|
* i2400m_net_rx Called by the RX code when a data frame is
|
2009-03-01 07:42:52 +08:00
|
|
|
* available (firmware <= 1.3)
|
|
|
|
* i2400m_net_erx Called by the RX code when a data frame is
|
|
|
|
* available (firmware >= 1.4).
|
2008-12-21 08:57:45 +08:00
|
|
|
* i2400m_netdev_setup Called to setup all the netdev stuff from
|
|
|
|
* alloc_netdev.
|
|
|
|
*/
|
|
|
|
#include <linux/if_arp.h>
|
include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit slab.h inclusion from percpu.h
percpu.h is included by sched.h and module.h and thus ends up being
included when building most .c files. percpu.h includes slab.h which
in turn includes gfp.h making everything defined by the two files
universally available and complicating inclusion dependencies.
percpu.h -> slab.h dependency is about to be removed. Prepare for
this change by updating users of gfp and slab facilities to include those
headers directly instead of assuming availability. As this conversion
needs to touch large number of source files, the following script is
used as the basis of conversion.
http://userweb.kernel.org/~tj/misc/slabh-sweep.py
The script does the following.
* Scan files for gfp and slab usages and update includes such that
only the necessary includes are there. ie. if only gfp is used,
gfp.h, if slab is used, slab.h.
* When the script inserts a new include, it looks at the include
blocks and try to put the new include such that its order conforms
to its surrounding. It's put in the include block which contains
core kernel includes, in the same order that the rest are ordered -
alphabetical, Christmas tree, rev-Xmas-tree or at the end if there
doesn't seem to be any matching order.
* If the script can't find a place to put a new include (mostly
because the file doesn't have fitting include block), it prints out
an error message indicating which .h file needs to be added to the
file.
The conversion was done in the following steps.
1. The initial automatic conversion of all .c files updated slightly
over 4000 files, deleting around 700 includes and adding ~480 gfp.h
and ~3000 slab.h inclusions. The script emitted errors for ~400
files.
2. Each error was manually checked. Some didn't need the inclusion,
some needed manual addition while adding it to implementation .h or
embedding .c file was more appropriate for others. This step added
inclusions to around 150 files.
3. The script was run again and the output was compared to the edits
from #2 to make sure no file was left behind.
4. Several build tests were done and a couple of problems were fixed.
e.g. lib/decompress_*.c used malloc/free() wrappers around slab
APIs requiring slab.h to be added manually.
5. The script was run on all .h files but without automatically
editing them as sprinkling gfp.h and slab.h inclusions around .h
files could easily lead to inclusion dependency hell. Most gfp.h
inclusion directives were ignored as stuff from gfp.h was usually
wildly available and often used in preprocessor macros. Each
slab.h inclusion directive was examined and added manually as
necessary.
6. percpu.h was updated not to include slab.h.
7. Build test were done on the following configurations and failures
were fixed. CONFIG_GCOV_KERNEL was turned off for all tests (as my
distributed build env didn't work with gcov compiles) and a few
more options had to be turned off depending on archs to make things
build (like ipr on powerpc/64 which failed due to missing writeq).
* x86 and x86_64 UP and SMP allmodconfig and a custom test config.
* powerpc and powerpc64 SMP allmodconfig
* sparc and sparc64 SMP allmodconfig
* ia64 SMP allmodconfig
* s390 SMP allmodconfig
* alpha SMP allmodconfig
* um on x86_64 SMP allmodconfig
8. percpu.h modifications were reverted so that it could be applied as
a separate patch and serve as bisection point.
Given the fact that I had only a couple of failures from tests on step
6, I'm fairly confident about the coverage of this conversion patch.
If there is a breakage, it's likely to be something in one of the arch
headers which should be easily discoverable on most builds of
the specific arch.
Signed-off-by: Tejun Heo <tj@kernel.org>
Guess-its-ok-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
2010-03-24 16:04:11 +08:00
|
|
|
#include <linux/slab.h>
|
2008-12-21 08:57:45 +08:00
|
|
|
#include <linux/netdevice.h>
|
2009-09-18 04:06:14 +08:00
|
|
|
#include <linux/ethtool.h>
|
2011-05-28 04:14:23 +08:00
|
|
|
#include <linux/export.h>
|
2008-12-21 08:57:45 +08:00
|
|
|
#include "i2400m.h"
|
|
|
|
|
|
|
|
|
|
|
|
#define D_SUBMODULE netdev
|
|
|
|
#include "debug-levels.h"
|
|
|
|
|
|
|
|
enum {
|
|
|
|
/* netdev interface */
|
2009-10-15 17:16:08 +08:00
|
|
|
/* 20 secs? yep, this is the maximum timeout that the device
|
|
|
|
* might take to get out of IDLE / negotiate it with the base
|
|
|
|
* station. We add 1sec for good measure. */
|
|
|
|
I2400M_TX_TIMEOUT = 21 * HZ,
|
2010-04-09 07:24:32 +08:00
|
|
|
/*
|
|
|
|
* Experimentation has determined that, 20 to be a good value
|
|
|
|
* for minimizing the jitter in the throughput.
|
|
|
|
*/
|
|
|
|
I2400M_TX_QLEN = 20,
|
2008-12-21 08:57:45 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
|
|
|
|
static
|
|
|
|
int i2400m_open(struct net_device *net_dev)
|
|
|
|
{
|
|
|
|
int result;
|
|
|
|
struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
|
|
|
|
struct device *dev = i2400m_dev(i2400m);
|
|
|
|
|
|
|
|
d_fnstart(3, dev, "(net_dev %p [i2400m %p])\n", net_dev, i2400m);
|
2009-09-17 08:53:57 +08:00
|
|
|
/* Make sure we wait until init is complete... */
|
|
|
|
mutex_lock(&i2400m->init_mutex);
|
|
|
|
if (i2400m->updown)
|
2008-12-21 08:57:45 +08:00
|
|
|
result = 0;
|
2009-09-17 08:53:57 +08:00
|
|
|
else
|
|
|
|
result = -EBUSY;
|
|
|
|
mutex_unlock(&i2400m->init_mutex);
|
2008-12-21 08:57:45 +08:00
|
|
|
d_fnend(3, dev, "(net_dev %p [i2400m %p]) = %d\n",
|
|
|
|
net_dev, i2400m, result);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * i2400m_stop - netdev hook for "ifconfig down"
 *
 * Cancels any pending wake-and-TX work and releases the resources it
 * held (see i2400m_net_wake_stop()). Always succeeds.
 */
static
int i2400m_stop(struct net_device *net_dev)
{
	struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
	struct device *dev = i2400m_dev(i2400m);

	d_fnstart(3, dev, "(net_dev %p [i2400m %p])\n", net_dev, i2400m);
	i2400m_net_wake_stop(i2400m);
	d_fnend(3, dev, "(net_dev %p [i2400m %p]) = 0\n", net_dev, i2400m);
	return 0;
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Wake up the device and transmit a held SKB, then restart the net queue
|
|
|
|
*
|
|
|
|
* When the device goes into basestation-idle mode, we need to tell it
|
|
|
|
* to exit that mode; it will negotiate with the base station, user
|
|
|
|
* space may have to intervene to rehandshake crypto and then tell us
|
|
|
|
* when it is ready to transmit the packet we have "queued". Still we
|
|
|
|
* need to give it sometime after it reports being ok.
|
|
|
|
*
|
|
|
|
* On error, there is not much we can do. If the error was on TX, we
|
|
|
|
* still wake the queue up to see if the next packet will be luckier.
|
|
|
|
*
|
|
|
|
* If _cmd_exit_idle() fails...well, it could be many things; most
|
|
|
|
* commonly it is that something else took the device out of IDLE mode
|
|
|
|
* (for example, the base station). In that case we get an -EILSEQ and
|
|
|
|
* we are just going to ignore that one. If the device is back to
|
|
|
|
* connected, then fine -- if it is someother state, the packet will
|
|
|
|
* be dropped anyway.
|
|
|
|
*/
|
|
|
|
void i2400m_wake_tx_work(struct work_struct *ws)
|
|
|
|
{
|
|
|
|
int result;
|
|
|
|
struct i2400m *i2400m = container_of(ws, struct i2400m, wake_tx_ws);
|
2009-10-15 17:16:08 +08:00
|
|
|
struct net_device *net_dev = i2400m->wimax_dev.net_dev;
|
2008-12-21 08:57:45 +08:00
|
|
|
struct device *dev = i2400m_dev(i2400m);
|
2012-12-22 09:57:10 +08:00
|
|
|
struct sk_buff *skb;
|
2008-12-21 08:57:45 +08:00
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&i2400m->tx_lock, flags);
|
|
|
|
skb = i2400m->wake_tx_skb;
|
|
|
|
i2400m->wake_tx_skb = NULL;
|
|
|
|
spin_unlock_irqrestore(&i2400m->tx_lock, flags);
|
|
|
|
|
|
|
|
d_fnstart(3, dev, "(ws %p i2400m %p skb %p)\n", ws, i2400m, skb);
|
|
|
|
result = -EINVAL;
|
|
|
|
if (skb == NULL) {
|
2011-03-31 09:57:33 +08:00
|
|
|
dev_err(dev, "WAKE&TX: skb disappeared!\n");
|
2008-12-21 08:57:45 +08:00
|
|
|
goto out_put;
|
|
|
|
}
|
2009-10-15 17:16:08 +08:00
|
|
|
/* If we have, somehow, lost the connection after this was
|
|
|
|
* queued, don't do anything; this might be the device got
|
|
|
|
* reset or just disconnected. */
|
|
|
|
if (unlikely(!netif_carrier_ok(net_dev)))
|
|
|
|
goto out_kfree;
|
2008-12-21 08:57:45 +08:00
|
|
|
result = i2400m_cmd_exit_idle(i2400m);
|
|
|
|
if (result == -EILSEQ)
|
|
|
|
result = 0;
|
|
|
|
if (result < 0) {
|
|
|
|
dev_err(dev, "WAKE&TX: device didn't get out of idle: "
|
2009-10-19 15:24:56 +08:00
|
|
|
"%d - resetting\n", result);
|
|
|
|
i2400m_reset(i2400m, I2400M_RT_BUS);
|
|
|
|
goto error;
|
2008-12-21 08:57:45 +08:00
|
|
|
}
|
|
|
|
result = wait_event_timeout(i2400m->state_wq,
|
2009-10-15 17:16:08 +08:00
|
|
|
i2400m->state != I2400M_SS_IDLE,
|
|
|
|
net_dev->watchdog_timeo - HZ/2);
|
2008-12-21 08:57:45 +08:00
|
|
|
if (result == 0)
|
|
|
|
result = -ETIMEDOUT;
|
|
|
|
if (result < 0) {
|
|
|
|
dev_err(dev, "WAKE&TX: error waiting for device to exit IDLE: "
|
2009-10-19 15:24:56 +08:00
|
|
|
"%d - resetting\n", result);
|
|
|
|
i2400m_reset(i2400m, I2400M_RT_BUS);
|
2008-12-21 08:57:45 +08:00
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
msleep(20); /* device still needs some time or it drops it */
|
|
|
|
result = i2400m_tx(i2400m, skb->data, skb->len, I2400M_PT_DATA);
|
|
|
|
error:
|
2009-10-15 17:16:08 +08:00
|
|
|
netif_wake_queue(net_dev);
|
|
|
|
out_kfree:
|
2008-12-21 08:57:45 +08:00
|
|
|
kfree_skb(skb); /* refcount transferred by _hard_start_xmit() */
|
|
|
|
out_put:
|
|
|
|
i2400m_put(i2400m);
|
|
|
|
d_fnend(3, dev, "(ws %p i2400m %p skb %p) = void [%d]\n",
|
|
|
|
ws, i2400m, skb, result);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Prepare the data payload TX header
|
|
|
|
*
|
|
|
|
* The i2400m expects a 4 byte header in front of a data packet.
|
|
|
|
*
|
|
|
|
* Because we pretend to be an ethernet device, this packet comes with
|
|
|
|
* an ethernet header. Pull it and push our header.
|
|
|
|
*/
|
|
|
|
static
|
|
|
|
void i2400m_tx_prep_header(struct sk_buff *skb)
|
|
|
|
{
|
|
|
|
struct i2400m_pl_data_hdr *pl_hdr;
|
|
|
|
skb_pull(skb, ETH_HLEN);
|
networking: make skb_push & __skb_push return void pointers
It seems like a historic accident that these return unsigned char *,
and in many places that means casts are required, more often than not.
Make these functions return void * and remove all the casts across
the tree, adding a (u8 *) cast only where the unsigned char pointer
was used directly, all done with the following spatch:
@@
expression SKB, LEN;
typedef u8;
identifier fn = { skb_push, __skb_push, skb_push_rcsum };
@@
- *(fn(SKB, LEN))
+ *(u8 *)fn(SKB, LEN)
@@
expression E, SKB, LEN;
identifier fn = { skb_push, __skb_push, skb_push_rcsum };
type T;
@@
- E = ((T *)(fn(SKB, LEN)))
+ E = fn(SKB, LEN)
@@
expression SKB, LEN;
identifier fn = { skb_push, __skb_push, skb_push_rcsum };
@@
- fn(SKB, LEN)[0]
+ *(u8 *)fn(SKB, LEN)
Note that the last part there converts from push(...)[0] to the
more idiomatic *(u8 *)push(...).
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-06-16 20:29:23 +08:00
|
|
|
pl_hdr = skb_push(skb, sizeof(*pl_hdr));
|
2008-12-21 08:57:45 +08:00
|
|
|
pl_hdr->reserved = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2009-09-17 07:30:39 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Cleanup resources acquired during i2400m_net_wake_tx()
|
|
|
|
*
|
|
|
|
* This is called by __i2400m_dev_stop and means we have to make sure
|
|
|
|
* the workqueue is flushed from any pending work.
|
|
|
|
*/
|
|
|
|
void i2400m_net_wake_stop(struct i2400m *i2400m)
|
|
|
|
{
|
|
|
|
struct device *dev = i2400m_dev(i2400m);
|
2012-12-22 09:57:10 +08:00
|
|
|
struct sk_buff *wake_tx_skb;
|
|
|
|
unsigned long flags;
|
2009-09-17 07:30:39 +08:00
|
|
|
|
|
|
|
d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
|
2012-12-22 09:57:10 +08:00
|
|
|
/*
|
|
|
|
* See i2400m_hard_start_xmit(), references are taken there and
|
|
|
|
* here we release them if the packet was still pending.
|
|
|
|
*/
|
|
|
|
cancel_work_sync(&i2400m->wake_tx_ws);
|
|
|
|
|
|
|
|
spin_lock_irqsave(&i2400m->tx_lock, flags);
|
|
|
|
wake_tx_skb = i2400m->wake_tx_skb;
|
|
|
|
i2400m->wake_tx_skb = NULL;
|
|
|
|
spin_unlock_irqrestore(&i2400m->tx_lock, flags);
|
|
|
|
|
|
|
|
if (wake_tx_skb) {
|
2009-09-17 07:30:39 +08:00
|
|
|
i2400m_put(i2400m);
|
|
|
|
kfree_skb(wake_tx_skb);
|
|
|
|
}
|
2012-12-22 09:57:10 +08:00
|
|
|
|
2009-09-17 07:30:39 +08:00
|
|
|
d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2008-12-21 08:57:45 +08:00
|
|
|
/*
|
|
|
|
* TX an skb to an idle device
|
|
|
|
*
|
|
|
|
* When the device is in basestation-idle mode, we need to wake it up
|
|
|
|
* and then TX. So we queue a work_struct for doing so.
|
|
|
|
*
|
|
|
|
* We need to get an extra ref for the skb (so it is not dropped), as
|
|
|
|
* well as be careful not to queue more than one request (won't help
|
|
|
|
* at all). If more than one request comes or there are errors, we
|
|
|
|
* just drop the packets (see i2400m_hard_start_xmit()).
|
|
|
|
*/
|
|
|
|
static
|
|
|
|
int i2400m_net_wake_tx(struct i2400m *i2400m, struct net_device *net_dev,
|
|
|
|
struct sk_buff *skb)
|
|
|
|
{
|
|
|
|
int result;
|
|
|
|
struct device *dev = i2400m_dev(i2400m);
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
d_fnstart(3, dev, "(skb %p net_dev %p)\n", skb, net_dev);
|
|
|
|
if (net_ratelimit()) {
|
|
|
|
d_printf(3, dev, "WAKE&NETTX: "
|
|
|
|
"skb %p sending %d bytes to radio\n",
|
|
|
|
skb, skb->len);
|
|
|
|
d_dump(4, dev, skb->data, skb->len);
|
|
|
|
}
|
|
|
|
/* We hold a ref count for i2400m and skb, so when
|
|
|
|
* stopping() the device, we need to cancel that work
|
|
|
|
* and if pending, release those resources. */
|
|
|
|
result = 0;
|
|
|
|
spin_lock_irqsave(&i2400m->tx_lock, flags);
|
2012-12-22 09:57:10 +08:00
|
|
|
if (!i2400m->wake_tx_skb) {
|
2008-12-21 08:57:45 +08:00
|
|
|
netif_stop_queue(net_dev);
|
|
|
|
i2400m_get(i2400m);
|
|
|
|
i2400m->wake_tx_skb = skb_get(skb); /* transfer ref count */
|
|
|
|
i2400m_tx_prep_header(skb);
|
|
|
|
result = schedule_work(&i2400m->wake_tx_ws);
|
|
|
|
WARN_ON(result == 0);
|
|
|
|
}
|
|
|
|
spin_unlock_irqrestore(&i2400m->tx_lock, flags);
|
|
|
|
if (result == 0) {
|
|
|
|
/* Yes, this happens even if we stopped the
|
|
|
|
* queue -- blame the queue disciplines that
|
|
|
|
* queue without looking -- I guess there is a reason
|
|
|
|
* for that. */
|
|
|
|
if (net_ratelimit())
|
|
|
|
d_printf(1, dev, "NETTX: device exiting idle, "
|
|
|
|
"dropping skb %p, queue running %d\n",
|
|
|
|
skb, netif_queue_stopped(net_dev));
|
|
|
|
result = -EBUSY;
|
|
|
|
}
|
|
|
|
d_fnend(3, dev, "(skb %p net_dev %p) = %d\n", skb, net_dev, result);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Transmit a packet to the base station on behalf of the network stack.
|
|
|
|
*
|
|
|
|
* Returns: 0 if ok, < 0 errno code on error.
|
|
|
|
*
|
|
|
|
* We need to pull the ethernet header and add the hardware header,
|
|
|
|
* which is currently set to all zeroes and reserved.
|
|
|
|
*/
|
|
|
|
static
|
|
|
|
int i2400m_net_tx(struct i2400m *i2400m, struct net_device *net_dev,
|
|
|
|
struct sk_buff *skb)
|
|
|
|
{
|
|
|
|
int result;
|
|
|
|
struct device *dev = i2400m_dev(i2400m);
|
|
|
|
|
|
|
|
d_fnstart(3, dev, "(i2400m %p net_dev %p skb %p)\n",
|
|
|
|
i2400m, net_dev, skb);
|
|
|
|
/* FIXME: check eth hdr, only IPv4 is routed by the device as of now */
|
2016-05-03 22:33:13 +08:00
|
|
|
netif_trans_update(net_dev);
|
2008-12-21 08:57:45 +08:00
|
|
|
i2400m_tx_prep_header(skb);
|
|
|
|
d_printf(3, dev, "NETTX: skb %p sending %d bytes to radio\n",
|
|
|
|
skb, skb->len);
|
|
|
|
d_dump(4, dev, skb->data, skb->len);
|
|
|
|
result = i2400m_tx(i2400m, skb->data, skb->len, I2400M_PT_DATA);
|
|
|
|
d_fnend(3, dev, "(i2400m %p net_dev %p skb %p) = %d\n",
|
|
|
|
i2400m, net_dev, skb, result);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Transmit a packet to the base station on behalf of the network stack
|
|
|
|
*
|
|
|
|
*
|
|
|
|
* Returns: NETDEV_TX_OK (always, even in case of error)
|
|
|
|
*
|
|
|
|
* In case of error, we just drop it. Reasons:
|
|
|
|
*
|
|
|
|
* - we add a hw header to each skb, and if the network stack
|
|
|
|
* retries, we have no way to know if that skb has it or not.
|
|
|
|
*
|
|
|
|
* - network protocols have their own drop-recovery mechanisms
|
|
|
|
*
|
|
|
|
* - there is not much else we can do
|
|
|
|
*
|
|
|
|
* If the device is idle, we need to wake it up; that is an operation
|
|
|
|
* that will sleep. See i2400m_net_wake_tx() for details.
|
|
|
|
*/
|
|
|
|
static
|
2009-09-01 03:50:57 +08:00
|
|
|
netdev_tx_t i2400m_hard_start_xmit(struct sk_buff *skb,
|
|
|
|
struct net_device *net_dev)
|
2008-12-21 08:57:45 +08:00
|
|
|
{
|
|
|
|
struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
|
|
|
|
struct device *dev = i2400m_dev(i2400m);
|
2012-03-14 17:21:44 +08:00
|
|
|
int result = -1;
|
2008-12-21 08:57:45 +08:00
|
|
|
|
|
|
|
d_fnstart(3, dev, "(skb %p net_dev %p)\n", skb, net_dev);
|
2012-03-14 17:21:44 +08:00
|
|
|
|
2014-03-29 19:26:30 +08:00
|
|
|
if (skb_cow_head(skb, 0))
|
2012-03-14 17:21:44 +08:00
|
|
|
goto drop;
|
2009-09-30 07:28:24 +08:00
|
|
|
|
2008-12-21 08:57:45 +08:00
|
|
|
if (i2400m->state == I2400M_SS_IDLE)
|
|
|
|
result = i2400m_net_wake_tx(i2400m, net_dev, skb);
|
|
|
|
else
|
|
|
|
result = i2400m_net_tx(i2400m, net_dev, skb);
|
2012-03-14 17:21:44 +08:00
|
|
|
if (result < 0) {
|
|
|
|
drop:
|
2008-12-21 08:57:45 +08:00
|
|
|
net_dev->stats.tx_dropped++;
|
2012-03-14 17:21:44 +08:00
|
|
|
} else {
|
2008-12-21 08:57:45 +08:00
|
|
|
net_dev->stats.tx_packets++;
|
|
|
|
net_dev->stats.tx_bytes += skb->len;
|
|
|
|
}
|
2012-03-14 17:21:44 +08:00
|
|
|
dev_kfree_skb(skb);
|
2009-09-30 07:28:24 +08:00
|
|
|
d_fnend(3, dev, "(skb %p net_dev %p) = %d\n", skb, net_dev, result);
|
2012-03-14 17:21:44 +08:00
|
|
|
return NETDEV_TX_OK;
|
2008-12-21 08:57:45 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static
|
|
|
|
void i2400m_tx_timeout(struct net_device *net_dev)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* We might want to kick the device
|
|
|
|
*
|
|
|
|
* There is not much we can do though, as the device requires
|
|
|
|
* that we send the data aggregated. By the time we receive
|
|
|
|
* this, there might be data pending to be sent or not...
|
|
|
|
*/
|
|
|
|
net_dev->stats.tx_errors++;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Create a fake ethernet header
|
|
|
|
*
|
|
|
|
* For emulating an ethernet device, every received IP header has to
|
2009-03-01 07:42:52 +08:00
|
|
|
* be prefixed with an ethernet header. Fake it with the given
|
|
|
|
* protocol.
|
2008-12-21 08:57:45 +08:00
|
|
|
*/
|
|
|
|
static
|
|
|
|
void i2400m_rx_fake_eth_header(struct net_device *net_dev,
|
2009-03-01 07:42:53 +08:00
|
|
|
void *_eth_hdr, __be16 protocol)
|
2008-12-21 08:57:45 +08:00
|
|
|
{
|
2009-04-23 07:53:08 +08:00
|
|
|
struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
|
2008-12-21 08:57:45 +08:00
|
|
|
struct ethhdr *eth_hdr = _eth_hdr;
|
|
|
|
|
|
|
|
memcpy(eth_hdr->h_dest, net_dev->dev_addr, sizeof(eth_hdr->h_dest));
|
2009-04-23 07:53:08 +08:00
|
|
|
memcpy(eth_hdr->h_source, i2400m->src_mac_addr,
|
|
|
|
sizeof(eth_hdr->h_source));
|
2009-03-01 07:42:53 +08:00
|
|
|
eth_hdr->h_proto = protocol;
|
2008-12-21 08:57:45 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* i2400m_net_rx - pass a network packet to the stack
|
|
|
|
*
|
|
|
|
* @i2400m: device instance
|
|
|
|
* @skb_rx: the skb where the buffer pointed to by @buf is
|
|
|
|
* @i: 1 if payload is the only one
|
|
|
|
* @buf: pointer to the buffer containing the data
|
|
|
|
* @len: buffer's length
|
|
|
|
*
|
2009-03-01 07:42:52 +08:00
|
|
|
* This is only used now for the v1.3 firmware. It will be deprecated
|
|
|
|
* in >= 2.6.31.
|
|
|
|
*
|
|
|
|
* Note that due to firmware limitations, we don't have space to add
|
|
|
|
* an ethernet header, so we need to copy each packet. Firmware
|
|
|
|
* versions >= v1.4 fix this [see i2400m_net_erx()].
|
|
|
|
*
|
2008-12-21 08:57:45 +08:00
|
|
|
* We just clone the skb and set it up so that it's skb->data pointer
|
|
|
|
* points to "buf" and it's length.
|
|
|
|
*
|
|
|
|
* Note that if the payload is the last (or the only one) in a
|
|
|
|
* multi-payload message, we don't clone the SKB but just reuse it.
|
|
|
|
*
|
|
|
|
* This function is normally run from a thread context. However, we
|
|
|
|
* still use netif_rx() instead of netif_receive_skb() as was
|
|
|
|
* recommended in the mailing list. Reason is in some stress tests
|
|
|
|
* when sending/receiving a lot of data we seem to hit a softlock in
|
|
|
|
* the kernel's TCP implementation [aroudn tcp_delay_timer()]. Using
|
|
|
|
* netif_rx() took care of the issue.
|
|
|
|
*
|
|
|
|
* This is, of course, still open to do more research on why running
|
|
|
|
* with netif_receive_skb() hits this softlock. FIXME.
|
|
|
|
*
|
|
|
|
* FIXME: currently we don't do any efforts at distinguishing if what
|
|
|
|
* we got was an IPv4 or IPv6 header, to setup the protocol field
|
|
|
|
* correctly.
|
|
|
|
*/
|
|
|
|
void i2400m_net_rx(struct i2400m *i2400m, struct sk_buff *skb_rx,
|
|
|
|
unsigned i, const void *buf, int buf_len)
|
|
|
|
{
|
|
|
|
struct net_device *net_dev = i2400m->wimax_dev.net_dev;
|
|
|
|
struct device *dev = i2400m_dev(i2400m);
|
|
|
|
struct sk_buff *skb;
|
|
|
|
|
|
|
|
d_fnstart(2, dev, "(i2400m %p buf %p buf_len %d)\n",
|
|
|
|
i2400m, buf, buf_len);
|
|
|
|
if (i) {
|
|
|
|
skb = skb_get(skb_rx);
|
|
|
|
d_printf(2, dev, "RX: reusing first payload skb %p\n", skb);
|
|
|
|
skb_pull(skb, buf - (void *) skb->data);
|
|
|
|
skb_trim(skb, (void *) skb_end_pointer(skb) - buf);
|
|
|
|
} else {
|
|
|
|
/* Yes, this is bad -- a lot of overhead -- see
|
|
|
|
* comments at the top of the file */
|
|
|
|
skb = __netdev_alloc_skb(net_dev, buf_len, GFP_KERNEL);
|
|
|
|
if (skb == NULL) {
|
|
|
|
dev_err(dev, "NETRX: no memory to realloc skb\n");
|
|
|
|
net_dev->stats.rx_dropped++;
|
|
|
|
goto error_skb_realloc;
|
|
|
|
}
|
networking: introduce and use skb_put_data()
A common pattern with skb_put() is to just want to memcpy()
some data into the new space, introduce skb_put_data() for
this.
An spatch similar to the one for skb_put_zero() converts many
of the places using it:
@@
identifier p, p2;
expression len, skb, data;
type t, t2;
@@
(
-p = skb_put(skb, len);
+p = skb_put_data(skb, data, len);
|
-p = (t)skb_put(skb, len);
+p = skb_put_data(skb, data, len);
)
(
p2 = (t2)p;
-memcpy(p2, data, len);
|
-memcpy(p, data, len);
)
@@
type t, t2;
identifier p, p2;
expression skb, data;
@@
t *p;
...
(
-p = skb_put(skb, sizeof(t));
+p = skb_put_data(skb, data, sizeof(t));
|
-p = (t *)skb_put(skb, sizeof(t));
+p = skb_put_data(skb, data, sizeof(t));
)
(
p2 = (t2)p;
-memcpy(p2, data, sizeof(*p));
|
-memcpy(p, data, sizeof(*p));
)
@@
expression skb, len, data;
@@
-memcpy(skb_put(skb, len), data, len);
+skb_put_data(skb, data, len);
(again, manually post-processed to retain some comments)
Reviewed-by: Stephen Hemminger <stephen@networkplumber.org>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-06-16 20:29:20 +08:00
|
|
|
skb_put_data(skb, buf, buf_len);
|
2008-12-21 08:57:45 +08:00
|
|
|
}
|
|
|
|
i2400m_rx_fake_eth_header(i2400m->wimax_dev.net_dev,
|
2009-03-01 07:42:53 +08:00
|
|
|
skb->data - ETH_HLEN,
|
|
|
|
cpu_to_be16(ETH_P_IP));
|
2008-12-21 08:57:45 +08:00
|
|
|
skb_set_mac_header(skb, -ETH_HLEN);
|
|
|
|
skb->dev = i2400m->wimax_dev.net_dev;
|
|
|
|
skb->protocol = htons(ETH_P_IP);
|
|
|
|
net_dev->stats.rx_packets++;
|
|
|
|
net_dev->stats.rx_bytes += buf_len;
|
|
|
|
d_printf(3, dev, "NETRX: receiving %d bytes to network stack\n",
|
|
|
|
buf_len);
|
|
|
|
d_dump(4, dev, buf, buf_len);
|
|
|
|
netif_rx_ni(skb); /* see notes in function header */
|
|
|
|
error_skb_realloc:
|
|
|
|
d_fnend(2, dev, "(i2400m %p buf %p buf_len %d) = void\n",
|
|
|
|
i2400m, buf, buf_len);
|
|
|
|
}
|
|
|
|
|
2009-03-01 07:42:52 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* i2400m_net_erx - pass a network packet to the stack (extended version)
|
|
|
|
*
|
|
|
|
* @i2400m: device descriptor
|
|
|
|
* @skb: the skb where the packet is - the skb should be set to point
|
|
|
|
* at the IP packet; this function will add ethernet headers if
|
|
|
|
* needed.
|
|
|
|
* @cs: packet type
|
|
|
|
*
|
|
|
|
* This is only used now for firmware >= v1.4. Note it is quite
|
|
|
|
* similar to i2400m_net_rx() (used only for v1.3 firmware).
|
|
|
|
*
|
|
|
|
* This function is normally run from a thread context. However, we
|
|
|
|
* still use netif_rx() instead of netif_receive_skb() as was
|
|
|
|
* recommended in the mailing list. Reason is in some stress tests
|
|
|
|
* when sending/receiving a lot of data we seem to hit a softlock in
|
|
|
|
* the kernel's TCP implementation [aroudn tcp_delay_timer()]. Using
|
|
|
|
* netif_rx() took care of the issue.
|
|
|
|
*
|
|
|
|
* This is, of course, still open to do more research on why running
|
|
|
|
* with netif_receive_skb() hits this softlock. FIXME.
|
|
|
|
*/
|
|
|
|
void i2400m_net_erx(struct i2400m *i2400m, struct sk_buff *skb,
|
|
|
|
enum i2400m_cs cs)
|
|
|
|
{
|
|
|
|
struct net_device *net_dev = i2400m->wimax_dev.net_dev;
|
|
|
|
struct device *dev = i2400m_dev(i2400m);
|
|
|
|
int protocol;
|
|
|
|
|
2009-03-12 14:24:03 +08:00
|
|
|
d_fnstart(2, dev, "(i2400m %p skb %p [%u] cs %d)\n",
|
2009-03-01 07:42:52 +08:00
|
|
|
i2400m, skb, skb->len, cs);
|
|
|
|
switch(cs) {
|
|
|
|
case I2400M_CS_IPV4_0:
|
|
|
|
case I2400M_CS_IPV4:
|
|
|
|
protocol = ETH_P_IP;
|
|
|
|
i2400m_rx_fake_eth_header(i2400m->wimax_dev.net_dev,
|
2009-03-01 07:42:53 +08:00
|
|
|
skb->data - ETH_HLEN,
|
|
|
|
cpu_to_be16(ETH_P_IP));
|
2009-03-01 07:42:52 +08:00
|
|
|
skb_set_mac_header(skb, -ETH_HLEN);
|
|
|
|
skb->dev = i2400m->wimax_dev.net_dev;
|
|
|
|
skb->protocol = htons(ETH_P_IP);
|
|
|
|
net_dev->stats.rx_packets++;
|
|
|
|
net_dev->stats.rx_bytes += skb->len;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
dev_err(dev, "ERX: BUG? CS type %u unsupported\n", cs);
|
|
|
|
goto error;
|
|
|
|
|
|
|
|
}
|
|
|
|
d_printf(3, dev, "ERX: receiving %d bytes to the network stack\n",
|
|
|
|
skb->len);
|
|
|
|
d_dump(4, dev, skb->data, skb->len);
|
|
|
|
netif_rx_ni(skb); /* see notes in function header */
|
|
|
|
error:
|
2009-03-12 14:24:03 +08:00
|
|
|
d_fnend(2, dev, "(i2400m %p skb %p [%u] cs %d) = void\n",
|
2009-03-01 07:42:52 +08:00
|
|
|
i2400m, skb, skb->len, cs);
|
|
|
|
}
|
|
|
|
|
2009-01-10 00:43:49 +08:00
|
|
|
static const struct net_device_ops i2400m_netdev_ops = {
|
|
|
|
.ndo_open = i2400m_open,
|
|
|
|
.ndo_stop = i2400m_stop,
|
|
|
|
.ndo_start_xmit = i2400m_hard_start_xmit,
|
|
|
|
.ndo_tx_timeout = i2400m_tx_timeout,
|
|
|
|
};
|
|
|
|
|
2009-09-18 04:06:14 +08:00
|
|
|
static void i2400m_get_drvinfo(struct net_device *net_dev,
|
|
|
|
struct ethtool_drvinfo *info)
|
|
|
|
{
|
|
|
|
struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
|
|
|
|
|
2013-01-06 08:44:26 +08:00
|
|
|
strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
|
|
|
|
strlcpy(info->fw_version, i2400m->fw_name ? : "",
|
|
|
|
sizeof(info->fw_version));
|
2009-09-18 04:06:14 +08:00
|
|
|
if (net_dev->dev.parent)
|
2013-01-06 08:44:26 +08:00
|
|
|
strlcpy(info->bus_info, dev_name(net_dev->dev.parent),
|
|
|
|
sizeof(info->bus_info));
|
2009-09-18 04:06:14 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static const struct ethtool_ops i2400m_ethtool_ops = {
|
|
|
|
.get_drvinfo = i2400m_get_drvinfo,
|
|
|
|
.get_link = ethtool_op_get_link,
|
|
|
|
};
|
2008-12-21 08:57:45 +08:00
|
|
|
|
|
|
|
/**
|
|
|
|
* i2400m_netdev_setup - Setup setup @net_dev's i2400m private data
|
|
|
|
*
|
|
|
|
* Called by alloc_netdev()
|
|
|
|
*/
|
|
|
|
void i2400m_netdev_setup(struct net_device *net_dev)
|
|
|
|
{
|
|
|
|
d_fnstart(3, NULL, "(net_dev %p)\n", net_dev);
|
|
|
|
ether_setup(net_dev);
|
|
|
|
net_dev->mtu = I2400M_MAX_MTU;
|
2016-10-21 01:55:18 +08:00
|
|
|
net_dev->min_mtu = 0;
|
|
|
|
net_dev->max_mtu = I2400M_MAX_MTU;
|
2008-12-21 08:57:45 +08:00
|
|
|
net_dev->tx_queue_len = I2400M_TX_QLEN;
|
|
|
|
net_dev->features =
|
|
|
|
NETIF_F_VLAN_CHALLENGED
|
|
|
|
| NETIF_F_HIGHDMA;
|
|
|
|
net_dev->flags =
|
|
|
|
IFF_NOARP /* i2400m is apure IP device */
|
|
|
|
& (~IFF_BROADCAST /* i2400m is P2P */
|
|
|
|
& ~IFF_MULTICAST);
|
|
|
|
net_dev->watchdog_timeo = I2400M_TX_TIMEOUT;
|
2009-01-10 00:43:49 +08:00
|
|
|
net_dev->netdev_ops = &i2400m_netdev_ops;
|
2009-09-18 04:06:14 +08:00
|
|
|
net_dev->ethtool_ops = &i2400m_ethtool_ops;
|
2008-12-21 08:57:45 +08:00
|
|
|
d_fnend(3, NULL, "(net_dev %p) = void\n", net_dev);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(i2400m_netdev_setup);
|
|
|
|
|