Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6: (109 commits)
  [ETHTOOL]: Fix UFO typo
  [SCTP]: Fix persistent slowdown in sctp when a gap ack consumes rx buffer.
  [SCTP]: Send only 1 window update SACK per message.
  [SCTP]: Don't do CRC32C checksum over loopback.
  [SCTP] Reset rtt_in_progress for the chunk when processing its sack.
  [SCTP]: Reject sctp packets with broadcast addresses.
  [SCTP]: Limit association max_retrans setting in setsockopt.
  [PFKEYV2]: Fix inconsistent typing in struct sadb_x_kmprivate.
  [IPV6]: Sum real space for RTAs.
  [IRDA]: Use put_unaligned() in irlmp_do_discovery().
  [BRIDGE]: Add support for NETIF_F_HW_CSUM devices
  [NET]: Add NETIF_F_GEN_CSUM and NETIF_F_ALL_CSUM
  [TG3]: Convert to non-LLTX
  [TG3]: Remove unnecessary tx_lock
  [TCP]: Add tcp_slow_start_after_idle sysctl.
  [BNX2]: Update version and reldate
  [BNX2]: Use CPU native page size
  [BNX2]: Use compressed firmware
  [BNX2]: Add firmware decompression
  [BNX2]: Allow WoL settings on new 5708 chips
  ...

Manual fixup for conflict in drivers/net/tulip/winbond-840.c
Linus Torvalds  2006-06-19 18:55:56 -07:00
commit d0b952a983
220 files changed, 12080 insertions(+), 5832 deletions(-)


@@ -1402,6 +1402,15 @@ running once the system is up.
 	If enabled at boot time, /selinux/disable can be used
 	later to disable prior to initial policy load.
+	selinux_compat_net =
+			[SELINUX] Set initial selinux_compat_net flag value.
+			Format: { "0" | "1" }
+			0 -- use new secmark-based packet controls
+			1 -- use legacy packet controls
+			Default value is 0 (preferred).
+			Value can be changed at runtime via
+			/selinux/compat_net.
 	serialnumber	[BUGS=IA-32]
 	sg_def_reserved_size=	[SCSI]


@@ -362,6 +362,13 @@ tcp_workaround_signed_windows - BOOLEAN
	not receive a window scaling option from them.
	Default: 0
 
+tcp_slow_start_after_idle - BOOLEAN
+	If set, provide RFC2861 behavior and time out the congestion
+	window after an idle period. An idle period is defined as
+	the current RTO. If unset, the congestion window will not
+	be timed out after an idle period.
+	Default: 1
+
 IP Variables:
 
 ip_local_port_range - 2 INTEGERS


@@ -42,9 +42,9 @@ dev->get_stats:
	Context: nominally process, but don't sleep inside an rwlock
 
 dev->hard_start_xmit:
-	Synchronization: dev->xmit_lock spinlock.
+	Synchronization: netif_tx_lock spinlock.
	When the driver sets NETIF_F_LLTX in dev->features this will be
-	called without holding xmit_lock. In this case the driver
+	called without holding netif_tx_lock. In this case the driver
	has to lock by itself when needed. It is recommended to use a try lock
	for this and return -1 when the spin lock fails.
	The locking there should also properly protect against
@@ -62,12 +62,12 @@ dev->hard_start_xmit:
	Only valid when NETIF_F_LLTX is set.
 
 dev->tx_timeout:
-	Synchronization: dev->xmit_lock spinlock.
+	Synchronization: netif_tx_lock spinlock.
	Context: BHs disabled
	Notes: netif_queue_stopped() is guaranteed true
 
 dev->set_multicast_list:
-	Synchronization: dev->xmit_lock spinlock.
+	Synchronization: netif_tx_lock spinlock.
	Context: BHs disabled
 
 dev->poll:
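(Aside, not part of the patch: the NETIF_F_LLTX rule above is easiest to see in a driver skeleton. Below is a minimal sketch of a hypothetical LLTX driver's hard_start_xmit, taking its own private lock with a try lock and returning -1 (NETDEV_TX_LOCKED) on contention so the core requeues the packet. All "mydrv" names are invented for illustration.)

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

/* Hypothetical per-device private data; illustrative only. */
struct mydrv_priv {
	spinlock_t tx_lock;
};

static int mydrv_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mydrv_priv *priv = netdev_priv(dev);

	/* With NETIF_F_LLTX set the core no longer holds netif_tx_lock here,
	 * so take the driver's own lock with a try lock and back off. */
	if (!spin_trylock(&priv->tx_lock))
		return NETDEV_TX_LOCKED;	/* -1: core will retry later */

	/* ... hand skb to hardware, update stats ... */
	dev_kfree_skb(skb);			/* placeholder: pretend it was sent */

	spin_unlock(&priv->tx_lock);
	return NETDEV_TX_OK;
}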


@@ -72,4 +72,6 @@ source "drivers/edac/Kconfig"
 source "drivers/rtc/Kconfig"
 
+source "drivers/dma/Kconfig"
+
 endmenu


@@ -74,3 +74,4 @@ obj-$(CONFIG_SGI_SN)	+= sn/
 obj-y				+= firmware/
 obj-$(CONFIG_CRYPTO)		+= crypto/
 obj-$(CONFIG_SUPERH)		+= sh/
+obj-$(CONFIG_DMA_ENGINE)	+= dma/


@@ -116,8 +116,7 @@ aoenet_rcv(struct sk_buff *skb, struct net_device *ifp, struct packet_type *pt,
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		return 0;
-	if (skb_is_nonlinear(skb))
-	if (skb_linearize(skb, GFP_ATOMIC) < 0)
+	if (skb_linearize(skb))
		goto exit;
	if (!is_aoe_netif(ifp))
		goto exit;


@@ -127,7 +127,7 @@ void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id)
 
	if (found) {
		cn_queue_free_callback(cbq);
-		atomic_dec_and_test(&dev->refcnt);
+		atomic_dec(&dev->refcnt);
	}
 }

drivers/dma/Kconfig (new file, 34 lines)

@@ -0,0 +1,34 @@
#
# DMA engine configuration
#
menu "DMA Engine support"
config DMA_ENGINE
bool "Support for DMA engines"
---help---
DMA engines offload copy operations from the CPU to dedicated
hardware, allowing the copies to happen asynchronously.
comment "DMA Clients"
config NET_DMA
bool "Network: TCP receive copy offload"
depends on DMA_ENGINE && NET
default y
---help---
This enables the use of DMA engines in the network stack to
offload receive copy-to-user operations, freeing CPU cycles.
Since this is the main user of the DMA engine, it should be enabled;
say Y here.
comment "DMA Devices"
config INTEL_IOATDMA
tristate "Intel I/OAT DMA support"
depends on DMA_ENGINE && PCI
default m
---help---
Enable support for the Intel(R) I/OAT DMA engine.
endmenu
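(Aside, not part of the new file: with the options above, a resulting .config fragment enabling the subsystem might look like the following, matching the defaults declared in these Kconfig entries.)

CONFIG_DMA_ENGINE=y
CONFIG_NET_DMA=y
CONFIG_INTEL_IOATDMA=m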

drivers/dma/Makefile (new file, 3 lines)

@@ -0,0 +1,3 @@
obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
obj-$(CONFIG_NET_DMA) += iovlock.o
obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o

drivers/dma/dmaengine.c (new file, 408 lines)

@@ -0,0 +1,408 @@
/*
* Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution in the
* file called COPYING.
*/
/*
* This code implements the DMA subsystem. It provides a HW-neutral interface
* for other kernel code to use asynchronous memory copy capabilities,
* if present, and allows different HW DMA drivers to register as providing
* this capability.
*
* Due to the fact we are accelerating what is already a relatively fast
* operation, the code goes to great lengths to avoid additional overhead,
* such as locking.
*
* LOCKING:
*
* The subsystem keeps two global lists, dma_device_list and dma_client_list.
* Both of these are protected by a mutex, dma_list_mutex.
*
* Each device has a channels list, which runs unlocked but is never modified
* once the device is registered, it's just setup by the driver.
*
* Each client has a channels list, it's only modified under the client->lock
* and in an RCU callback, so it's safe to read under rcu_read_lock().
*
* Each device has a kref, which is initialized to 1 when the device is
* registered. A kref_put is done for each class_device registered. When the
* class_device is released, the corresponding kref_put is done in the release
* method. Every time one of the device's channels is allocated to a client,
* a kref_get occurs. When the channel is freed, the corresponding kref_put
* happens. The device's release function does a completion, so
* unregister_device does a remove event, class_device_unregister, a kref_put
* for the first reference, then waits on the completion for all other
* references to finish.
*
* Each channel has an open-coded implementation of Rusty Russell's "bigref,"
* with a kref and a per_cpu local_t. A single reference is set when on an
* ADDED event, and removed with a REMOVE event. Net DMA client takes an
* extra reference per outstanding transaction. The release function does a
* kref_put on the device. -ChrisL
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
static DEFINE_MUTEX(dma_list_mutex);
static LIST_HEAD(dma_device_list);
static LIST_HEAD(dma_client_list);
/* --- sysfs implementation --- */
static ssize_t show_memcpy_count(struct class_device *cd, char *buf)
{
struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
unsigned long count = 0;
int i;
for_each_possible_cpu(i)
count += per_cpu_ptr(chan->local, i)->memcpy_count;
return sprintf(buf, "%lu\n", count);
}
static ssize_t show_bytes_transferred(struct class_device *cd, char *buf)
{
struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
unsigned long count = 0;
int i;
for_each_possible_cpu(i)
count += per_cpu_ptr(chan->local, i)->bytes_transferred;
return sprintf(buf, "%lu\n", count);
}
static ssize_t show_in_use(struct class_device *cd, char *buf)
{
struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
return sprintf(buf, "%d\n", (chan->client ? 1 : 0));
}
static struct class_device_attribute dma_class_attrs[] = {
__ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
__ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
__ATTR(in_use, S_IRUGO, show_in_use, NULL),
__ATTR_NULL
};
static void dma_async_device_cleanup(struct kref *kref);
static void dma_class_dev_release(struct class_device *cd)
{
struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
kref_put(&chan->device->refcount, dma_async_device_cleanup);
}
static struct class dma_devclass = {
.name = "dma",
.class_dev_attrs = dma_class_attrs,
.release = dma_class_dev_release,
};
/* --- client and device registration --- */
/**
* dma_client_chan_alloc - try to allocate a channel to a client
* @client: &dma_client
*
* Called with dma_list_mutex held.
*/
static struct dma_chan *dma_client_chan_alloc(struct dma_client *client)
{
struct dma_device *device;
struct dma_chan *chan;
unsigned long flags;
int desc; /* allocated descriptor count */
/* Find a channel, any DMA engine will do */
list_for_each_entry(device, &dma_device_list, global_node) {
list_for_each_entry(chan, &device->channels, device_node) {
if (chan->client)
continue;
desc = chan->device->device_alloc_chan_resources(chan);
if (desc >= 0) {
kref_get(&device->refcount);
kref_init(&chan->refcount);
chan->slow_ref = 0;
INIT_RCU_HEAD(&chan->rcu);
chan->client = client;
spin_lock_irqsave(&client->lock, flags);
list_add_tail_rcu(&chan->client_node,
&client->channels);
spin_unlock_irqrestore(&client->lock, flags);
return chan;
}
}
}
return NULL;
}
/**
* dma_client_chan_free - release a DMA channel
* @chan: &dma_chan
*/
void dma_chan_cleanup(struct kref *kref)
{
struct dma_chan *chan = container_of(kref, struct dma_chan, refcount);
chan->device->device_free_chan_resources(chan);
chan->client = NULL;
kref_put(&chan->device->refcount, dma_async_device_cleanup);
}
static void dma_chan_free_rcu(struct rcu_head *rcu)
{
struct dma_chan *chan = container_of(rcu, struct dma_chan, rcu);
int bias = 0x7FFFFFFF;
int i;
for_each_possible_cpu(i)
bias -= local_read(&per_cpu_ptr(chan->local, i)->refcount);
atomic_sub(bias, &chan->refcount.refcount);
kref_put(&chan->refcount, dma_chan_cleanup);
}
static void dma_client_chan_free(struct dma_chan *chan)
{
atomic_add(0x7FFFFFFF, &chan->refcount.refcount);
chan->slow_ref = 1;
call_rcu(&chan->rcu, dma_chan_free_rcu);
}
/**
* dma_chans_rebalance - reallocate channels to clients
*
* When the number of DMA channels in the system changes,
* channels need to be rebalanced among clients
*/
static void dma_chans_rebalance(void)
{
struct dma_client *client;
struct dma_chan *chan;
unsigned long flags;
mutex_lock(&dma_list_mutex);
list_for_each_entry(client, &dma_client_list, global_node) {
while (client->chans_desired > client->chan_count) {
chan = dma_client_chan_alloc(client);
if (!chan)
break;
client->chan_count++;
client->event_callback(client,
chan,
DMA_RESOURCE_ADDED);
}
while (client->chans_desired < client->chan_count) {
spin_lock_irqsave(&client->lock, flags);
chan = list_entry(client->channels.next,
struct dma_chan,
client_node);
list_del_rcu(&chan->client_node);
spin_unlock_irqrestore(&client->lock, flags);
client->chan_count--;
client->event_callback(client,
chan,
DMA_RESOURCE_REMOVED);
dma_client_chan_free(chan);
}
}
mutex_unlock(&dma_list_mutex);
}
/**
* dma_async_client_register - allocate and register a &dma_client
* @event_callback: callback for notification of channel addition/removal
*/
struct dma_client *dma_async_client_register(dma_event_callback event_callback)
{
struct dma_client *client;
client = kzalloc(sizeof(*client), GFP_KERNEL);
if (!client)
return NULL;
INIT_LIST_HEAD(&client->channels);
spin_lock_init(&client->lock);
client->chans_desired = 0;
client->chan_count = 0;
client->event_callback = event_callback;
mutex_lock(&dma_list_mutex);
list_add_tail(&client->global_node, &dma_client_list);
mutex_unlock(&dma_list_mutex);
return client;
}
/**
* dma_async_client_unregister - unregister a client and free the &dma_client
* @client:
*
* Force frees any allocated DMA channels, frees the &dma_client memory
*/
void dma_async_client_unregister(struct dma_client *client)
{
struct dma_chan *chan;
if (!client)
return;
rcu_read_lock();
list_for_each_entry_rcu(chan, &client->channels, client_node)
dma_client_chan_free(chan);
rcu_read_unlock();
mutex_lock(&dma_list_mutex);
list_del(&client->global_node);
mutex_unlock(&dma_list_mutex);
kfree(client);
dma_chans_rebalance();
}
/**
* dma_async_client_chan_request - request DMA channels
* @client: &dma_client
* @number: count of DMA channels requested
*
* Clients call dma_async_client_chan_request() to specify how many
* DMA channels they need, 0 to free all currently allocated.
* The resulting allocations/frees are indicated to the client via the
* event callback.
*/
void dma_async_client_chan_request(struct dma_client *client,
unsigned int number)
{
client->chans_desired = number;
dma_chans_rebalance();
}
/**
* dma_async_device_register -
* @device: &dma_device
*/
int dma_async_device_register(struct dma_device *device)
{
static int id;
int chancnt = 0;
struct dma_chan* chan;
if (!device)
return -ENODEV;
init_completion(&device->done);
kref_init(&device->refcount);
device->dev_id = id++;
/* represent channels in sysfs. Probably want devs too */
list_for_each_entry(chan, &device->channels, device_node) {
chan->local = alloc_percpu(typeof(*chan->local));
if (chan->local == NULL)
continue;
chan->chan_id = chancnt++;
chan->class_dev.class = &dma_devclass;
chan->class_dev.dev = NULL;
snprintf(chan->class_dev.class_id, BUS_ID_SIZE, "dma%dchan%d",
device->dev_id, chan->chan_id);
kref_get(&device->refcount);
class_device_register(&chan->class_dev);
}
mutex_lock(&dma_list_mutex);
list_add_tail(&device->global_node, &dma_device_list);
mutex_unlock(&dma_list_mutex);
dma_chans_rebalance();
return 0;
}
/**
* dma_async_device_unregister -
* @device: &dma_device
*/
static void dma_async_device_cleanup(struct kref *kref)
{
struct dma_device *device;
device = container_of(kref, struct dma_device, refcount);
complete(&device->done);
}
void dma_async_device_unregister(struct dma_device* device)
{
struct dma_chan *chan;
unsigned long flags;
mutex_lock(&dma_list_mutex);
list_del(&device->global_node);
mutex_unlock(&dma_list_mutex);
list_for_each_entry(chan, &device->channels, device_node) {
if (chan->client) {
spin_lock_irqsave(&chan->client->lock, flags);
list_del(&chan->client_node);
chan->client->chan_count--;
spin_unlock_irqrestore(&chan->client->lock, flags);
chan->client->event_callback(chan->client,
chan,
DMA_RESOURCE_REMOVED);
dma_client_chan_free(chan);
}
class_device_unregister(&chan->class_dev);
}
dma_chans_rebalance();
kref_put(&device->refcount, dma_async_device_cleanup);
wait_for_completion(&device->done);
}
static int __init dma_bus_init(void)
{
mutex_init(&dma_list_mutex);
return class_register(&dma_devclass);
}
subsys_initcall(dma_bus_init);
EXPORT_SYMBOL(dma_async_client_register);
EXPORT_SYMBOL(dma_async_client_unregister);
EXPORT_SYMBOL(dma_async_client_chan_request);
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);
EXPORT_SYMBOL(dma_async_memcpy_complete);
EXPORT_SYMBOL(dma_async_memcpy_issue_pending);
EXPORT_SYMBOL(dma_async_device_register);
EXPORT_SYMBOL(dma_async_device_unregister);
EXPORT_SYMBOL(dma_chan_cleanup);
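(Aside, not part of the patch: a minimal sketch of how a kernel client might drive the API exported above — register a client, request a channel, issue an asynchronous memcpy and poll for completion. The callback, init function and buffers are hypothetical, error handling is omitted, and the callback signature assumes the dma_event_callback typedef from <linux/dmaengine.h> in this series.)

#include <linux/init.h>
#include <linux/module.h>
#include <linux/dmaengine.h>

/* Hypothetical dmaengine client; names are illustrative only. */
static void my_dma_event(struct dma_client *client, struct dma_chan *chan,
			 enum dma_event event)
{
	/* DMA_RESOURCE_ADDED / DMA_RESOURCE_REMOVED notifications arrive here;
	 * a real client would record or forget the channel pointer accordingly. */
}

static void my_dma_copy(struct dma_chan *chan, void *dst, void *src, size_t len)
{
	dma_cookie_t cookie;

	cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
	dma_async_memcpy_issue_pending(chan);

	/* Poll until the engine reports this cookie complete. */
	while (dma_async_memcpy_complete(chan, cookie, NULL, NULL) == DMA_IN_PROGRESS)
		cpu_relax();
}

static int __init my_client_init(void)
{
	struct dma_client *client = dma_async_client_register(my_dma_event);

	if (!client)
		return -ENOMEM;
	dma_async_client_chan_request(client, 1);	/* ask for one channel */
	return 0;
}
module_init(my_client_init);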

drivers/dma/ioatdma.c (new file, 840 lines)

@@ -0,0 +1,840 @@
/*
* Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution in the
* file called COPYING.
*/
/*
* This driver supports an Intel I/OAT DMA engine, which does asynchronous
* copy operations.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include "ioatdma.h"
#include "ioatdma_io.h"
#include "ioatdma_registers.h"
#include "ioatdma_hw.h"
#define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common)
#define to_ioat_device(dev) container_of(dev, struct ioat_device, common)
#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
/* internal functions */
static int __devinit ioat_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void __devexit ioat_remove(struct pci_dev *pdev);
static int enumerate_dma_channels(struct ioat_device *device)
{
u8 xfercap_scale;
u32 xfercap;
int i;
struct ioat_dma_chan *ioat_chan;
device->common.chancnt = ioatdma_read8(device, IOAT_CHANCNT_OFFSET);
xfercap_scale = ioatdma_read8(device, IOAT_XFERCAP_OFFSET);
xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
for (i = 0; i < device->common.chancnt; i++) {
ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
if (!ioat_chan) {
device->common.chancnt = i;
break;
}
ioat_chan->device = device;
ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
ioat_chan->xfercap = xfercap;
spin_lock_init(&ioat_chan->cleanup_lock);
spin_lock_init(&ioat_chan->desc_lock);
INIT_LIST_HEAD(&ioat_chan->free_desc);
INIT_LIST_HEAD(&ioat_chan->used_desc);
/* This should be made common somewhere in dmaengine.c */
ioat_chan->common.device = &device->common;
ioat_chan->common.client = NULL;
list_add_tail(&ioat_chan->common.device_node,
&device->common.channels);
}
return device->common.chancnt;
}
static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
struct ioat_dma_chan *ioat_chan,
int flags)
{
struct ioat_dma_descriptor *desc;
struct ioat_desc_sw *desc_sw;
struct ioat_device *ioat_device;
dma_addr_t phys;
ioat_device = to_ioat_device(ioat_chan->common.device);
desc = pci_pool_alloc(ioat_device->dma_pool, flags, &phys);
if (unlikely(!desc))
return NULL;
desc_sw = kzalloc(sizeof(*desc_sw), flags);
if (unlikely(!desc_sw)) {
pci_pool_free(ioat_device->dma_pool, desc, phys);
return NULL;
}
memset(desc, 0, sizeof(*desc));
desc_sw->hw = desc;
desc_sw->phys = phys;
return desc_sw;
}
#define INITIAL_IOAT_DESC_COUNT 128
static void ioat_start_null_desc(struct ioat_dma_chan *ioat_chan);
/* returns the actual number of allocated descriptors */
static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
{
struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
struct ioat_desc_sw *desc = NULL;
u16 chanctrl;
u32 chanerr;
int i;
LIST_HEAD(tmp_list);
/*
* In-use bit automatically set by reading chanctrl
* If 0, we got it, if 1, someone else did
*/
chanctrl = ioatdma_chan_read16(ioat_chan, IOAT_CHANCTRL_OFFSET);
if (chanctrl & IOAT_CHANCTRL_CHANNEL_IN_USE)
return -EBUSY;
/* Setup register to interrupt and write completion status on error */
chanctrl = IOAT_CHANCTRL_CHANNEL_IN_USE |
IOAT_CHANCTRL_ERR_INT_EN |
IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
IOAT_CHANCTRL_ERR_COMPLETION_EN;
ioatdma_chan_write16(ioat_chan, IOAT_CHANCTRL_OFFSET, chanctrl);
chanerr = ioatdma_chan_read32(ioat_chan, IOAT_CHANERR_OFFSET);
if (chanerr) {
printk("IOAT: CHANERR = %x, clearing\n", chanerr);
ioatdma_chan_write32(ioat_chan, IOAT_CHANERR_OFFSET, chanerr);
}
/* Allocate descriptors */
for (i = 0; i < INITIAL_IOAT_DESC_COUNT; i++) {
desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
if (!desc) {
printk(KERN_ERR "IOAT: Only %d initial descriptors\n", i);
break;
}
list_add_tail(&desc->node, &tmp_list);
}
spin_lock_bh(&ioat_chan->desc_lock);
list_splice(&tmp_list, &ioat_chan->free_desc);
spin_unlock_bh(&ioat_chan->desc_lock);
/* allocate a completion writeback area */
/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
ioat_chan->completion_virt =
pci_pool_alloc(ioat_chan->device->completion_pool,
GFP_KERNEL,
&ioat_chan->completion_addr);
memset(ioat_chan->completion_virt, 0,
sizeof(*ioat_chan->completion_virt));
ioatdma_chan_write32(ioat_chan, IOAT_CHANCMP_OFFSET_LOW,
((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF);
ioatdma_chan_write32(ioat_chan, IOAT_CHANCMP_OFFSET_HIGH,
((u64) ioat_chan->completion_addr) >> 32);
ioat_start_null_desc(ioat_chan);
return i;
}
static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);
static void ioat_dma_free_chan_resources(struct dma_chan *chan)
{
struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
struct ioat_device *ioat_device = to_ioat_device(chan->device);
struct ioat_desc_sw *desc, *_desc;
u16 chanctrl;
int in_use_descs = 0;
ioat_dma_memcpy_cleanup(ioat_chan);
ioatdma_chan_write8(ioat_chan, IOAT_CHANCMD_OFFSET, IOAT_CHANCMD_RESET);
spin_lock_bh(&ioat_chan->desc_lock);
list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) {
in_use_descs++;
list_del(&desc->node);
pci_pool_free(ioat_device->dma_pool, desc->hw, desc->phys);
kfree(desc);
}
list_for_each_entry_safe(desc, _desc, &ioat_chan->free_desc, node) {
list_del(&desc->node);
pci_pool_free(ioat_device->dma_pool, desc->hw, desc->phys);
kfree(desc);
}
spin_unlock_bh(&ioat_chan->desc_lock);
pci_pool_free(ioat_device->completion_pool,
ioat_chan->completion_virt,
ioat_chan->completion_addr);
/* one is ok since we left it on there on purpose */
if (in_use_descs > 1)
printk(KERN_ERR "IOAT: Freeing %d in use descriptors!\n",
in_use_descs - 1);
ioat_chan->last_completion = ioat_chan->completion_addr = 0;
/* Tell hw the chan is free */
chanctrl = ioatdma_chan_read16(ioat_chan, IOAT_CHANCTRL_OFFSET);
chanctrl &= ~IOAT_CHANCTRL_CHANNEL_IN_USE;
ioatdma_chan_write16(ioat_chan, IOAT_CHANCTRL_OFFSET, chanctrl);
}
/**
* do_ioat_dma_memcpy - actual function that initiates an IOAT DMA transaction
* @chan: IOAT DMA channel handle
* @dest: DMA destination address
* @src: DMA source address
* @len: transaction length in bytes
*/
static dma_cookie_t do_ioat_dma_memcpy(struct ioat_dma_chan *ioat_chan,
dma_addr_t dest,
dma_addr_t src,
size_t len)
{
struct ioat_desc_sw *first;
struct ioat_desc_sw *prev;
struct ioat_desc_sw *new;
dma_cookie_t cookie;
LIST_HEAD(new_chain);
u32 copy;
size_t orig_len;
dma_addr_t orig_src, orig_dst;
unsigned int desc_count = 0;
unsigned int append = 0;
if (!ioat_chan || !dest || !src)
return -EFAULT;
if (!len)
return ioat_chan->common.cookie;
orig_len = len;
orig_src = src;
orig_dst = dest;
first = NULL;
prev = NULL;
spin_lock_bh(&ioat_chan->desc_lock);
while (len) {
if (!list_empty(&ioat_chan->free_desc)) {
new = to_ioat_desc(ioat_chan->free_desc.next);
list_del(&new->node);
} else {
/* try to get another desc */
new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
/* will this ever happen? */
/* TODO add upper limit on these */
BUG_ON(!new);
}
copy = min((u32) len, ioat_chan->xfercap);
new->hw->size = copy;
new->hw->ctl = 0;
new->hw->src_addr = src;
new->hw->dst_addr = dest;
new->cookie = 0;
/* chain together the physical address list for the HW */
if (!first)
first = new;
else
prev->hw->next = (u64) new->phys;
prev = new;
len -= copy;
dest += copy;
src += copy;
list_add_tail(&new->node, &new_chain);
desc_count++;
}
new->hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
new->hw->next = 0;
/* cookie incr and addition to used_list must be atomic */
cookie = ioat_chan->common.cookie;
cookie++;
if (cookie < 0)
cookie = 1;
ioat_chan->common.cookie = new->cookie = cookie;
pci_unmap_addr_set(new, src, orig_src);
pci_unmap_addr_set(new, dst, orig_dst);
pci_unmap_len_set(new, src_len, orig_len);
pci_unmap_len_set(new, dst_len, orig_len);
/* write address into NextDescriptor field of last desc in chain */
to_ioat_desc(ioat_chan->used_desc.prev)->hw->next = first->phys;
list_splice_init(&new_chain, ioat_chan->used_desc.prev);
ioat_chan->pending += desc_count;
if (ioat_chan->pending >= 20) {
append = 1;
ioat_chan->pending = 0;
}
spin_unlock_bh(&ioat_chan->desc_lock);
if (append)
ioatdma_chan_write8(ioat_chan,
IOAT_CHANCMD_OFFSET,
IOAT_CHANCMD_APPEND);
return cookie;
}
/**
* ioat_dma_memcpy_buf_to_buf - wrapper that takes src & dest bufs
* @chan: IOAT DMA channel handle
* @dest: DMA destination address
* @src: DMA source address
* @len: transaction length in bytes
*/
static dma_cookie_t ioat_dma_memcpy_buf_to_buf(struct dma_chan *chan,
void *dest,
void *src,
size_t len)
{
dma_addr_t dest_addr;
dma_addr_t src_addr;
struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
dest_addr = pci_map_single(ioat_chan->device->pdev,
dest, len, PCI_DMA_FROMDEVICE);
src_addr = pci_map_single(ioat_chan->device->pdev,
src, len, PCI_DMA_TODEVICE);
return do_ioat_dma_memcpy(ioat_chan, dest_addr, src_addr, len);
}
/**
* ioat_dma_memcpy_buf_to_pg - wrapper, copying from a buf to a page
* @chan: IOAT DMA channel handle
* @page: pointer to the page to copy to
* @offset: offset into that page
* @src: DMA source address
* @len: transaction length in bytes
*/
static dma_cookie_t ioat_dma_memcpy_buf_to_pg(struct dma_chan *chan,
struct page *page,
unsigned int offset,
void *src,
size_t len)
{
dma_addr_t dest_addr;
dma_addr_t src_addr;
struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
dest_addr = pci_map_page(ioat_chan->device->pdev,
page, offset, len, PCI_DMA_FROMDEVICE);
src_addr = pci_map_single(ioat_chan->device->pdev,
src, len, PCI_DMA_TODEVICE);
return do_ioat_dma_memcpy(ioat_chan, dest_addr, src_addr, len);
}
/**
* ioat_dma_memcpy_pg_to_pg - wrapper, copying between two pages
* @chan: IOAT DMA channel handle
* @dest_pg: pointer to the page to copy to
* @dest_off: offset into that page
* @src_pg: pointer to the page to copy from
* @src_off: offset into that page
* @len: transaction length in bytes. This is guaranteed to not make a copy
* across a page boundary.
*/
static dma_cookie_t ioat_dma_memcpy_pg_to_pg(struct dma_chan *chan,
struct page *dest_pg,
unsigned int dest_off,
struct page *src_pg,
unsigned int src_off,
size_t len)
{
dma_addr_t dest_addr;
dma_addr_t src_addr;
struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
dest_addr = pci_map_page(ioat_chan->device->pdev,
dest_pg, dest_off, len, PCI_DMA_FROMDEVICE);
src_addr = pci_map_page(ioat_chan->device->pdev,
src_pg, src_off, len, PCI_DMA_TODEVICE);
return do_ioat_dma_memcpy(ioat_chan, dest_addr, src_addr, len);
}
/**
* ioat_dma_memcpy_issue_pending - push potentially unrecognized appended descriptors to hw
* @chan: DMA channel handle
*/
static void ioat_dma_memcpy_issue_pending(struct dma_chan *chan)
{
struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
if (ioat_chan->pending != 0) {
ioat_chan->pending = 0;
ioatdma_chan_write8(ioat_chan,
IOAT_CHANCMD_OFFSET,
IOAT_CHANCMD_APPEND);
}
}
static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *chan)
{
unsigned long phys_complete;
struct ioat_desc_sw *desc, *_desc;
dma_cookie_t cookie = 0;
prefetch(chan->completion_virt);
if (!spin_trylock(&chan->cleanup_lock))
return;
/* The completion writeback can happen at any time,
so reads by the driver need to be atomic operations
The descriptor physical addresses are limited to 32-bits
when the CPU can only do a 32-bit mov */
#if (BITS_PER_LONG == 64)
phys_complete =
chan->completion_virt->full & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
#else
phys_complete = chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
#endif
if ((chan->completion_virt->full & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
printk("IOAT: Channel halted, chanerr = %x\n",
ioatdma_chan_read32(chan, IOAT_CHANERR_OFFSET));
/* TODO do something to salvage the situation */
}
if (phys_complete == chan->last_completion) {
spin_unlock(&chan->cleanup_lock);
return;
}
spin_lock_bh(&chan->desc_lock);
list_for_each_entry_safe(desc, _desc, &chan->used_desc, node) {
/*
* Incoming DMA requests may use multiple descriptors, due to
* exceeding xfercap, perhaps. If so, only the last one will
* have a cookie, and require unmapping.
*/
if (desc->cookie) {
cookie = desc->cookie;
/* yes we are unmapping both _page and _single alloc'd
regions with unmap_page. Is this *really* that bad?
*/
pci_unmap_page(chan->device->pdev,
pci_unmap_addr(desc, dst),
pci_unmap_len(desc, dst_len),
PCI_DMA_FROMDEVICE);
pci_unmap_page(chan->device->pdev,
pci_unmap_addr(desc, src),
pci_unmap_len(desc, src_len),
PCI_DMA_TODEVICE);
}
if (desc->phys != phys_complete) {
/* a completed entry, but not the last, so cleanup */
list_del(&desc->node);
list_add_tail(&desc->node, &chan->free_desc);
} else {
/* last used desc. Do not remove, so we can append from
it, but don't look at it next time, either */
desc->cookie = 0;
/* TODO check status bits? */
break;
}
}
spin_unlock_bh(&chan->desc_lock);
chan->last_completion = phys_complete;
if (cookie != 0)
chan->completed_cookie = cookie;
spin_unlock(&chan->cleanup_lock);
}
/**
* ioat_dma_is_complete - poll the status of an IOAT DMA transaction
* @chan: IOAT DMA channel handle
* @cookie: DMA transaction identifier
*/
static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
dma_cookie_t cookie,
dma_cookie_t *done,
dma_cookie_t *used)
{
struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
dma_cookie_t last_used;
dma_cookie_t last_complete;
enum dma_status ret;
last_used = chan->cookie;
last_complete = ioat_chan->completed_cookie;
if (done)
*done= last_complete;
if (used)
*used = last_used;
ret = dma_async_is_complete(cookie, last_complete, last_used);
if (ret == DMA_SUCCESS)
return ret;
ioat_dma_memcpy_cleanup(ioat_chan);
last_used = chan->cookie;
last_complete = ioat_chan->completed_cookie;
if (done)
*done= last_complete;
if (used)
*used = last_used;
return dma_async_is_complete(cookie, last_complete, last_used);
}
/* PCI API */
static struct pci_device_id ioat_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT) },
{ 0, }
};
static struct pci_driver ioat_pci_drv = {
.name = "ioatdma",
.id_table = ioat_pci_tbl,
.probe = ioat_probe,
.remove = __devexit_p(ioat_remove),
};
static irqreturn_t ioat_do_interrupt(int irq, void *data, struct pt_regs *regs)
{
struct ioat_device *instance = data;
unsigned long attnstatus;
u8 intrctrl;
intrctrl = ioatdma_read8(instance, IOAT_INTRCTRL_OFFSET);
if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
return IRQ_NONE;
if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
ioatdma_write8(instance, IOAT_INTRCTRL_OFFSET, intrctrl);
return IRQ_NONE;
}
attnstatus = ioatdma_read32(instance, IOAT_ATTNSTATUS_OFFSET);
printk(KERN_ERR "ioatdma error: interrupt! status %lx\n", attnstatus);
ioatdma_write8(instance, IOAT_INTRCTRL_OFFSET, intrctrl);
return IRQ_HANDLED;
}
static void ioat_start_null_desc(struct ioat_dma_chan *ioat_chan)
{
struct ioat_desc_sw *desc;
spin_lock_bh(&ioat_chan->desc_lock);
if (!list_empty(&ioat_chan->free_desc)) {
desc = to_ioat_desc(ioat_chan->free_desc.next);
list_del(&desc->node);
} else {
/* try to get another desc */
spin_unlock_bh(&ioat_chan->desc_lock);
desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
spin_lock_bh(&ioat_chan->desc_lock);
/* will this ever happen? */
BUG_ON(!desc);
}
desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;
desc->hw->next = 0;
list_add_tail(&desc->node, &ioat_chan->used_desc);
spin_unlock_bh(&ioat_chan->desc_lock);
#if (BITS_PER_LONG == 64)
ioatdma_chan_write64(ioat_chan, IOAT_CHAINADDR_OFFSET, desc->phys);
#else
ioatdma_chan_write32(ioat_chan,
IOAT_CHAINADDR_OFFSET_LOW,
(u32) desc->phys);
ioatdma_chan_write32(ioat_chan, IOAT_CHAINADDR_OFFSET_HIGH, 0);
#endif
ioatdma_chan_write8(ioat_chan, IOAT_CHANCMD_OFFSET, IOAT_CHANCMD_START);
}
/*
* Perform an IOAT transaction to verify the HW works.
*/
#define IOAT_TEST_SIZE 2000
static int ioat_self_test(struct ioat_device *device)
{
int i;
u8 *src;
u8 *dest;
struct dma_chan *dma_chan;
dma_cookie_t cookie;
int err = 0;
src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, SLAB_KERNEL);
if (!src)
return -ENOMEM;
dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, SLAB_KERNEL);
if (!dest) {
kfree(src);
return -ENOMEM;
}
/* Fill in src buffer */
for (i = 0; i < IOAT_TEST_SIZE; i++)
src[i] = (u8)i;
/* Start copy, using first DMA channel */
dma_chan = container_of(device->common.channels.next,
struct dma_chan,
device_node);
if (ioat_dma_alloc_chan_resources(dma_chan) < 1) {
err = -ENODEV;
goto out;
}
cookie = ioat_dma_memcpy_buf_to_buf(dma_chan, dest, src, IOAT_TEST_SIZE);
ioat_dma_memcpy_issue_pending(dma_chan);
msleep(1);
if (ioat_dma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
printk(KERN_ERR "ioatdma: Self-test copy timed out, disabling\n");
err = -ENODEV;
goto free_resources;
}
if (memcmp(src, dest, IOAT_TEST_SIZE)) {
printk(KERN_ERR "ioatdma: Self-test copy failed compare, disabling\n");
err = -ENODEV;
goto free_resources;
}
free_resources:
ioat_dma_free_chan_resources(dma_chan);
out:
kfree(src);
kfree(dest);
return err;
}
static int __devinit ioat_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
int err;
unsigned long mmio_start, mmio_len;
void *reg_base;
struct ioat_device *device;
err = pci_enable_device(pdev);
if (err)
goto err_enable_device;
err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
if (err)
err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
if (err)
goto err_set_dma_mask;
err = pci_request_regions(pdev, ioat_pci_drv.name);
if (err)
goto err_request_regions;
mmio_start = pci_resource_start(pdev, 0);
mmio_len = pci_resource_len(pdev, 0);
reg_base = ioremap(mmio_start, mmio_len);
if (!reg_base) {
err = -ENOMEM;
goto err_ioremap;
}
device = kzalloc(sizeof(*device), GFP_KERNEL);
if (!device) {
err = -ENOMEM;
goto err_kzalloc;
}
/* DMA coherent memory pool for DMA descriptor allocations */
device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
sizeof(struct ioat_dma_descriptor), 64, 0);
if (!device->dma_pool) {
err = -ENOMEM;
goto err_dma_pool;
}
device->completion_pool = pci_pool_create("completion_pool", pdev, sizeof(u64), SMP_CACHE_BYTES, SMP_CACHE_BYTES);
if (!device->completion_pool) {
err = -ENOMEM;
goto err_completion_pool;
}
device->pdev = pdev;
pci_set_drvdata(pdev, device);
#ifdef CONFIG_PCI_MSI
if (pci_enable_msi(pdev) == 0) {
device->msi = 1;
} else {
device->msi = 0;
}
#endif
err = request_irq(pdev->irq, &ioat_do_interrupt, SA_SHIRQ, "ioat",
device);
if (err)
goto err_irq;
device->reg_base = reg_base;
ioatdma_write8(device, IOAT_INTRCTRL_OFFSET, IOAT_INTRCTRL_MASTER_INT_EN);
pci_set_master(pdev);
INIT_LIST_HEAD(&device->common.channels);
enumerate_dma_channels(device);
device->common.device_alloc_chan_resources = ioat_dma_alloc_chan_resources;
device->common.device_free_chan_resources = ioat_dma_free_chan_resources;
device->common.device_memcpy_buf_to_buf = ioat_dma_memcpy_buf_to_buf;
device->common.device_memcpy_buf_to_pg = ioat_dma_memcpy_buf_to_pg;
device->common.device_memcpy_pg_to_pg = ioat_dma_memcpy_pg_to_pg;
device->common.device_memcpy_complete = ioat_dma_is_complete;
device->common.device_memcpy_issue_pending = ioat_dma_memcpy_issue_pending;
printk(KERN_INFO "Intel(R) I/OAT DMA Engine found, %d channels\n",
device->common.chancnt);
err = ioat_self_test(device);
if (err)
goto err_self_test;
dma_async_device_register(&device->common);
return 0;
err_self_test:
err_irq:
pci_pool_destroy(device->completion_pool);
err_completion_pool:
pci_pool_destroy(device->dma_pool);
err_dma_pool:
kfree(device);
err_kzalloc:
iounmap(reg_base);
err_ioremap:
pci_release_regions(pdev);
err_request_regions:
err_set_dma_mask:
pci_disable_device(pdev);
err_enable_device:
return err;
}
static void __devexit ioat_remove(struct pci_dev *pdev)
{
struct ioat_device *device;
struct dma_chan *chan, *_chan;
struct ioat_dma_chan *ioat_chan;
device = pci_get_drvdata(pdev);
dma_async_device_unregister(&device->common);
free_irq(device->pdev->irq, device);
#ifdef CONFIG_PCI_MSI
if (device->msi)
pci_disable_msi(device->pdev);
#endif
pci_pool_destroy(device->dma_pool);
pci_pool_destroy(device->completion_pool);
iounmap(device->reg_base);
pci_release_regions(pdev);
pci_disable_device(pdev);
list_for_each_entry_safe(chan, _chan, &device->common.channels, device_node) {
ioat_chan = to_ioat_chan(chan);
list_del(&chan->device_node);
kfree(ioat_chan);
}
kfree(device);
}
/* MODULE API */
MODULE_VERSION("1.7");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");
static int __init ioat_init_module(void)
{
/* it's currently unsafe to unload this module */
/* if forced, worst case is that rmmod hangs */
if (THIS_MODULE != NULL)
THIS_MODULE->unsafe = 1;
return pci_module_init(&ioat_pci_drv);
}
module_init(ioat_init_module);
static void __exit ioat_exit_module(void)
{
pci_unregister_driver(&ioat_pci_drv);
}
module_exit(ioat_exit_module);

drivers/dma/ioatdma.h (new file, 125 lines)

@@ -0,0 +1,125 @@
/*
* Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution in the
* file called COPYING.
*/
#ifndef IOATDMA_H
#define IOATDMA_H
#include <linux/dmaengine.h>
#include "ioatdma_hw.h"
#include <linux/init.h>
#include <linux/dmapool.h>
#include <linux/cache.h>
#include <linux/pci_ids.h>
#define IOAT_LOW_COMPLETION_MASK 0xffffffc0
extern struct list_head dma_device_list;
extern struct list_head dma_client_list;
/**
* struct ioat_device - internal representation of an IOAT device
* @pdev: PCI-Express device
* @reg_base: MMIO register space base address
* @dma_pool: for allocating DMA descriptors
* @common: embedded struct dma_device
* @msi: Message Signaled Interrupt number
*/
struct ioat_device {
struct pci_dev *pdev;
void *reg_base;
struct pci_pool *dma_pool;
struct pci_pool *completion_pool;
struct dma_device common;
u8 msi;
};
/**
* struct ioat_dma_chan - internal representation of a DMA channel
* @device:
* @reg_base:
* @sw_in_use:
* @completion:
* @completion_low:
* @completion_high:
* @completed_cookie: last cookie seen completed on cleanup
* @cookie: value of last cookie given to client
* @last_completion:
* @xfercap:
* @desc_lock:
* @free_desc:
* @used_desc:
* @resource:
* @device_node:
*/
struct ioat_dma_chan {
void *reg_base;
dma_cookie_t completed_cookie;
unsigned long last_completion;
u32 xfercap; /* XFERCAP register value expanded out */
spinlock_t cleanup_lock;
spinlock_t desc_lock;
struct list_head free_desc;
struct list_head used_desc;
int pending;
struct ioat_device *device;
struct dma_chan common;
dma_addr_t completion_addr;
union {
u64 full; /* HW completion writeback */
struct {
u32 low;
u32 high;
};
} *completion_virt;
};
/* wrapper around hardware descriptor format + additional software fields */
/**
* struct ioat_desc_sw - wrapper around hardware descriptor
* @hw: hardware DMA descriptor
* @node:
* @cookie:
* @phys:
*/
struct ioat_desc_sw {
struct ioat_dma_descriptor *hw;
struct list_head node;
dma_cookie_t cookie;
dma_addr_t phys;
DECLARE_PCI_UNMAP_ADDR(src)
DECLARE_PCI_UNMAP_LEN(src_len)
DECLARE_PCI_UNMAP_ADDR(dst)
DECLARE_PCI_UNMAP_LEN(dst_len)
};
#endif /* IOATDMA_H */

drivers/dma/ioatdma_hw.h (new file, 52 lines)

@@ -0,0 +1,52 @@
/*
* Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution in the
* file called COPYING.
*/
#ifndef _IOAT_HW_H_
#define _IOAT_HW_H_
/* PCI Configuration Space Values */
#define IOAT_PCI_VID 0x8086
#define IOAT_PCI_DID 0x1A38
#define IOAT_PCI_RID 0x00
#define IOAT_PCI_SVID 0x8086
#define IOAT_PCI_SID 0x8086
#define IOAT_VER 0x12 /* Version 1.2 */
struct ioat_dma_descriptor {
uint32_t size;
uint32_t ctl;
uint64_t src_addr;
uint64_t dst_addr;
uint64_t next;
uint64_t rsv1;
uint64_t rsv2;
uint64_t user1;
uint64_t user2;
};
#define IOAT_DMA_DESCRIPTOR_CTL_INT_GN 0x00000001
#define IOAT_DMA_DESCRIPTOR_CTL_SRC_SN 0x00000002
#define IOAT_DMA_DESCRIPTOR_CTL_DST_SN 0x00000004
#define IOAT_DMA_DESCRIPTOR_CTL_CP_STS 0x00000008
#define IOAT_DMA_DESCRIPTOR_CTL_FRAME 0x00000010
#define IOAT_DMA_DESCRIPTOR_NUL 0x00000020
#define IOAT_DMA_DESCRIPTOR_OPCODE 0xFF000000
#endif

drivers/dma/ioatdma_io.h (new file, 118 lines)

@@ -0,0 +1,118 @@
/*
* Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution in the
* file called COPYING.
*/
#ifndef IOATDMA_IO_H
#define IOATDMA_IO_H
#include <asm/io.h>
/*
* device and per-channel MMIO register read and write functions
* this is a lot of annoying inline functions, but it's typesafe
*/
static inline u8 ioatdma_read8(struct ioat_device *device,
unsigned int offset)
{
return readb(device->reg_base + offset);
}
static inline u16 ioatdma_read16(struct ioat_device *device,
unsigned int offset)
{
return readw(device->reg_base + offset);
}
static inline u32 ioatdma_read32(struct ioat_device *device,
unsigned int offset)
{
return readl(device->reg_base + offset);
}
static inline void ioatdma_write8(struct ioat_device *device,
unsigned int offset, u8 value)
{
writeb(value, device->reg_base + offset);
}
static inline void ioatdma_write16(struct ioat_device *device,
unsigned int offset, u16 value)
{
writew(value, device->reg_base + offset);
}
static inline void ioatdma_write32(struct ioat_device *device,
unsigned int offset, u32 value)
{
writel(value, device->reg_base + offset);
}
static inline u8 ioatdma_chan_read8(struct ioat_dma_chan *chan,
unsigned int offset)
{
return readb(chan->reg_base + offset);
}
static inline u16 ioatdma_chan_read16(struct ioat_dma_chan *chan,
unsigned int offset)
{
return readw(chan->reg_base + offset);
}
static inline u32 ioatdma_chan_read32(struct ioat_dma_chan *chan,
unsigned int offset)
{
return readl(chan->reg_base + offset);
}
static inline void ioatdma_chan_write8(struct ioat_dma_chan *chan,
unsigned int offset, u8 value)
{
writeb(value, chan->reg_base + offset);
}
static inline void ioatdma_chan_write16(struct ioat_dma_chan *chan,
unsigned int offset, u16 value)
{
writew(value, chan->reg_base + offset);
}
static inline void ioatdma_chan_write32(struct ioat_dma_chan *chan,
unsigned int offset, u32 value)
{
writel(value, chan->reg_base + offset);
}
#if (BITS_PER_LONG == 64)
static inline u64 ioatdma_chan_read64(struct ioat_dma_chan *chan,
unsigned int offset)
{
return readq(chan->reg_base + offset);
}
static inline void ioatdma_chan_write64(struct ioat_dma_chan *chan,
unsigned int offset, u64 value)
{
writeq(value, chan->reg_base + offset);
}
#endif
#endif /* IOATDMA_IO_H */

drivers/dma/ioatdma_registers.h (new file, 126 lines)

@@ -0,0 +1,126 @@
/*
* Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution in the
* file called COPYING.
*/
#ifndef _IOAT_REGISTERS_H_
#define _IOAT_REGISTERS_H_
/* MMIO Device Registers */
#define IOAT_CHANCNT_OFFSET 0x00 /* 8-bit */
#define IOAT_XFERCAP_OFFSET 0x01 /* 8-bit */
#define IOAT_XFERCAP_4KB 12
#define IOAT_XFERCAP_8KB 13
#define IOAT_XFERCAP_16KB 14
#define IOAT_XFERCAP_32KB 15
#define IOAT_XFERCAP_32GB 0
#define IOAT_GENCTRL_OFFSET 0x02 /* 8-bit */
#define IOAT_GENCTRL_DEBUG_EN 0x01
#define IOAT_INTRCTRL_OFFSET 0x03 /* 8-bit */
#define IOAT_INTRCTRL_MASTER_INT_EN 0x01 /* Master Interrupt Enable */
#define IOAT_INTRCTRL_INT_STATUS 0x02 /* ATTNSTATUS -or- Channel Int */
#define IOAT_INTRCTRL_INT 0x04 /* INT_STATUS -and- MASTER_INT_EN */
#define IOAT_ATTNSTATUS_OFFSET 0x04 /* Each bit is a channel */
#define IOAT_VER_OFFSET 0x08 /* 8-bit */
#define IOAT_VER_MAJOR_MASK 0xF0
#define IOAT_VER_MINOR_MASK 0x0F
#define GET_IOAT_VER_MAJOR(x) ((x) & IOAT_VER_MAJOR_MASK)
#define GET_IOAT_VER_MINOR(x) ((x) & IOAT_VER_MINOR_MASK)
#define IOAT_PERPORTOFFSET_OFFSET 0x0A /* 16-bit */
#define IOAT_INTRDELAY_OFFSET 0x0C /* 16-bit */
#define IOAT_INTRDELAY_INT_DELAY_MASK 0x3FFF /* Interrupt Delay Time */
#define IOAT_INTRDELAY_COALESE_SUPPORT 0x8000 /* Interrupt Coalescing Supported */
#define IOAT_DEVICE_STATUS_OFFSET 0x0E /* 16-bit */
#define IOAT_DEVICE_STATUS_DEGRADED_MODE 0x0001
#define IOAT_CHANNEL_MMIO_SIZE 0x80 /* Each Channel MMIO space is this size */
/* DMA Channel Registers */
#define IOAT_CHANCTRL_OFFSET 0x00 /* 16-bit Channel Control Register */
#define IOAT_CHANCTRL_CHANNEL_PRIORITY_MASK 0xF000
#define IOAT_CHANCTRL_CHANNEL_IN_USE 0x0100
#define IOAT_CHANCTRL_DESCRIPTOR_ADDR_SNOOP_CONTROL 0x0020
#define IOAT_CHANCTRL_ERR_INT_EN 0x0010
#define IOAT_CHANCTRL_ANY_ERR_ABORT_EN 0x0008
#define IOAT_CHANCTRL_ERR_COMPLETION_EN 0x0004
#define IOAT_CHANCTRL_INT_DISABLE 0x0001
#define IOAT_DMA_COMP_OFFSET 0x02 /* 16-bit DMA channel compatibility */
#define IOAT_DMA_COMP_V1 0x0001 /* Compatibility with DMA version 1 */
#define IOAT_CHANSTS_OFFSET 0x04 /* 64-bit Channel Status Register */
#define IOAT_CHANSTS_OFFSET_LOW 0x04
#define IOAT_CHANSTS_OFFSET_HIGH 0x08
#define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR 0xFFFFFFFFFFFFFFC0
#define IOAT_CHANSTS_SOFT_ERR 0x0000000000000010
#define IOAT_CHANSTS_DMA_TRANSFER_STATUS 0x0000000000000007
#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE 0x0
#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_DONE 0x1
#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_SUSPENDED 0x2
#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED 0x3
#define IOAT_CHAINADDR_OFFSET 0x0C /* 64-bit Descriptor Chain Address Register */
#define IOAT_CHAINADDR_OFFSET_LOW 0x0C
#define IOAT_CHAINADDR_OFFSET_HIGH 0x10
#define IOAT_CHANCMD_OFFSET 0x14 /* 8-bit DMA Channel Command Register */
#define IOAT_CHANCMD_RESET 0x20
#define IOAT_CHANCMD_RESUME 0x10
#define IOAT_CHANCMD_ABORT 0x08
#define IOAT_CHANCMD_SUSPEND 0x04
#define IOAT_CHANCMD_APPEND 0x02
#define IOAT_CHANCMD_START 0x01
#define IOAT_CHANCMP_OFFSET 0x18 /* 64-bit Channel Completion Address Register */
#define IOAT_CHANCMP_OFFSET_LOW 0x18
#define IOAT_CHANCMP_OFFSET_HIGH 0x1C
#define IOAT_CDAR_OFFSET 0x20 /* 64-bit Current Descriptor Address Register */
#define IOAT_CDAR_OFFSET_LOW 0x20
#define IOAT_CDAR_OFFSET_HIGH 0x24
#define IOAT_CHANERR_OFFSET 0x28 /* 32-bit Channel Error Register */
#define IOAT_CHANERR_DMA_TRANSFER_SRC_ADDR_ERR 0x0001
#define IOAT_CHANERR_DMA_TRANSFER_DEST_ADDR_ERR 0x0002
#define IOAT_CHANERR_NEXT_DESCRIPTOR_ADDR_ERR 0x0004
#define IOAT_CHANERR_NEXT_DESCRIPTOR_ALIGNMENT_ERR 0x0008
#define IOAT_CHANERR_CHAIN_ADDR_VALUE_ERR 0x0010
#define IOAT_CHANERR_CHANCMD_ERR 0x0020
#define IOAT_CHANERR_CHIPSET_UNCORRECTABLE_DATA_INTEGRITY_ERR 0x0040
#define IOAT_CHANERR_DMA_UNCORRECTABLE_DATA_INTEGRITY_ERR 0x0080
#define IOAT_CHANERR_READ_DATA_ERR 0x0100
#define IOAT_CHANERR_WRITE_DATA_ERR 0x0200
#define IOAT_CHANERR_DESCRIPTOR_CONTROL_ERR 0x0400
#define IOAT_CHANERR_DESCRIPTOR_LENGTH_ERR 0x0800
#define IOAT_CHANERR_COMPLETION_ADDR_ERR 0x1000
#define IOAT_CHANERR_INT_CONFIGURATION_ERR 0x2000
#define IOAT_CHANERR_SOFT_ERR 0x4000
#define IOAT_CHANERR_MASK_OFFSET 0x2C /* 32-bit Channel Error Register */
#endif /* _IOAT_REGISTERS_H_ */

drivers/dma/iovlock.c (new file, 301 lines)

@@ -0,0 +1,301 @@
/*
* Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
* Portions based on net/core/datagram.c and copyrighted by their authors.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution in the
* file called COPYING.
*/
/*
* This code allows the net stack to make use of a DMA engine for
* skb to iovec copies.
*/
#include <linux/dmaengine.h>
#include <linux/pagemap.h>
#include <net/tcp.h> /* for memcpy_toiovec */
#include <asm/io.h>
#include <asm/uaccess.h>
int num_pages_spanned(struct iovec *iov)
{
return
((PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT);
}
/*
* Pin down all the iovec pages needed for len bytes.
* Return a struct dma_pinned_list to keep track of pages pinned down.
*
* We are allocating a single chunk of memory, and then carving it up into
* 3 sections, the latter 2 whose size depends on the number of iovecs and the
* total number of pages, respectively.
*/
struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len)
{
struct dma_pinned_list *local_list;
struct page **pages;
int i;
int ret;
int nr_iovecs = 0;
int iovec_len_used = 0;
int iovec_pages_used = 0;
long err;
/* don't pin down non-user-based iovecs */
if (segment_eq(get_fs(), KERNEL_DS))
return NULL;
/* determine how many iovecs/pages there are, up front */
do {
iovec_len_used += iov[nr_iovecs].iov_len;
iovec_pages_used += num_pages_spanned(&iov[nr_iovecs]);
nr_iovecs++;
} while (iovec_len_used < len);
/* single kmalloc for pinned list, page_list[], and the page arrays */
local_list = kmalloc(sizeof(*local_list)
+ (nr_iovecs * sizeof (struct dma_page_list))
+ (iovec_pages_used * sizeof (struct page*)), GFP_KERNEL);
if (!local_list) {
err = -ENOMEM;
goto out;
}
/* list of pages starts right after the page list array */
pages = (struct page **) &local_list->page_list[nr_iovecs];
for (i = 0; i < nr_iovecs; i++) {
struct dma_page_list *page_list = &local_list->page_list[i];
len -= iov[i].iov_len;
if (!access_ok(VERIFY_WRITE, iov[i].iov_base, iov[i].iov_len)) {
err = -EFAULT;
goto unpin;
}
page_list->nr_pages = num_pages_spanned(&iov[i]);
page_list->base_address = iov[i].iov_base;
page_list->pages = pages;
pages += page_list->nr_pages;
/* pin pages down */
down_read(&current->mm->mmap_sem);
ret = get_user_pages(
current,
current->mm,
(unsigned long) iov[i].iov_base,
page_list->nr_pages,
1, /* write */
0, /* force */
page_list->pages,
NULL);
up_read(&current->mm->mmap_sem);
if (ret != page_list->nr_pages) {
err = -ENOMEM;
goto unpin;
}
local_list->nr_iovecs = i + 1;
}
return local_list;
unpin:
dma_unpin_iovec_pages(local_list);
out:
return ERR_PTR(err);
}
void dma_unpin_iovec_pages(struct dma_pinned_list *pinned_list)
{
int i, j;
if (!pinned_list)
return;
for (i = 0; i < pinned_list->nr_iovecs; i++) {
struct dma_page_list *page_list = &pinned_list->page_list[i];
for (j = 0; j < page_list->nr_pages; j++) {
set_page_dirty_lock(page_list->pages[j]);
page_cache_release(page_list->pages[j]);
}
}
kfree(pinned_list);
}
static dma_cookie_t dma_memcpy_to_kernel_iovec(struct dma_chan *chan, struct
iovec *iov, unsigned char *kdata, size_t len)
{
dma_cookie_t dma_cookie = 0;
while (len > 0) {
if (iov->iov_len) {
int copy = min_t(unsigned int, iov->iov_len, len);
dma_cookie = dma_async_memcpy_buf_to_buf(
chan,
iov->iov_base,
kdata,
copy);
kdata += copy;
len -= copy;
iov->iov_len -= copy;
iov->iov_base += copy;
}
iov++;
}
return dma_cookie;
}
/*
* We have already pinned down the pages we will be using in the iovecs.
* Each entry in iov array has corresponding entry in pinned_list->page_list.
* Using array indexing to keep iov[] and page_list[] in sync.
* Initial elements in iov array's iov->iov_len will be 0 if already copied into
* by another call.
* iov array length remaining guaranteed to be bigger than len.
*/
dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len)
{
int iov_byte_offset;
int copy;
dma_cookie_t dma_cookie = 0;
int iovec_idx;
int page_idx;
if (!chan)
return memcpy_toiovec(iov, kdata, len);
/* -> kernel copies (e.g. smbfs) */
if (!pinned_list)
return dma_memcpy_to_kernel_iovec(chan, iov, kdata, len);
iovec_idx = 0;
while (iovec_idx < pinned_list->nr_iovecs) {
struct dma_page_list *page_list;
/* skip already used-up iovecs */
while (!iov[iovec_idx].iov_len)
iovec_idx++;
page_list = &pinned_list->page_list[iovec_idx];
iov_byte_offset = ((unsigned long)iov[iovec_idx].iov_base & ~PAGE_MASK);
page_idx = (((unsigned long)iov[iovec_idx].iov_base & PAGE_MASK)
- ((unsigned long)page_list->base_address & PAGE_MASK)) >> PAGE_SHIFT;
/* break up copies to not cross page boundary */
while (iov[iovec_idx].iov_len) {
copy = min_t(int, PAGE_SIZE - iov_byte_offset, len);
copy = min_t(int, copy, iov[iovec_idx].iov_len);
dma_cookie = dma_async_memcpy_buf_to_pg(chan,
page_list->pages[page_idx],
iov_byte_offset,
kdata,
copy);
len -= copy;
iov[iovec_idx].iov_len -= copy;
iov[iovec_idx].iov_base += copy;
if (!len)
return dma_cookie;
kdata += copy;
iov_byte_offset = 0;
page_idx++;
}
iovec_idx++;
}
/* really bad if we ever run out of iovecs */
BUG();
return -EFAULT;
}
dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
struct dma_pinned_list *pinned_list, struct page *page,
unsigned int offset, size_t len)
{
int iov_byte_offset;
int copy;
dma_cookie_t dma_cookie = 0;
int iovec_idx;
int page_idx;
int err;
/* this needs as-yet-unimplemented buf-to-buff, so punt. */
/* TODO: use dma for this */
if (!chan || !pinned_list) {
u8 *vaddr = kmap(page);
err = memcpy_toiovec(iov, vaddr + offset, len);
kunmap(page);
return err;
}
iovec_idx = 0;
while (iovec_idx < pinned_list->nr_iovecs) {
struct dma_page_list *page_list;
/* skip already used-up iovecs */
while (!iov[iovec_idx].iov_len)
iovec_idx++;
page_list = &pinned_list->page_list[iovec_idx];
iov_byte_offset = ((unsigned long)iov[iovec_idx].iov_base & ~PAGE_MASK);
page_idx = (((unsigned long)iov[iovec_idx].iov_base & PAGE_MASK)
- ((unsigned long)page_list->base_address & PAGE_MASK)) >> PAGE_SHIFT;
/* break up copies to not cross page boundary */
while (iov[iovec_idx].iov_len) {
copy = min_t(int, PAGE_SIZE - iov_byte_offset, len);
copy = min_t(int, copy, iov[iovec_idx].iov_len);
dma_cookie = dma_async_memcpy_pg_to_pg(chan,
page_list->pages[page_idx],
iov_byte_offset,
page,
offset,
copy);
len -= copy;
iov[iovec_idx].iov_len -= copy;
iov[iovec_idx].iov_base += copy;
if (!len)
return dma_cookie;
offset += copy;
iov_byte_offset = 0;
page_idx++;
}
iovec_idx++;
}
/* really bad if we ever run out of iovecs */
BUG();
return -EFAULT;
}
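
The comment before dma_memcpy_to_iovec() spells out the calling contract: the caller pins the destination iovec once, may call the copy helpers against it repeatedly, and unpins only after the copies have completed. A minimal caller sketch follows; it is illustrative only (the function name, the completion-wait placeholder and the error handling are assumptions, not code from this commit):

/* Sketch of the pin / copy / unpin sequence; not part of this commit. */
static int example_copy_to_user_iovec(struct dma_chan *chan, struct iovec *iov,
		unsigned char *kdata, size_t len)
{
	struct dma_pinned_list *pinned;
	dma_cookie_t cookie;

	/* Pin every user page spanned by the iovec up front.  A NULL return
	 * means a kernel iovec; the copy helper handles that case itself. */
	pinned = dma_pin_iovec_pages(iov, len);
	if (IS_ERR(pinned))
		return PTR_ERR(pinned);

	cookie = dma_memcpy_to_iovec(chan, iov, pinned, kdata, len);

	/* ... wait here for 'cookie' to complete on 'chan' ... */

	dma_unpin_iovec_pages(pinned);
	return (cookie < 0) ? cookie : 0;
}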


@@ -821,7 +821,8 @@ void ipoib_mcast_restart_task(void *dev_ptr)
 ipoib_mcast_stop_thread(dev, 0);
-spin_lock_irqsave(&dev->xmit_lock, flags);
+local_irq_save(flags);
+netif_tx_lock(dev);
 spin_lock(&priv->lock);
 /*
@@ -896,7 +897,8 @@ void ipoib_mcast_restart_task(void *dev_ptr)
 }
 spin_unlock(&priv->lock);
-spin_unlock_irqrestore(&dev->xmit_lock, flags);
+netif_tx_unlock(dev);
+local_irq_restore(flags);
 /* We have to cancel outside of the spinlock */
 list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {


@@ -1052,7 +1052,7 @@ static void wq_set_multicast_list (void *data)
 dvb_net_feed_stop(dev);
 priv->rx_mode = RX_MODE_UNI;
-spin_lock_bh(&dev->xmit_lock);
+netif_tx_lock_bh(dev);
 if (dev->flags & IFF_PROMISC) {
 dprintk("%s: promiscuous mode\n", dev->name);
@@ -1077,7 +1077,7 @@ static void wq_set_multicast_list (void *data)
 }
 }
-spin_unlock_bh(&dev->xmit_lock);
+netif_tx_unlock_bh(dev);
 dvb_net_feed_start(dev);
 }


@@ -2180,6 +2180,8 @@ config TIGON3
 config BNX2
 tristate "Broadcom NetXtremeII support"
 depends on PCI
+select CRC32
+select ZLIB_INFLATE
 help
 This driver supports Broadcom NetXtremeII gigabit Ethernet cards.


@@ -32,6 +32,7 @@
 #include <asm/irq.h>
 #include <linux/delay.h>
 #include <asm/byteorder.h>
+#include <asm/page.h>
 #include <linux/time.h>
 #include <linux/ethtool.h>
 #include <linux/mii.h>
@@ -49,14 +50,15 @@
 #include <linux/crc32.h>
 #include <linux/prefetch.h>
 #include <linux/cache.h>
+#include <linux/zlib.h>
 #include "bnx2.h"
 #include "bnx2_fw.h"
 #define DRV_MODULE_NAME "bnx2"
 #define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "1.4.40"
-#define DRV_MODULE_RELDATE "May 22, 2006"
+#define DRV_MODULE_VERSION "1.4.42"
+#define DRV_MODULE_RELDATE "June 12, 2006"
 #define RUN_AT(x) (jiffies + (x))
@@ -1820,7 +1822,7 @@ reuse_rx:
 skb->protocol = eth_type_trans(skb, bp->dev);
 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
-(htons(skb->protocol) != 0x8100)) {
+(ntohs(skb->protocol) != 0x8100)) {
 dev_kfree_skb_irq(skb);
 goto next_rx;
@@ -2009,7 +2011,7 @@ bnx2_poll(struct net_device *dev, int *budget)
 return 1;
 }
-/* Called with rtnl_lock from vlan functions and also dev->xmit_lock
+/* Called with rtnl_lock from vlan functions and also netif_tx_lock
  * from set_multicast.
  */
 static void
@@ -2083,6 +2085,92 @@ bnx2_set_rx_mode(struct net_device *dev)
 spin_unlock_bh(&bp->phy_lock);
 }
#define FW_BUF_SIZE 0x8000
static int
bnx2_gunzip_init(struct bnx2 *bp)
{
if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
goto gunzip_nomem1;
if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
goto gunzip_nomem2;
bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
if (bp->strm->workspace == NULL)
goto gunzip_nomem3;
return 0;
gunzip_nomem3:
kfree(bp->strm);
bp->strm = NULL;
gunzip_nomem2:
vfree(bp->gunzip_buf);
bp->gunzip_buf = NULL;
gunzip_nomem1:
printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
"uncompression.\n", bp->dev->name);
return -ENOMEM;
}
static void
bnx2_gunzip_end(struct bnx2 *bp)
{
kfree(bp->strm->workspace);
kfree(bp->strm);
bp->strm = NULL;
if (bp->gunzip_buf) {
vfree(bp->gunzip_buf);
bp->gunzip_buf = NULL;
}
}
static int
bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
{
int n, rc;
/* check gzip header */
if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
return -EINVAL;
n = 10;
#define FNAME 0x8
if (zbuf[3] & FNAME)
while ((zbuf[n++] != 0) && (n < len));
bp->strm->next_in = zbuf + n;
bp->strm->avail_in = len - n;
bp->strm->next_out = bp->gunzip_buf;
bp->strm->avail_out = FW_BUF_SIZE;
rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
if (rc != Z_OK)
return rc;
rc = zlib_inflate(bp->strm, Z_FINISH);
*outlen = FW_BUF_SIZE - bp->strm->avail_out;
*outbuf = bp->gunzip_buf;
if ((rc != Z_OK) && (rc != Z_STREAM_END))
printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
bp->dev->name, bp->strm->msg);
zlib_inflateEnd(bp->strm);
if (rc == Z_STREAM_END)
return 0;
return rc;
}
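
The header check at the top of bnx2_gunzip() accepts a raw gzip member: two magic bytes, the deflate method byte, an optional NUL-terminated original-file-name field when bit 3 (FNAME) of the flag byte is set, and then the deflate payload, which is handed to zlib_inflateInit2() with -MAX_WBITS so zlib expects no wrapper of its own. The same RFC 1952 header walk, pulled out as a standalone helper purely for illustration (not part of the driver), looks like this:

/* Illustrative RFC 1952 header walk mirroring the check in bnx2_gunzip(). */
static int gzip_payload_offset(const unsigned char *zbuf, int len)
{
	int n = 10;				/* fixed gzip member header */

	if (len < 10 || zbuf[0] != 0x1f || zbuf[1] != 0x8b || zbuf[2] != 8)
		return -1;			/* not a deflate gzip member */

	if (zbuf[3] & 0x08)			/* FNAME: skip the stored name */
		while (n < len && zbuf[n++] != 0)
			;

	return n;				/* deflate stream starts here */
}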
 static void
 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
 u32 rv2p_proc)
@@ -2092,9 +2180,9 @@ load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
 for (i = 0; i < rv2p_code_len; i += 8) {
-REG_WR(bp, BNX2_RV2P_INSTR_HIGH, *rv2p_code);
+REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
 rv2p_code++;
-REG_WR(bp, BNX2_RV2P_INSTR_LOW, *rv2p_code);
+REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
 rv2p_code++;
 if (rv2p_proc == RV2P_PROC1) {
@@ -2134,7 +2222,7 @@ load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
 int j;
 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
-REG_WR_IND(bp, offset, fw->text[j]);
+REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
 }
 }
@@ -2190,15 +2278,32 @@ load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
 REG_WR_IND(bp, cpu_reg->mode, val);
 }
-static void
+static int
 bnx2_init_cpus(struct bnx2 *bp)
 {
 struct cpu_reg cpu_reg;
 struct fw_info fw;
+int rc = 0;
+void *text;
+u32 text_len;
+if ((rc = bnx2_gunzip_init(bp)) != 0)
+return rc;
 /* Initialize the RV2P processor. */
-load_rv2p_fw(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), RV2P_PROC1);
-load_rv2p_fw(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), RV2P_PROC2);
+rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
+&text_len);
+if (rc)
+goto init_cpu_err;
+load_rv2p_fw(bp, text, text_len, RV2P_PROC1);
+rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
+&text_len);
+if (rc)
+goto init_cpu_err;
+load_rv2p_fw(bp, text, text_len, RV2P_PROC2);
 /* Initialize the RX Processor. */
 cpu_reg.mode = BNX2_RXP_CPU_MODE;
@@ -2222,7 +2327,13 @@ bnx2_init_cpus(struct bnx2 *bp)
 fw.text_addr = bnx2_RXP_b06FwTextAddr;
 fw.text_len = bnx2_RXP_b06FwTextLen;
 fw.text_index = 0;
-fw.text = bnx2_RXP_b06FwText;
+rc = bnx2_gunzip(bp, bnx2_RXP_b06FwText, sizeof(bnx2_RXP_b06FwText),
+&text, &text_len);
+if (rc)
+goto init_cpu_err;
+fw.text = text;
 fw.data_addr = bnx2_RXP_b06FwDataAddr;
 fw.data_len = bnx2_RXP_b06FwDataLen;
@@ -2268,7 +2379,13 @@ bnx2_init_cpus(struct bnx2 *bp)
 fw.text_addr = bnx2_TXP_b06FwTextAddr;
 fw.text_len = bnx2_TXP_b06FwTextLen;
 fw.text_index = 0;
-fw.text = bnx2_TXP_b06FwText;
+rc = bnx2_gunzip(bp, bnx2_TXP_b06FwText, sizeof(bnx2_TXP_b06FwText),
+&text, &text_len);
+if (rc)
+goto init_cpu_err;
+fw.text = text;
 fw.data_addr = bnx2_TXP_b06FwDataAddr;
 fw.data_len = bnx2_TXP_b06FwDataLen;
@@ -2314,7 +2431,13 @@ bnx2_init_cpus(struct bnx2 *bp)
 fw.text_addr = bnx2_TPAT_b06FwTextAddr;
 fw.text_len = bnx2_TPAT_b06FwTextLen;
 fw.text_index = 0;
-fw.text = bnx2_TPAT_b06FwText;
+rc = bnx2_gunzip(bp, bnx2_TPAT_b06FwText, sizeof(bnx2_TPAT_b06FwText),
+&text, &text_len);
+if (rc)
+goto init_cpu_err;
+fw.text = text;
 fw.data_addr = bnx2_TPAT_b06FwDataAddr;
 fw.data_len = bnx2_TPAT_b06FwDataLen;
@@ -2360,7 +2483,13 @@ bnx2_init_cpus(struct bnx2 *bp)
 fw.text_addr = bnx2_COM_b06FwTextAddr;
 fw.text_len = bnx2_COM_b06FwTextLen;
 fw.text_index = 0;
-fw.text = bnx2_COM_b06FwText;
+rc = bnx2_gunzip(bp, bnx2_COM_b06FwText, sizeof(bnx2_COM_b06FwText),
+&text, &text_len);
+if (rc)
+goto init_cpu_err;
+fw.text = text;
 fw.data_addr = bnx2_COM_b06FwDataAddr;
 fw.data_len = bnx2_COM_b06FwDataLen;
@@ -2384,6 +2513,9 @@ bnx2_init_cpus(struct bnx2 *bp)
 load_cpu_fw(bp, &cpu_reg, &fw);
+init_cpu_err:
+bnx2_gunzip_end(bp);
+return rc;
 }
 static int
@@ -3256,7 +3388,9 @@ bnx2_init_chip(struct bnx2 *bp)
 * context block must have already been enabled. */
 bnx2_init_context(bp);
-bnx2_init_cpus(bp);
+if ((rc = bnx2_init_cpus(bp)) != 0)
+return rc;
 bnx2_init_nvram(bp);
 bnx2_set_mac_addr(bp);
@@ -3556,7 +3690,9 @@ bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
 if (rc)
 return rc;
-bnx2_init_chip(bp);
+if ((rc = bnx2_init_chip(bp)) != 0)
+return rc;
 bnx2_init_tx_ring(bp);
 bnx2_init_rx_ring(bp);
 return 0;
@@ -4034,6 +4170,8 @@ bnx2_timer(unsigned long data)
 msg = (u32) ++bp->fw_drv_pulse_wr_seq;
 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
+bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
@@ -4252,7 +4390,7 @@ bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
 }
 #endif
-/* Called with dev->xmit_lock.
+/* Called with netif_tx_lock.
  * hard_start_xmit is pseudo-lockless - a lock is only required when
  * the tx queue is full. This way, we get the benefit of lockless
  * operations most of the time without the complexities to handle
@@ -4310,7 +4448,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
 skb->nh.iph->check = 0;
-skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
+skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
 skb->h.th->check =
 ~csum_tcpudp_magic(skb->nh.iph->saddr,
 skb->nh.iph->daddr,
@@ -4504,6 +4642,10 @@ bnx2_get_stats(struct net_device *dev)
 net_stats->tx_aborted_errors +
 net_stats->tx_carrier_errors;
+net_stats->rx_missed_errors =
+(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
+stats_blk->stat_FwRxDrop);
 return net_stats;
 }
@@ -4986,7 +5128,7 @@ bnx2_set_rx_csum(struct net_device *dev, u32 data)
 return 0;
 }
-#define BNX2_NUM_STATS 45
+#define BNX2_NUM_STATS 46
 static struct {
 char string[ETH_GSTRING_LEN];
@@ -5036,6 +5178,7 @@ static struct {
 { "rx_mac_ctrl_frames" },
 { "rx_filtered_packets" },
 { "rx_discards" },
+{ "rx_fw_discards" },
 };
 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
@@ -5086,6 +5229,7 @@ static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
 STATS_OFFSET32(stat_MacControlFramesReceived),
 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
 STATS_OFFSET32(stat_IfInMBUFDiscards),
+STATS_OFFSET32(stat_FwRxDrop),
 };
 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
@@ -5096,7 +5240,7 @@ static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
 4,0,4,4,4,4,4,4,4,4,
 4,4,4,4,4,4,4,4,4,4,
 4,4,4,4,4,4,4,4,4,4,
-4,4,4,4,4,
+4,4,4,4,4,4,
 };
 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
@@ -5104,7 +5248,7 @@ static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
 4,4,4,4,4,4,4,4,4,4,
 4,4,4,4,4,4,4,4,4,4,
 4,4,4,4,4,4,4,4,4,4,
-4,4,4,4,4,
+4,4,4,4,4,4,
 };
 #define BNX2_NUM_TESTS 6
@@ -5634,7 +5778,9 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 }
 }
-if (CHIP_NUM(bp) == CHIP_NUM_5708)
+if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
+(CHIP_ID(bp) == CHIP_ID_5708_B0) ||
+(CHIP_ID(bp) == CHIP_ID_5708_B1))
 bp->flags |= NO_WOL_FLAG;
 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
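
The comment retained in the bnx2_start_xmit() hunk above describes the NETIF_F_LLTX model this driver uses: the core no longer takes netif_tx_lock around hard_start_xmit, so the driver serializes transmits with its own lock and only pays for it unconditionally when the ring can fill. As a rough, driver-neutral illustration of that try-lock pattern (placeholder names throughout; this is not the bnx2 code):

/* Sketch of an LLTX hard_start_xmit; 'example_priv' and helpers are assumed. */
static int example_lltx_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	/* No netif_tx_lock here: NETIF_F_LLTX drivers lock for themselves. */
	if (!spin_trylock(&priv->tx_lock))
		return NETDEV_TX_LOCKED;	/* core will retry this skb */

	if (example_tx_ring_full(priv)) {
		netif_stop_queue(dev);
		spin_unlock(&priv->tx_lock);
		return NETDEV_TX_BUSY;
	}

	example_post_tx_descriptor(priv, skb);	/* stand-in for the DMA setup */

	spin_unlock(&priv->tx_lock);
	return NETDEV_TX_OK;
}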


@@ -231,6 +231,7 @@ struct statistics_block {
 u32 stat_GenStat13;
 u32 stat_GenStat14;
 u32 stat_GenStat15;
+u32 stat_FwRxDrop;
 };
@@ -3481,6 +3482,8 @@ struct l2_fhdr {
 #define BNX2_COM_SCRATCH 0x00120000
+#define BNX2_FW_RX_DROP_COUNT 0x00120084
 /*
 * cp_reg definition
@@ -3747,7 +3750,12 @@ struct l2_fhdr {
 #define DMA_READ_CHANS 5
 #define DMA_WRITE_CHANS 3
-#define BCM_PAGE_BITS 12
+/* Use CPU native page size up to 16K for the ring sizes. */
+#if (PAGE_SHIFT > 14)
+#define BCM_PAGE_BITS 14
+#else
+#define BCM_PAGE_BITS PAGE_SHIFT
+#endif
 #define BCM_PAGE_SIZE (1 << BCM_PAGE_BITS)
 #define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct tx_bd))
@@ -3770,7 +3778,7 @@ struct l2_fhdr {
 #define RX_RING_IDX(x) ((x) & bp->rx_max_ring_idx)
-#define RX_RING(x) (((x) & ~MAX_RX_DESC_CNT) >> 8)
+#define RX_RING(x) (((x) & ~MAX_RX_DESC_CNT) >> (BCM_PAGE_BITS - 4))
 #define RX_IDX(x) ((x) & MAX_RX_DESC_CNT)
 /* Context size. */
@@ -4048,6 +4056,9 @@ struct bnx2 {
 u32 flash_size;
 int status_stats_size;
+struct z_stream_s *strm;
+void *gunzip_buf;
 };
 static u32 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset);
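
The BCM_PAGE_BITS change above sizes the rings from the CPU page size, capped at 16K, which is why RX_RING() now shifts by (BCM_PAGE_BITS - 4) instead of a hard-coded 8: with 16-byte ring descriptors (an assumption used only for this arithmetic) a page holds 2^(BCM_PAGE_BITS - 4) entries. A quick standalone check of the numbers:

/* Worked example of the ring-index arithmetic; 16-byte descriptors assumed. */
#include <stdio.h>

int main(void)
{
	for (int page_bits = 12; page_bits <= 14; page_bits++) {
		unsigned int desc_per_page = (1u << page_bits) / 16;
		unsigned int max_desc = desc_per_page - 1;	/* MAX_RX_DESC_CNT analogue */
		unsigned int idx = 1000;
		unsigned int ring_page = (idx & ~max_desc) >> (page_bits - 4);

		printf("page bits %d: %u descriptors/page, index %u -> ring page %u\n",
		       page_bits, desc_per_page, idx, ring_page);
	}
	return 0;
}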

File diff suppressed because it is too large.


@@ -1199,8 +1199,7 @@ int bond_sethwaddr(struct net_device *bond_dev, struct net_device *slave_dev)
 }
 #define BOND_INTERSECT_FEATURES \
-(NETIF_F_SG|NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM|\
-NETIF_F_TSO|NETIF_F_UFO)
+(NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_TSO | NETIF_F_UFO)
 /*
 * Compute the common dev->feature set available to all slaves. Some
@@ -1218,9 +1217,7 @@ static int bond_compute_features(struct bonding *bond)
 features &= (slave->dev->features & BOND_INTERSECT_FEATURES);
 if ((features & NETIF_F_SG) &&
-!(features & (NETIF_F_IP_CSUM |
-NETIF_F_NO_CSUM |
-NETIF_F_HW_CSUM)))
+!(features & NETIF_F_ALL_CSUM))
 features &= ~NETIF_F_SG;
 /*
@@ -4191,7 +4188,7 @@ static int bond_init(struct net_device *bond_dev, struct bond_params *params)
 */
 bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
-/* don't acquire bond device's xmit_lock when
+/* don't acquire bond device's netif_tx_lock when
 * transmitting */
 bond_dev->features |= NETIF_F_LLTX;


@@ -669,9 +669,9 @@ static const struct register_test nv_registers_test[] = {
 * critical parts:
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 * by the arch code for interrupts.
-* - tx setup is lockless: it relies on dev->xmit_lock. Actual submission
+* - tx setup is lockless: it relies on netif_tx_lock. Actual submission
 * needs dev->priv->lock :-(
-* - set_multicast_list: preparation lockless, relies on dev->xmit_lock.
+* - set_multicast_list: preparation lockless, relies on netif_tx_lock.
 */
 /* in dev: base, irq */
@@ -1405,7 +1405,7 @@ static void drain_ring(struct net_device *dev)
 /*
 * nv_start_xmit: dev->hard_start_xmit function
-* Called with dev->xmit_lock held.
+* Called with netif_tx_lock held.
 */
 static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
@@ -1599,7 +1599,7 @@ static void nv_tx_done(struct net_device *dev)
 /*
 * nv_tx_timeout: dev->tx_timeout function
-* Called with dev->xmit_lock held.
+* Called with netif_tx_lock held.
 */
 static void nv_tx_timeout(struct net_device *dev)
 {
@@ -1930,7 +1930,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
 * Changing the MTU is a rare event, it shouldn't matter.
 */
 nv_disable_irq(dev);
-spin_lock_bh(&dev->xmit_lock);
+netif_tx_lock_bh(dev);
 spin_lock(&np->lock);
 /* stop engines */
 nv_stop_rx(dev);
@@ -1958,7 +1958,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
 nv_start_rx(dev);
 nv_start_tx(dev);
 spin_unlock(&np->lock);
-spin_unlock_bh(&dev->xmit_lock);
+netif_tx_unlock_bh(dev);
 nv_enable_irq(dev);
 }
 return 0;
@@ -1993,7 +1993,7 @@ static int nv_set_mac_address(struct net_device *dev, void *addr)
 memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
 if (netif_running(dev)) {
-spin_lock_bh(&dev->xmit_lock);
+netif_tx_lock_bh(dev);
 spin_lock_irq(&np->lock);
 /* stop rx engine */
@@ -2005,7 +2005,7 @@ static int nv_set_mac_address(struct net_device *dev, void *addr)
 /* restart rx engine */
 nv_start_rx(dev);
 spin_unlock_irq(&np->lock);
-spin_unlock_bh(&dev->xmit_lock);
+netif_tx_unlock_bh(dev);
 } else {
 nv_copy_mac_to_hw(dev);
 }
@@ -2014,7 +2014,7 @@ static int nv_set_mac_address(struct net_device *dev, void *addr)
 /*
 * nv_set_multicast: dev->set_multicast function
-* Called with dev->xmit_lock held.
+* Called with netif_tx_lock held.
 */
 static void nv_set_multicast(struct net_device *dev)
 {


@@ -308,9 +308,9 @@ static int sp_set_mac_address(struct net_device *dev, void *addr)
 {
 struct sockaddr_ax25 *sa = addr;
-spin_lock_irq(&dev->xmit_lock);
+netif_tx_lock_bh(dev);
 memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN);
-spin_unlock_irq(&dev->xmit_lock);
+netif_tx_unlock_bh(dev);
 return 0;
 }
@@ -767,9 +767,9 @@ static int sixpack_ioctl(struct tty_struct *tty, struct file *file,
 break;
 }
-spin_lock_irq(&dev->xmit_lock);
+netif_tx_lock_bh(dev);
 memcpy(dev->dev_addr, &addr, AX25_ADDR_LEN);
-spin_unlock_irq(&dev->xmit_lock);
+netif_tx_unlock_bh(dev);
 err = 0;
 break;


@@ -357,9 +357,9 @@ static int ax_set_mac_address(struct net_device *dev, void *addr)
 {
 struct sockaddr_ax25 *sa = addr;
-spin_lock_irq(&dev->xmit_lock);
+netif_tx_lock_bh(dev);
 memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN);
-spin_unlock_irq(&dev->xmit_lock);
+netif_tx_unlock_bh(dev);
 return 0;
 }
@@ -886,9 +886,9 @@ static int mkiss_ioctl(struct tty_struct *tty, struct file *file,
 break;
 }
-spin_lock_irq(&dev->xmit_lock);
+netif_tx_lock_bh(dev);
 memcpy(dev->dev_addr, addr, AX25_ADDR_LEN);
-spin_unlock_irq(&dev->xmit_lock);
+netif_tx_unlock_bh(dev);
 err = 0;
 break;


@@ -76,13 +76,13 @@ static void ri_tasklet(unsigned long dev)
 dp->st_task_enter++;
 if ((skb = skb_peek(&dp->tq)) == NULL) {
 dp->st_txq_refl_try++;
-if (spin_trylock(&_dev->xmit_lock)) {
+if (netif_tx_trylock(_dev)) {
 dp->st_rxq_enter++;
 while ((skb = skb_dequeue(&dp->rq)) != NULL) {
 skb_queue_tail(&dp->tq, skb);
 dp->st_rx2tx_tran++;
 }
-spin_unlock(&_dev->xmit_lock);
+netif_tx_unlock(_dev);
 } else {
 /* reschedule */
 dp->st_rxq_notenter++;
@@ -110,7 +110,7 @@ static void ri_tasklet(unsigned long dev)
 }
 }
-if (spin_trylock(&_dev->xmit_lock)) {
+if (netif_tx_trylock(_dev)) {
 dp->st_rxq_check++;
 if ((skb = skb_peek(&dp->rq)) == NULL) {
 dp->tasklet_pending = 0;
@@ -118,10 +118,10 @@ static void ri_tasklet(unsigned long dev)
 netif_wake_queue(_dev);
 } else {
 dp->st_rxq_rsch++;
-spin_unlock(&_dev->xmit_lock);
+netif_tx_unlock(_dev);
 goto resched;
 }
-spin_unlock(&_dev->xmit_lock);
+netif_tx_unlock(_dev);
 } else {
 resched:
 dp->tasklet_pending = 1;


@@ -417,5 +417,20 @@ config PXA_FICP
 available capabilities may vary from one PXA2xx target to
 another.
+config MCS_FIR
+tristate "MosChip MCS7780 IrDA-USB dongle"
+depends on IRDA && USB && EXPERIMENTAL
+help
+Say Y or M here if you want to build support for the MosChip
+MCS7780 IrDA-USB bridge device driver.
+USB bridges based on the MosChip MCS7780 don't conform to the
+IrDA-USB device class specification, and therefore need their
+own specific driver. Those dongles support SIR and FIR (4Mbps)
+speeds.
+To compile it as a module, choose M here: the module will be called
+mcs7780.
 endmenu


@@ -19,6 +19,7 @@ obj-$(CONFIG_ALI_FIR) += ali-ircc.o
 obj-$(CONFIG_VLSI_FIR) += vlsi_ir.o
 obj-$(CONFIG_VIA_FIR) += via-ircc.o
 obj-$(CONFIG_PXA_FICP) += pxaficp_ir.o
+obj-$(CONFIG_MCS_FIR) += mcs7780.o
 # Old dongle drivers for old SIR drivers
 obj-$(CONFIG_ESI_DONGLE_OLD) += esi.o
 obj-$(CONFIG_TEKRAM_DONGLE_OLD) += tekram.o


@ -34,14 +34,12 @@
#include <linux/rtnetlink.h> #include <linux/rtnetlink.h>
#include <linux/serial_reg.h> #include <linux/serial_reg.h>
#include <linux/dma-mapping.h> #include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <asm/io.h> #include <asm/io.h>
#include <asm/dma.h> #include <asm/dma.h>
#include <asm/byteorder.h> #include <asm/byteorder.h>
#include <linux/pm.h>
#include <linux/pm_legacy.h>
#include <net/irda/wrapper.h> #include <net/irda/wrapper.h>
#include <net/irda/irda.h> #include <net/irda/irda.h>
#include <net/irda/irda_device.h> #include <net/irda/irda_device.h>
@ -51,7 +49,19 @@
#define CHIP_IO_EXTENT 8 #define CHIP_IO_EXTENT 8
#define BROKEN_DONGLE_ID #define BROKEN_DONGLE_ID
static char *driver_name = "ali-ircc"; #define ALI_IRCC_DRIVER_NAME "ali-ircc"
/* Power Management */
static int ali_ircc_suspend(struct platform_device *dev, pm_message_t state);
static int ali_ircc_resume(struct platform_device *dev);
static struct platform_driver ali_ircc_driver = {
.suspend = ali_ircc_suspend,
.resume = ali_ircc_resume,
.driver = {
.name = ALI_IRCC_DRIVER_NAME,
},
};
/* Module parameters */ /* Module parameters */
static int qos_mtt_bits = 0x07; /* 1 ms or more */ static int qos_mtt_bits = 0x07; /* 1 ms or more */
@ -97,10 +107,7 @@ static int ali_ircc_is_receiving(struct ali_ircc_cb *self);
static int ali_ircc_net_open(struct net_device *dev); static int ali_ircc_net_open(struct net_device *dev);
static int ali_ircc_net_close(struct net_device *dev); static int ali_ircc_net_close(struct net_device *dev);
static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int ali_ircc_pmproc(struct pm_dev *dev, pm_request_t rqst, void *data);
static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud); static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud);
static void ali_ircc_suspend(struct ali_ircc_cb *self);
static void ali_ircc_wakeup(struct ali_ircc_cb *self);
static struct net_device_stats *ali_ircc_net_get_stats(struct net_device *dev); static struct net_device_stats *ali_ircc_net_get_stats(struct net_device *dev);
/* SIR function */ /* SIR function */
@ -146,6 +153,14 @@ static int __init ali_ircc_init(void)
IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__); IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
ret = platform_driver_register(&ali_ircc_driver);
if (ret) {
IRDA_ERROR("%s, Can't register driver!\n",
ALI_IRCC_DRIVER_NAME);
return ret;
}
/* Probe for all the ALi chipsets we know about */ /* Probe for all the ALi chipsets we know about */
for (chip= chips; chip->name; chip++, i++) for (chip= chips; chip->name; chip++, i++)
{ {
@ -214,6 +229,10 @@ static int __init ali_ircc_init(void)
} }
IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __FUNCTION__); IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __FUNCTION__);
if (ret)
platform_driver_unregister(&ali_ircc_driver);
return ret; return ret;
} }
@ -229,13 +248,13 @@ static void __exit ali_ircc_cleanup(void)
IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__); IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
pm_unregister_all(ali_ircc_pmproc);
for (i=0; i < 4; i++) { for (i=0; i < 4; i++) {
if (dev_self[i]) if (dev_self[i])
ali_ircc_close(dev_self[i]); ali_ircc_close(dev_self[i]);
} }
platform_driver_unregister(&ali_ircc_driver);
IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __FUNCTION__); IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __FUNCTION__);
} }
@ -249,7 +268,6 @@ static int ali_ircc_open(int i, chipio_t *info)
{ {
struct net_device *dev; struct net_device *dev;
struct ali_ircc_cb *self; struct ali_ircc_cb *self;
struct pm_dev *pmdev;
int dongle_id; int dongle_id;
int err; int err;
@ -284,7 +302,8 @@ static int ali_ircc_open(int i, chipio_t *info)
self->io.fifo_size = 16; /* SIR: 16, FIR: 32 Benjamin 2000/11/1 */ self->io.fifo_size = 16; /* SIR: 16, FIR: 32 Benjamin 2000/11/1 */
/* Reserve the ioports that we need */ /* Reserve the ioports that we need */
if (!request_region(self->io.fir_base, self->io.fir_ext, driver_name)) { if (!request_region(self->io.fir_base, self->io.fir_ext,
ALI_IRCC_DRIVER_NAME)) {
IRDA_WARNING("%s(), can't get iobase of 0x%03x\n", __FUNCTION__, IRDA_WARNING("%s(), can't get iobase of 0x%03x\n", __FUNCTION__,
self->io.fir_base); self->io.fir_base);
err = -ENODEV; err = -ENODEV;
@ -354,14 +373,11 @@ static int ali_ircc_open(int i, chipio_t *info)
/* Check dongle id */ /* Check dongle id */
dongle_id = ali_ircc_read_dongle_id(i, info); dongle_id = ali_ircc_read_dongle_id(i, info);
IRDA_MESSAGE("%s(), %s, Found dongle: %s\n", __FUNCTION__, driver_name, dongle_types[dongle_id]); IRDA_MESSAGE("%s(), %s, Found dongle: %s\n", __FUNCTION__,
ALI_IRCC_DRIVER_NAME, dongle_types[dongle_id]);
self->io.dongle_id = dongle_id; self->io.dongle_id = dongle_id;
pmdev = pm_register(PM_SYS_DEV, PM_SYS_IRDA, ali_ircc_pmproc);
if (pmdev)
pmdev->data = self;
IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __FUNCTION__); IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __FUNCTION__);
return 0; return 0;
@ -548,12 +564,11 @@ static int ali_ircc_setup(chipio_t *info)
/* Should be 0x00 in the M1535/M1535D */ /* Should be 0x00 in the M1535/M1535D */
if(version != 0x00) if(version != 0x00)
{ {
IRDA_ERROR("%s, Wrong chip version %02x\n", driver_name, version); IRDA_ERROR("%s, Wrong chip version %02x\n",
ALI_IRCC_DRIVER_NAME, version);
return -1; return -1;
} }
// IRDA_MESSAGE("%s, Found chip at base=0x%03x\n", driver_name, info->cfg_base);
/* Set FIR FIFO Threshold Register */ /* Set FIR FIFO Threshold Register */
switch_bank(iobase, BANK1); switch_bank(iobase, BANK1);
outb(RX_FIFO_Threshold, iobase+FIR_FIFO_TR); outb(RX_FIFO_Threshold, iobase+FIR_FIFO_TR);
@ -583,7 +598,8 @@ static int ali_ircc_setup(chipio_t *info)
/* Switch to SIR space */ /* Switch to SIR space */
FIR2SIR(iobase); FIR2SIR(iobase);
IRDA_MESSAGE("%s, driver loaded (Benjamin Kong)\n", driver_name); IRDA_MESSAGE("%s, driver loaded (Benjamin Kong)\n",
ALI_IRCC_DRIVER_NAME);
/* Enable receive interrupts */ /* Enable receive interrupts */
// outb(UART_IER_RDI, iobase+UART_IER); //benjamin 2000/11/23 01:25PM // outb(UART_IER_RDI, iobase+UART_IER); //benjamin 2000/11/23 01:25PM
@ -647,7 +663,8 @@ static irqreturn_t ali_ircc_interrupt(int irq, void *dev_id,
IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__); IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
if (!dev) { if (!dev) {
IRDA_WARNING("%s: irq %d for unknown device.\n", driver_name, irq); IRDA_WARNING("%s: irq %d for unknown device.\n",
ALI_IRCC_DRIVER_NAME, irq);
return IRQ_NONE; return IRQ_NONE;
} }
@ -1328,7 +1345,8 @@ static int ali_ircc_net_open(struct net_device *dev)
/* Request IRQ and install Interrupt Handler */ /* Request IRQ and install Interrupt Handler */
if (request_irq(self->io.irq, ali_ircc_interrupt, 0, dev->name, dev)) if (request_irq(self->io.irq, ali_ircc_interrupt, 0, dev->name, dev))
{ {
IRDA_WARNING("%s, unable to allocate irq=%d\n", driver_name, IRDA_WARNING("%s, unable to allocate irq=%d\n",
ALI_IRCC_DRIVER_NAME,
self->io.irq); self->io.irq);
return -EAGAIN; return -EAGAIN;
} }
@ -1338,7 +1356,8 @@ static int ali_ircc_net_open(struct net_device *dev)
* failure. * failure.
*/ */
if (request_dma(self->io.dma, dev->name)) { if (request_dma(self->io.dma, dev->name)) {
IRDA_WARNING("%s, unable to allocate dma=%d\n", driver_name, IRDA_WARNING("%s, unable to allocate dma=%d\n",
ALI_IRCC_DRIVER_NAME,
self->io.dma); self->io.dma);
free_irq(self->io.irq, self); free_irq(self->io.irq, self);
return -EAGAIN; return -EAGAIN;
@ -2108,60 +2127,37 @@ static struct net_device_stats *ali_ircc_net_get_stats(struct net_device *dev)
return &self->stats; return &self->stats;
} }
static void ali_ircc_suspend(struct ali_ircc_cb *self) static int ali_ircc_suspend(struct platform_device *dev, pm_message_t state)
{ {
IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__ ); struct ali_ircc_cb *self = platform_get_drvdata(dev);
IRDA_MESSAGE("%s, Suspending\n", driver_name); IRDA_MESSAGE("%s, Suspending\n", ALI_IRCC_DRIVER_NAME);
if (self->io.suspended) if (self->io.suspended)
return; return 0;
ali_ircc_net_close(self->netdev); ali_ircc_net_close(self->netdev);
self->io.suspended = 1; self->io.suspended = 1;
IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
}
static void ali_ircc_wakeup(struct ali_ircc_cb *self)
{
IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__ );
if (!self->io.suspended)
return;
ali_ircc_net_open(self->netdev);
IRDA_MESSAGE("%s, Waking up\n", driver_name);
self->io.suspended = 0;
IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
}
static int ali_ircc_pmproc(struct pm_dev *dev, pm_request_t rqst, void *data)
{
struct ali_ircc_cb *self = (struct ali_ircc_cb*) dev->data;
IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__ );
if (self) {
switch (rqst) {
case PM_SUSPEND:
ali_ircc_suspend(self);
break;
case PM_RESUME:
ali_ircc_wakeup(self);
break;
}
}
IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
return 0; return 0;
} }
static int ali_ircc_resume(struct platform_device *dev)
{
struct ali_ircc_cb *self = platform_get_drvdata(dev);
if (!self->io.suspended)
return 0;
ali_ircc_net_open(self->netdev);
IRDA_MESSAGE("%s, Waking up\n", ALI_IRCC_DRIVER_NAME);
self->io.suspended = 0;
return 0;
}
/* ALi Chip Function */ /* ALi Chip Function */


@ -83,9 +83,9 @@ static struct usb_device_id dongles[] = {
/* Extended Systems, Inc., XTNDAccess IrDA USB (ESI-9685) */ /* Extended Systems, Inc., XTNDAccess IrDA USB (ESI-9685) */
{ USB_DEVICE(0x8e9, 0x100), .driver_info = IUC_SPEED_BUG | IUC_NO_WINDOW }, { USB_DEVICE(0x8e9, 0x100), .driver_info = IUC_SPEED_BUG | IUC_NO_WINDOW },
/* SigmaTel STIR4210/4220/4116 USB IrDA (VFIR) Bridge */ /* SigmaTel STIR4210/4220/4116 USB IrDA (VFIR) Bridge */
{ USB_DEVICE(0x66f, 0x4210), .driver_info = IUC_STIR_4210 | IUC_SPEED_BUG }, { USB_DEVICE(0x66f, 0x4210), .driver_info = IUC_STIR421X | IUC_SPEED_BUG },
{ USB_DEVICE(0x66f, 0x4220), .driver_info = IUC_STIR_4210 | IUC_SPEED_BUG }, { USB_DEVICE(0x66f, 0x4220), .driver_info = IUC_STIR421X | IUC_SPEED_BUG },
{ USB_DEVICE(0x66f, 0x4116), .driver_info = IUC_STIR_4210 | IUC_SPEED_BUG }, { USB_DEVICE(0x66f, 0x4116), .driver_info = IUC_STIR421X | IUC_SPEED_BUG },
{ .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS | { .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS |
USB_DEVICE_ID_MATCH_INT_SUBCLASS, USB_DEVICE_ID_MATCH_INT_SUBCLASS,
.bInterfaceClass = USB_CLASS_APP_SPEC, .bInterfaceClass = USB_CLASS_APP_SPEC,
@ -154,7 +154,7 @@ static void irda_usb_build_header(struct irda_usb_cb *self,
* and if either speed or xbofs (or both) needs * and if either speed or xbofs (or both) needs
* to be changed. * to be changed.
*/ */
if (self->capability & IUC_STIR_4210 && if (self->capability & IUC_STIR421X &&
((self->new_speed != -1) || (self->new_xbofs != -1))) { ((self->new_speed != -1) || (self->new_xbofs != -1))) {
/* With STIR421x, speed and xBOFs must be set at the same /* With STIR421x, speed and xBOFs must be set at the same
@ -318,7 +318,7 @@ static void irda_usb_change_speed_xbofs(struct irda_usb_cb *self)
/* Set the new speed and xbofs in this fake frame */ /* Set the new speed and xbofs in this fake frame */
irda_usb_build_header(self, frame, 1); irda_usb_build_header(self, frame, 1);
if ( self->capability & IUC_STIR_4210 ) { if (self->capability & IUC_STIR421X) {
if (frame[0] == 0) return ; // do nothing if no change if (frame[0] == 0) return ; // do nothing if no change
frame[1] = 0; // other parameters don't change here frame[1] = 0; // other parameters don't change here
frame[2] = 0; frame[2] = 0;
@ -455,7 +455,7 @@ static int irda_usb_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Change setting for next frame */ /* Change setting for next frame */
if ( self->capability & IUC_STIR_4210 ) { if (self->capability & IUC_STIR421X) {
__u8 turnaround_time; __u8 turnaround_time;
__u8* frame; __u8* frame;
turnaround_time = get_turnaround_time( skb ); turnaround_time = get_turnaround_time( skb );
@ -897,10 +897,13 @@ static void irda_usb_receive(struct urb *urb, struct pt_regs *regs)
docopy = (urb->actual_length < IRDA_RX_COPY_THRESHOLD); docopy = (urb->actual_length < IRDA_RX_COPY_THRESHOLD);
/* Allocate a new skb */ /* Allocate a new skb */
if ( self->capability & IUC_STIR_4210 ) if (self->capability & IUC_STIR421X)
newskb = dev_alloc_skb(docopy ? urb->actual_length : IRDA_SKB_MAX_MTU + USB_IRDA_SIGMATEL_HEADER); newskb = dev_alloc_skb(docopy ? urb->actual_length :
IRDA_SKB_MAX_MTU +
USB_IRDA_STIR421X_HEADER);
else else
newskb = dev_alloc_skb(docopy ? urb->actual_length : IRDA_SKB_MAX_MTU); newskb = dev_alloc_skb(docopy ? urb->actual_length :
IRDA_SKB_MAX_MTU);
if (!newskb) { if (!newskb) {
self->stats.rx_dropped++; self->stats.rx_dropped++;
@ -1022,188 +1025,140 @@ static int irda_usb_is_receiving(struct irda_usb_cb *self)
return 0; /* For now */ return 0; /* For now */
} }
#define STIR421X_PATCH_PRODUCT_VER "Product Version: "
#define STIR421X_PATCH_PRODUCT_VERSION_STR "Product Version: " #define STIR421X_PATCH_STMP_TAG "STMP"
#define STIR421X_PATCH_COMPONENT_VERSION_STR "Component Version: " #define STIR421X_PATCH_CODE_OFFSET 512 /* patch image starts before here */
#define STIR421X_PATCH_DATA_TAG_STR "STMP" /* marks end of patch file header (PC DOS text file EOF character) */
#define STIR421X_PATCH_FILE_VERSION_MAX_OFFSET 512 /* version info is before here */ #define STIR421X_PATCH_END_OF_HDR_TAG 0x1A
#define STIR421X_PATCH_FILE_IMAGE_MAX_OFFSET 512 /* patch image starts before here */ #define STIR421X_PATCH_BLOCK_SIZE 1023
#define STIR421X_PATCH_FILE_END_OF_HEADER_TAG 0x1A /* marks end of patch file header (PC DOS text file EOF character) */
/* /*
* Known firmware patches for STIR421x dongles * Function stir421x_fwupload (struct irda_usb_cb *self,
* unsigned char *patch,
* const unsigned int patch_len)
*
* Upload firmware code to SigmaTel 421X IRDA-USB dongle
*/ */
static char * stir421x_patches[] = { static int stir421x_fw_upload(struct irda_usb_cb *self,
"42101001.sb", unsigned char *patch,
"42101002.sb",
};
static int stir421x_get_patch_version(unsigned char * patch, const unsigned long patch_len)
{
unsigned int version_offset;
unsigned long version_major, version_minor, version_build;
unsigned char * version_start;
int version_found = 0;
for (version_offset = 0;
version_offset < STIR421X_PATCH_FILE_END_OF_HEADER_TAG;
version_offset++) {
if (!memcmp(patch + version_offset,
STIR421X_PATCH_PRODUCT_VERSION_STR,
sizeof(STIR421X_PATCH_PRODUCT_VERSION_STR) - 1)) {
version_found = 1;
version_start = patch +
version_offset +
sizeof(STIR421X_PATCH_PRODUCT_VERSION_STR) - 1;
break;
}
}
/* We couldn't find a product version on this patch */
if (!version_found)
return -EINVAL;
/* Let's check if the product version is dotted */
if (version_start[3] != '.' ||
version_start[7] != '.')
return -EINVAL;
version_major = simple_strtoul(version_start, NULL, 10);
version_minor = simple_strtoul(version_start + 4, NULL, 10);
version_build = simple_strtoul(version_start + 8, NULL, 10);
IRDA_DEBUG(2, "%s(), Major: %ld Minor: %ld Build: %ld\n",
__FUNCTION__,
version_major, version_minor, version_build);
return (((version_major) << 12) +
((version_minor) << 8) +
((version_build / 10) << 4) +
(version_build % 10));
}
static int stir421x_upload_patch (struct irda_usb_cb *self,
unsigned char * patch,
const unsigned int patch_len) const unsigned int patch_len)
{ {
int retval = 0; int ret = -ENOMEM;
int actual_len; int actual_len = 0;
unsigned int i = 0, download_amount = 0; unsigned int i;
unsigned char * patch_chunk; unsigned int block_size = 0;
unsigned char *patch_block;
IRDA_DEBUG (2, "%s(), Uploading STIR421x Patch\n", __FUNCTION__); patch_block = kzalloc(STIR421X_PATCH_BLOCK_SIZE, GFP_KERNEL);
if (patch_block == NULL)
patch_chunk = kzalloc(STIR421X_MAX_PATCH_DOWNLOAD_SIZE, GFP_KERNEL);
if (patch_chunk == NULL)
return -ENOMEM; return -ENOMEM;
/* break up patch into 1023-byte sections */ /* break up patch into 1023-byte sections */
for (i = 0; retval >= 0 && i < patch_len; i += download_amount) { for (i = 0; i < patch_len; i += block_size) {
download_amount = patch_len - i; block_size = patch_len - i;
if (download_amount > STIR421X_MAX_PATCH_DOWNLOAD_SIZE)
download_amount = STIR421X_MAX_PATCH_DOWNLOAD_SIZE;
/* download the patch section */ if (block_size > STIR421X_PATCH_BLOCK_SIZE)
memcpy(patch_chunk, patch + i, download_amount); block_size = STIR421X_PATCH_BLOCK_SIZE;
retval = usb_bulk_msg (self->usbdev, /* upload the patch section */
usb_sndbulkpipe (self->usbdev, memcpy(patch_block, patch + i, block_size);
ret = usb_bulk_msg(self->usbdev,
usb_sndbulkpipe(self->usbdev,
self->bulk_out_ep), self->bulk_out_ep),
patch_chunk, download_amount, patch_block, block_size,
&actual_len, msecs_to_jiffies (500)); &actual_len, msecs_to_jiffies(500));
IRDA_DEBUG (2, "%s(), Sent %u bytes\n", __FUNCTION__, IRDA_DEBUG(3,"%s(): Bulk send %u bytes, ret=%d\n",
actual_len); __FUNCTION__, actual_len, ret);
if (retval == 0)
mdelay(10); if (ret < 0)
break;
} }
kfree(patch_chunk); kfree(patch_block);
if (i != patch_len) { return ret;
IRDA_ERROR ("%s(), Pushed %d bytes (!= patch_len (%d))\n",
__FUNCTION__, i, patch_len);
retval = -EIO;
} }
if (retval < 0) /*
/* todo - mark device as not ready */ * Function stir421x_patch_device(struct irda_usb_cb *self)
IRDA_ERROR ("%s(), STIR421x patch upload failed (%d)\n", *
__FUNCTION__, retval); * Get a firmware code from userspase using hotplug request_firmware() call
*/
return retval;
}
static int stir421x_patch_device(struct irda_usb_cb *self) static int stir421x_patch_device(struct irda_usb_cb *self)
{ {
unsigned int i, patch_found = 0, data_found = 0, data_offset; unsigned int i;
int patch_version, ret = 0; int ret;
const struct firmware *fw_entry; char stir421x_fw_name[11];
const struct firmware *fw;
unsigned char *fw_version_ptr; /* pointer to version string */
unsigned long fw_version = 0;
for (i = 0; i < ARRAY_SIZE(stir421x_patches); i++) { /*
if(request_firmware(&fw_entry, stir421x_patches[i], &self->usbdev->dev) != 0) { * Known firmware patch file names for STIR421x dongles
IRDA_ERROR( "%s(), Patch %s is not available\n", __FUNCTION__, stir421x_patches[i]); * are "42101001.sb" or "42101002.sb"
continue; */
} sprintf(stir421x_fw_name, "4210%4X.sb",
self->usbdev->descriptor.bcdDevice);
ret = request_firmware(&fw, stir421x_fw_name, &self->usbdev->dev);
if (ret < 0)
return ret;
/* We found a patch from userspace */ /* We get a patch from userspace */
patch_version = stir421x_get_patch_version (fw_entry->data, fw_entry->size); IRDA_MESSAGE("%s(): Received firmware %s (%u bytes)\n",
__FUNCTION__, stir421x_fw_name, fw->size);
if (patch_version < 0) {
/* Couldn't fetch a version, let's move on to the next file */
IRDA_ERROR("%s(), version parsing failed\n", __FUNCTION__);
ret = patch_version;
release_firmware(fw_entry);
continue;
}
if (patch_version != self->usbdev->descriptor.bcdDevice) {
/* Patch version and device don't match */
IRDA_ERROR ("%s(), wrong patch version (%d <-> %d)\n",
__FUNCTION__,
patch_version, self->usbdev->descriptor.bcdDevice);
ret = -EINVAL; ret = -EINVAL;
release_firmware(fw_entry);
continue;
}
/* If we're here, we've found a correct patch */ /* Get the bcd product version */
patch_found = 1; if (!memcmp(fw->data, STIR421X_PATCH_PRODUCT_VER,
break; sizeof(STIR421X_PATCH_PRODUCT_VER) - 1)) {
fw_version_ptr = fw->data +
sizeof(STIR421X_PATCH_PRODUCT_VER) - 1;
} /* Let's check if the product version is dotted */
if (fw_version_ptr[3] == '.' &&
fw_version_ptr[7] == '.') {
unsigned long major, minor, build;
major = simple_strtoul(fw_version_ptr, NULL, 10);
minor = simple_strtoul(fw_version_ptr + 4, NULL, 10);
build = simple_strtoul(fw_version_ptr + 8, NULL, 10);
/* We couldn't find a valid firmware, let's leave */ fw_version = (major << 12)
if (!patch_found) + (minor << 8)
return ret; + ((build / 10) << 4)
+ (build % 10);
/* The actual image starts after the "STMP" keyword */ IRDA_DEBUG(3, "%s(): Firmware Product version %ld\n",
for (data_offset = 0; data_offset < STIR421X_PATCH_FILE_IMAGE_MAX_OFFSET; data_offset++) { __FUNCTION__, fw_version);
if (!memcmp(fw_entry->data + data_offset,
STIR421X_PATCH_DATA_TAG_STR,
sizeof(STIR421X_PATCH_FILE_IMAGE_MAX_OFFSET))) {
IRDA_DEBUG(2, "%s(), found patch data for STIR421x at offset %d\n",
__FUNCTION__, data_offset);
data_found = 1;
break;
} }
} }
/* We couldn't find "STMP" from the header */ if (self->usbdev->descriptor.bcdDevice == fw_version) {
if (!data_found) /*
return -EINVAL; * If we're here, we've found a correct patch
* The actual image starts after the "STMP" keyword
* so forward to the firmware header tag
*/
for (i = 0; (fw->data[i] != STIR421X_PATCH_END_OF_HDR_TAG)
&& (i < fw->size); i++) ;
/* here we check for the out of buffer case */
if ((STIR421X_PATCH_END_OF_HDR_TAG == fw->data[i])
&& (i < STIR421X_PATCH_CODE_OFFSET)) {
if (!memcmp(fw->data + i + 1, STIR421X_PATCH_STMP_TAG,
sizeof(STIR421X_PATCH_STMP_TAG) - 1)) {
/* Let's upload the patch to the target */ /* We can upload the patch to the target */
ret = stir421x_upload_patch(self, i += sizeof(STIR421X_PATCH_STMP_TAG);
&fw_entry->data[data_offset + sizeof(STIR421X_PATCH_FILE_IMAGE_MAX_OFFSET)], ret = stir421x_fw_upload(self, &fw->data[i],
fw_entry->size - (data_offset + sizeof(STIR421X_PATCH_FILE_IMAGE_MAX_OFFSET))); fw->size - i);
}
}
}
release_firmware(fw_entry); release_firmware(fw);
return ret; return ret;
} }
@ -1702,12 +1657,12 @@ static int irda_usb_probe(struct usb_interface *intf,
init_timer(&self->rx_defer_timer); init_timer(&self->rx_defer_timer);
self->capability = id->driver_info; self->capability = id->driver_info;
self->needspatch = ((self->capability & IUC_STIR_4210) != 0) ; self->needspatch = ((self->capability & IUC_STIR421X) != 0);
/* Create all of the needed urbs */ /* Create all of the needed urbs */
if (self->capability & IUC_STIR_4210) { if (self->capability & IUC_STIR421X) {
self->max_rx_urb = IU_SIGMATEL_MAX_RX_URBS; self->max_rx_urb = IU_SIGMATEL_MAX_RX_URBS;
self->header_length = USB_IRDA_SIGMATEL_HEADER; self->header_length = USB_IRDA_STIR421X_HEADER;
} else { } else {
self->max_rx_urb = IU_MAX_RX_URBS; self->max_rx_urb = IU_MAX_RX_URBS;
self->header_length = USB_IRDA_HEADER; self->header_length = USB_IRDA_HEADER;
@ -1813,8 +1768,8 @@ static int irda_usb_probe(struct usb_interface *intf,
/* Now we fetch and upload the firmware patch */ /* Now we fetch and upload the firmware patch */
ret = stir421x_patch_device(self); ret = stir421x_patch_device(self);
self->needspatch = (ret < 0); self->needspatch = (ret < 0);
if (ret < 0) { if (self->needspatch) {
printk("patch_device failed\n"); IRDA_ERROR("STIR421X: Couldn't upload patch\n");
goto err_out_5; goto err_out_5;
} }


@@ -34,9 +34,6 @@
 #include <net/irda/irda.h>
 #include <net/irda/irda_device.h> /* struct irlap_cb */
-#define PATCH_FILE_SIZE_MAX 65536
-#define PATCH_FILE_SIZE_MIN 80
 #define RX_COPY_THRESHOLD 200
 #define IRDA_USB_MAX_MTU 2051
 #define IRDA_USB_SPEED_MTU 64 /* Weird, but work like this */
@@ -107,14 +104,15 @@
 #define IUC_SMALL_PKT 0x10 /* Device doesn't behave with big Rx packets */
 #define IUC_MAX_WINDOW 0x20 /* Device underestimate the Rx window */
 #define IUC_MAX_XBOFS 0x40 /* Device need more xbofs than advertised */
-#define IUC_STIR_4210 0x80 /* SigmaTel 4210/4220/4116 VFIR */
+#define IUC_STIR421X 0x80 /* SigmaTel 4210/4220/4116 VFIR */
 /* USB class definitions */
 #define USB_IRDA_HEADER 0x01
 #define USB_CLASS_IRDA 0x02 /* USB_CLASS_APP_SPEC subclass */
 #define USB_DT_IRDA 0x21
-#define USB_IRDA_SIGMATEL_HEADER 0x03
-#define IU_SIGMATEL_MAX_RX_URBS (IU_MAX_ACTIVE_RX_URBS + USB_IRDA_SIGMATEL_HEADER)
+#define USB_IRDA_STIR421X_HEADER 0x03
+#define IU_SIGMATEL_MAX_RX_URBS (IU_MAX_ACTIVE_RX_URBS + \
+USB_IRDA_STIR421X_HEADER)
 struct irda_class_desc {
 __u8 bLength;

drivers/net/irda/mcs7780.c (new file, 1009 lines): diff suppressed because the file is too large.

drivers/net/irda/mcs7780.h (new file, 167 lines):

@ -0,0 +1,167 @@
/*****************************************************************************
*
* Filename: mcs7780.h
* Version: 0.2-alpha
* Description: Irda MosChip USB Dongle
* Status: Experimental
* Authors: Lukasz Stelmach <stlman@poczta.fm>
* Brian Pugh <bpugh@cs.pdx.edu>
*
* Copyright (C) 2005, Lukasz Stelmach <stlman@poczta.fm>
* Copyright (C) 2005, Brian Pugh <bpugh@cs.pdx.edu>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*****************************************************************************/
#ifndef _MCS7780_H
#define _MCS7780_H
#define MCS_MODE_SIR 0
#define MCS_MODE_MIR 1
#define MCS_MODE_FIR 2
#define MCS_CTRL_TIMEOUT 500
#define MCS_XMIT_TIMEOUT 500
/* Possible transceiver types */
#define MCS_TSC_VISHAY 0 /* Vishay TFD, default choice */
#define MCS_TSC_AGILENT 1 /* Agilent 3602/3600 */
#define MCS_TSC_SHARP 2 /* Sharp GP2W1000YP */
/* Requests */
#define MCS_RD_RTYPE 0xC0
#define MCS_WR_RTYPE 0x40
#define MCS_RDREQ 0x0F
#define MCS_WRREQ 0x0E
/* Register 0x00 */
#define MCS_MODE_REG 0
#define MCS_FIR ((__u16)0x0001)
#define MCS_SIR16US ((__u16)0x0002)
#define MCS_BBTG ((__u16)0x0004)
#define MCS_ASK ((__u16)0x0008)
#define MCS_PARITY ((__u16)0x0010)
/* SIR/MIR speed constants */
#define MCS_SPEED_SHIFT 5
#define MCS_SPEED_MASK ((__u16)0x00E0)
#define MCS_SPEED(x) ((x & MCS_SPEED_MASK) >> MCS_SPEED_SHIFT)
#define MCS_SPEED_2400 ((0 << MCS_SPEED_SHIFT) & MCS_SPEED_MASK)
#define MCS_SPEED_9600 ((1 << MCS_SPEED_SHIFT) & MCS_SPEED_MASK)
#define MCS_SPEED_19200 ((2 << MCS_SPEED_SHIFT) & MCS_SPEED_MASK)
#define MCS_SPEED_38400 ((3 << MCS_SPEED_SHIFT) & MCS_SPEED_MASK)
#define MCS_SPEED_57600 ((4 << MCS_SPEED_SHIFT) & MCS_SPEED_MASK)
#define MCS_SPEED_115200 ((5 << MCS_SPEED_SHIFT) & MCS_SPEED_MASK)
#define MCS_SPEED_576000 ((6 << MCS_SPEED_SHIFT) & MCS_SPEED_MASK)
#define MCS_SPEED_1152000 ((7 << MCS_SPEED_SHIFT) & MCS_SPEED_MASK)
#define MCS_PLLPWDN ((__u16)0x0100)
#define MCS_DRIVER ((__u16)0x0200)
#define MCS_DTD ((__u16)0x0400)
#define MCS_DIR ((__u16)0x0800)
#define MCS_SIPEN ((__u16)0x1000)
#define MCS_SENDSIP ((__u16)0x2000)
#define MCS_CHGDIR ((__u16)0x4000)
#define MCS_RESET ((__u16)0x8000)
/* Register 0x02 */
#define MCS_XCVR_REG 2
#define MCS_MODE0 ((__u16)0x0001)
#define MCS_STFIR ((__u16)0x0002)
#define MCS_XCVR_CONF ((__u16)0x0004)
#define MCS_RXFAST ((__u16)0x0008)
/* TXCUR [6:4] */
#define MCS_TXCUR_SHIFT 4
#define MCS_TXCUR_MASK ((__u16)0x0070)
#define MCS_TXCUR(x) ((x & MCS_TXCUR_MASK) >> MCS_TXCUR_SHIFT)
#define MCS_SETTXCUR(x,y) \
((x & ~MCS_TXCUR_MASK) | (y << MCS_TXCUR_SHIFT) & MCS_TXCUR_MASK)
#define MCS_MODE1 ((__u16)0x0080)
#define MCS_SMODE0 ((__u16)0x0100)
#define MCS_SMODE1 ((__u16)0x0200)
#define MCS_INVTX ((__u16)0x0400)
#define MCS_INVRX ((__u16)0x0800)
#define MCS_MINRXPW_REG 4
#define MCS_RESV_REG 7
#define MCS_IRINTX ((__u16)0x0001)
#define MCS_IRINRX ((__u16)0x0002)
struct mcs_cb {
struct usb_device *usbdev; /* init: probe_irda */
struct net_device *netdev; /* network layer */
struct irlap_cb *irlap; /* The link layer we are binded to */
struct net_device_stats stats; /* network statistics */
struct qos_info qos;
unsigned int speed; /* Current speed */
unsigned int new_speed; /* new speed */
struct work_struct work; /* Change speed work */
struct sk_buff *tx_pending;
char in_buf[4096]; /* transmit/receive buffer */
char out_buf[4096]; /* transmit/receive buffer */
__u8 *fifo_status;
iobuff_t rx_buff; /* receive unwrap state machine */
struct timeval rx_time;
spinlock_t lock;
int receiving;
__u8 ep_in;
__u8 ep_out;
struct urb *rx_urb;
struct urb *tx_urb;
int transceiver_type;
int sir_tweak;
int receive_mode;
};
static int mcs_set_reg(struct mcs_cb *mcs, __u16 reg, __u16 val);
static int mcs_get_reg(struct mcs_cb *mcs, __u16 reg, __u16 * val);
static inline int mcs_setup_transceiver_vishay(struct mcs_cb *mcs);
static inline int mcs_setup_transceiver_agilent(struct mcs_cb *mcs);
static inline int mcs_setup_transceiver_sharp(struct mcs_cb *mcs);
static inline int mcs_setup_transceiver(struct mcs_cb *mcs);
static inline int mcs_wrap_sir_skb(struct sk_buff *skb, __u8 * buf);
static unsigned mcs_wrap_fir_skb(const struct sk_buff *skb, __u8 *buf);
static unsigned mcs_wrap_mir_skb(const struct sk_buff *skb, __u8 *buf);
static void mcs_unwrap_mir(struct mcs_cb *mcs, __u8 *buf, int len);
static void mcs_unwrap_fir(struct mcs_cb *mcs, __u8 *buf, int len);
static inline int mcs_setup_urbs(struct mcs_cb *mcs);
static inline int mcs_receive_start(struct mcs_cb *mcs);
static inline int mcs_find_endpoints(struct mcs_cb *mcs,
struct usb_host_endpoint *ep, int epnum);
static int mcs_speed_change(struct mcs_cb *mcs);
static int mcs_net_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd);
static int mcs_net_close(struct net_device *netdev);
static int mcs_net_open(struct net_device *netdev);
static struct net_device_stats *mcs_net_get_stats(struct net_device *netdev);
static void mcs_receive_irq(struct urb *urb, struct pt_regs *regs);
static void mcs_send_irq(struct urb *urb, struct pt_regs *regs);
static int mcs_hard_xmit(struct sk_buff *skb, struct net_device *netdev);
static int mcs_probe(struct usb_interface *intf,
const struct usb_device_id *id);
static void mcs_disconnect(struct usb_interface *intf);
#endif /* _MCS7780_H */

View File

@ -50,6 +50,7 @@
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/usb.h> #include <linux/usb.h>
#include <linux/crc32.h> #include <linux/crc32.h>
#include <linux/kthread.h>
#include <net/irda/irda.h> #include <net/irda/irda.h>
#include <net/irda/irlap.h> #include <net/irda/irlap.h>
#include <net/irda/irda_device.h> #include <net/irda/irda_device.h>
@ -173,9 +174,7 @@ struct stir_cb {
struct qos_info qos; struct qos_info qos;
unsigned speed; /* Current speed */ unsigned speed; /* Current speed */
wait_queue_head_t thr_wait; /* transmit thread wakeup */ struct task_struct *thread; /* transmit thread */
struct completion thr_exited;
pid_t thr_pid;
struct sk_buff *tx_pending; struct sk_buff *tx_pending;
void *io_buf; /* transmit/receive buffer */ void *io_buf; /* transmit/receive buffer */
@ -577,7 +576,7 @@ static int stir_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
SKB_LINEAR_ASSERT(skb); SKB_LINEAR_ASSERT(skb);
skb = xchg(&stir->tx_pending, skb); skb = xchg(&stir->tx_pending, skb);
wake_up(&stir->thr_wait); wake_up_process(stir->thread);
/* this should never happen unless stop/wakeup problem */ /* this should never happen unless stop/wakeup problem */
if (unlikely(skb)) { if (unlikely(skb)) {
@ -753,13 +752,7 @@ static int stir_transmit_thread(void *arg)
struct net_device *dev = stir->netdev; struct net_device *dev = stir->netdev;
struct sk_buff *skb; struct sk_buff *skb;
daemonize("%s", dev->name); while (!kthread_should_stop()) {
allow_signal(SIGTERM);
while (netif_running(dev)
&& netif_device_present(dev)
&& !signal_pending(current))
{
#ifdef CONFIG_PM #ifdef CONFIG_PM
/* if suspending, then power off and wait */ /* if suspending, then power off and wait */
if (unlikely(freezing(current))) { if (unlikely(freezing(current))) {
@ -813,10 +806,11 @@ static int stir_transmit_thread(void *arg)
} }
/* sleep if nothing to send */ /* sleep if nothing to send */
wait_event_interruptible(stir->thr_wait, stir->tx_pending); set_current_state(TASK_INTERRUPTIBLE);
} schedule();
complete_and_exit (&stir->thr_exited, 0); }
return 0;
} }
@ -859,7 +853,7 @@ static void stir_rcv_irq(struct urb *urb, struct pt_regs *regs)
warn("%s: usb receive submit error: %d", warn("%s: usb receive submit error: %d",
stir->netdev->name, err); stir->netdev->name, err);
stir->receiving = 0; stir->receiving = 0;
wake_up(&stir->thr_wait); wake_up_process(stir->thread);
} }
} }
@ -928,10 +922,10 @@ static int stir_net_open(struct net_device *netdev)
} }
/** Start kernel thread for transmit. */ /** Start kernel thread for transmit. */
stir->thr_pid = kernel_thread(stir_transmit_thread, stir, stir->thread = kthread_run(stir_transmit_thread, stir,
CLONE_FS|CLONE_FILES); "%s", stir->netdev->name);
if (stir->thr_pid < 0) { if (IS_ERR(stir->thread)) {
err = stir->thr_pid; err = PTR_ERR(stir->thread);
err("stir4200: unable to start kernel thread"); err("stir4200: unable to start kernel thread");
goto err_out6; goto err_out6;
} }
@ -968,8 +962,7 @@ static int stir_net_close(struct net_device *netdev)
netif_stop_queue(netdev); netif_stop_queue(netdev);
/* Kill transmit thread */ /* Kill transmit thread */
kill_proc(stir->thr_pid, SIGTERM, 1); kthread_stop(stir->thread);
wait_for_completion(&stir->thr_exited);
kfree(stir->fifo_status); kfree(stir->fifo_status);
/* Mop up receive urb's */ /* Mop up receive urb's */
@ -1084,9 +1077,6 @@ static int stir_probe(struct usb_interface *intf,
stir->qos.min_turn_time.bits &= qos_mtt_bits; stir->qos.min_turn_time.bits &= qos_mtt_bits;
irda_qos_bits_to_value(&stir->qos); irda_qos_bits_to_value(&stir->qos);
init_completion (&stir->thr_exited);
init_waitqueue_head (&stir->thr_wait);
/* Override the network functions we need to use */ /* Override the network functions we need to use */
net->hard_start_xmit = stir_hard_xmit; net->hard_start_xmit = stir_hard_xmit;
net->open = stir_net_open; net->open = stir_net_open;
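For reference, a minimal sketch of the kthread pattern these stir4200 hunks move to; example_priv, example_open and example_close are hypothetical names, not the driver's own:

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>

static int example_tx_thread(void *arg)
{
	struct example_priv *priv = arg;	/* hypothetical private struct */

	while (!kthread_should_stop()) {
		/* ... push pending tx work to the hardware ... */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();			/* sleeps until wake_up_process() */
	}
	return 0;
}

static int example_open(struct example_priv *priv, const char *name)
{
	priv->thread = kthread_run(example_tx_thread, priv, "%s", name);
	if (IS_ERR(priv->thread))
		return PTR_ERR(priv->thread);
	return 0;
}

/* xmit/rx paths call wake_up_process(priv->thread) when there is work */

static void example_close(struct example_priv *priv)
{
	kthread_stop(priv->thread);	/* wakes the thread and waits for exit */
}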

View File

@ -959,7 +959,7 @@ static int vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
|| (now.tv_sec==ready.tv_sec && now.tv_usec>=ready.tv_usec)) || (now.tv_sec==ready.tv_sec && now.tv_usec>=ready.tv_usec))
break; break;
udelay(100); udelay(100);
/* must not sleep here - we are called under xmit_lock! */ /* must not sleep here - called under netif_tx_lock! */
} }
} }

View File

@ -1200,7 +1200,7 @@ static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
} }
if (has_tiny_unaligned_frags(skb)) { if (has_tiny_unaligned_frags(skb)) {
if ((skb_linearize(skb, GFP_ATOMIC) != 0)) { if (__skb_linearize(skb)) {
stats->tx_dropped++; stats->tx_dropped++;
printk(KERN_DEBUG "%s: failed to linearize tiny " printk(KERN_DEBUG "%s: failed to linearize tiny "
"unaligned fragment\n", dev->name); "unaligned fragment\n", dev->name);

View File

@ -318,12 +318,12 @@ performance critical codepaths:
The rx process only runs in the interrupt handler. Access from outside The rx process only runs in the interrupt handler. Access from outside
the interrupt handler is only permitted after disable_irq(). the interrupt handler is only permitted after disable_irq().
The rx process usually runs under the dev->xmit_lock. If np->intr_tx_reap The rx process usually runs under the netif_tx_lock. If np->intr_tx_reap
is set, then access is permitted under spin_lock_irq(&np->lock). is set, then access is permitted under spin_lock_irq(&np->lock).
Thus configuration functions that want to access everything must call Thus configuration functions that want to access everything must call
disable_irq(dev->irq); disable_irq(dev->irq);
spin_lock_bh(dev->xmit_lock); netif_tx_lock_bh(dev);
spin_lock_irq(&np->lock); spin_lock_irq(&np->lock);
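A minimal sketch of the full take/release sequence implied by the text above, assuming the usual np private pointer; example_reconfigure and struct netdev_private are hypothetical names, not code from natsemi.c:

static void example_reconfigure(struct net_device *dev,
				struct netdev_private *np)
{
	disable_irq(dev->irq);		/* quiesce the rx/interrupt path */
	netif_tx_lock_bh(dev);		/* exclude hard_start_xmit and tx reap */
	spin_lock_irq(&np->lock);	/* finally the driver's own lock */

	/* ... touch rings and chip registers safely here ... */

	spin_unlock_irq(&np->lock);	/* release in the reverse order */
	netif_tx_unlock_bh(dev);
	enable_irq(dev->irq);
}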
IV. Notes IV. Notes

View File

@ -1609,8 +1609,6 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
kfree_skb(skb); kfree_skb(skb);
skb = ns; skb = ns;
} }
else if (!pskb_may_pull(skb, skb->len))
goto err;
else else
skb->ip_summed = CHECKSUM_NONE; skb->ip_summed = CHECKSUM_NONE;

View File

@ -69,8 +69,8 @@
#define DRV_MODULE_NAME "tg3" #define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": " #define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "3.59" #define DRV_MODULE_VERSION "3.60"
#define DRV_MODULE_RELDATE "June 8, 2006" #define DRV_MODULE_RELDATE "June 17, 2006"
#define TG3_DEF_MAC_MODE 0 #define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0 #define TG3_DEF_RX_MODE 0
@ -229,6 +229,8 @@ static struct pci_device_id tg3_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M, { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787, { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M, { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M,
@ -2965,6 +2967,27 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
return err; return err;
} }
/* This is called whenever we suspect that the system chipset is re-
* ordering the sequence of MMIO to the tx send mailbox. The symptom
* is bogus tx completions. We try to recover by setting the
* TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
* in the workqueue.
*/
static void tg3_tx_recover(struct tg3 *tp)
{
BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
tp->write32_tx_mbox == tg3_write_indirect_mbox);
printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
"mapped I/O cycles to the network device, attempting to "
"recover. Please report the problem to the driver maintainer "
"and include system chipset information.\n", tp->dev->name);
spin_lock(&tp->lock);
tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
spin_unlock(&tp->lock);
}
/* Tigon3 never reports partial packet sends. So we do not /* Tigon3 never reports partial packet sends. So we do not
* need special logic to handle SKBs that have not had all * need special logic to handle SKBs that have not had all
* of their frags sent yet, like SunGEM does. * of their frags sent yet, like SunGEM does.
@ -2977,9 +3000,13 @@ static void tg3_tx(struct tg3 *tp)
while (sw_idx != hw_idx) { while (sw_idx != hw_idx) {
struct tx_ring_info *ri = &tp->tx_buffers[sw_idx]; struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
struct sk_buff *skb = ri->skb; struct sk_buff *skb = ri->skb;
int i; int i, tx_bug = 0;
if (unlikely(skb == NULL)) {
tg3_tx_recover(tp);
return;
}
BUG_ON(skb == NULL);
pci_unmap_single(tp->pdev, pci_unmap_single(tp->pdev,
pci_unmap_addr(ri, mapping), pci_unmap_addr(ri, mapping),
skb_headlen(skb), skb_headlen(skb),
@ -2990,10 +3017,9 @@ static void tg3_tx(struct tg3 *tp)
sw_idx = NEXT_TX(sw_idx); sw_idx = NEXT_TX(sw_idx);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
BUG_ON(sw_idx == hw_idx);
ri = &tp->tx_buffers[sw_idx]; ri = &tp->tx_buffers[sw_idx];
BUG_ON(ri->skb != NULL); if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
tx_bug = 1;
pci_unmap_page(tp->pdev, pci_unmap_page(tp->pdev,
pci_unmap_addr(ri, mapping), pci_unmap_addr(ri, mapping),
@ -3004,6 +3030,11 @@ static void tg3_tx(struct tg3 *tp)
} }
dev_kfree_skb(skb); dev_kfree_skb(skb);
if (unlikely(tx_bug)) {
tg3_tx_recover(tp);
return;
}
} }
tp->tx_cons = sw_idx; tp->tx_cons = sw_idx;
@ -3331,6 +3362,11 @@ static int tg3_poll(struct net_device *netdev, int *budget)
/* run TX completion thread */ /* run TX completion thread */
if (sblk->idx[0].tx_consumer != tp->tx_cons) { if (sblk->idx[0].tx_consumer != tp->tx_cons) {
tg3_tx(tp); tg3_tx(tp);
if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) {
netif_rx_complete(netdev);
schedule_work(&tp->reset_task);
return 0;
}
} }
/* run RX thread, within the bounds set by NAPI. /* run RX thread, within the bounds set by NAPI.
@ -3391,12 +3427,10 @@ static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
if (irq_sync) if (irq_sync)
tg3_irq_quiesce(tp); tg3_irq_quiesce(tp);
spin_lock_bh(&tp->lock); spin_lock_bh(&tp->lock);
spin_lock(&tp->tx_lock);
} }
static inline void tg3_full_unlock(struct tg3 *tp) static inline void tg3_full_unlock(struct tg3 *tp)
{ {
spin_unlock(&tp->tx_lock);
spin_unlock_bh(&tp->lock); spin_unlock_bh(&tp->lock);
} }
@ -3579,6 +3613,13 @@ static void tg3_reset_task(void *_data)
restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER; restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER; tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
tp->write32_tx_mbox = tg3_write32_tx_mbox;
tp->write32_rx_mbox = tg3_write_flush_reg32;
tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
}
tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
tg3_init_hw(tp, 1); tg3_init_hw(tp, 1);
@ -3718,14 +3759,11 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
len = skb_headlen(skb); len = skb_headlen(skb);
/* No BH disabling for tx_lock here. We are running in BH disabled /* We are running in BH disabled context with netif_tx_lock
* context and TX reclaim runs via tp->poll inside of a software * and TX reclaim runs via tp->poll inside of a software
* interrupt. Furthermore, IRQ processing runs lockless so we have * interrupt. Furthermore, IRQ processing runs lockless so we have
* no IRQ context deadlocks to worry about either. Rejoice! * no IRQ context deadlocks to worry about either. Rejoice!
*/ */
if (!spin_trylock(&tp->tx_lock))
return NETDEV_TX_LOCKED;
if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) { if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
if (!netif_queue_stopped(dev)) { if (!netif_queue_stopped(dev)) {
netif_stop_queue(dev); netif_stop_queue(dev);
@ -3734,7 +3772,6 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
printk(KERN_ERR PFX "%s: BUG! Tx Ring full when " printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
"queue awake!\n", dev->name); "queue awake!\n", dev->name);
} }
spin_unlock(&tp->tx_lock);
return NETDEV_TX_BUSY; return NETDEV_TX_BUSY;
} }
@ -3817,15 +3854,16 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry); tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
tp->tx_prod = entry; tp->tx_prod = entry;
if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) { if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
spin_lock(&tp->tx_lock);
netif_stop_queue(dev); netif_stop_queue(dev);
if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH) if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
netif_wake_queue(tp->dev); netif_wake_queue(tp->dev);
spin_unlock(&tp->tx_lock);
} }
out_unlock: out_unlock:
mmiowb(); mmiowb();
spin_unlock(&tp->tx_lock);
dev->trans_start = jiffies; dev->trans_start = jiffies;
@ -3844,14 +3882,11 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
len = skb_headlen(skb); len = skb_headlen(skb);
/* No BH disabling for tx_lock here. We are running in BH disabled /* We are running in BH disabled context with netif_tx_lock
* context and TX reclaim runs via tp->poll inside of a software * and TX reclaim runs via tp->poll inside of a software
* interrupt. Furthermore, IRQ processing runs lockless so we have * interrupt. Furthermore, IRQ processing runs lockless so we have
* no IRQ context deadlocks to worry about either. Rejoice! * no IRQ context deadlocks to worry about either. Rejoice!
*/ */
if (!spin_trylock(&tp->tx_lock))
return NETDEV_TX_LOCKED;
if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) { if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
if (!netif_queue_stopped(dev)) { if (!netif_queue_stopped(dev)) {
netif_stop_queue(dev); netif_stop_queue(dev);
@ -3860,7 +3895,6 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
printk(KERN_ERR PFX "%s: BUG! Tx Ring full when " printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
"queue awake!\n", dev->name); "queue awake!\n", dev->name);
} }
spin_unlock(&tp->tx_lock);
return NETDEV_TX_BUSY; return NETDEV_TX_BUSY;
} }
@ -3998,15 +4032,16 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry); tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
tp->tx_prod = entry; tp->tx_prod = entry;
if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) { if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
spin_lock(&tp->tx_lock);
netif_stop_queue(dev); netif_stop_queue(dev);
if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH) if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
netif_wake_queue(tp->dev); netif_wake_queue(tp->dev);
spin_unlock(&tp->tx_lock);
} }
out_unlock: out_unlock:
mmiowb(); mmiowb();
spin_unlock(&tp->tx_lock);
dev->trans_start = jiffies; dev->trans_start = jiffies;
@ -11243,7 +11278,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
SET_MODULE_OWNER(dev); SET_MODULE_OWNER(dev);
SET_NETDEV_DEV(dev, &pdev->dev); SET_NETDEV_DEV(dev, &pdev->dev);
dev->features |= NETIF_F_LLTX;
#if TG3_VLAN_TAG_USED #if TG3_VLAN_TAG_USED
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
dev->vlan_rx_register = tg3_vlan_rx_register; dev->vlan_rx_register = tg3_vlan_rx_register;

View File

@ -2074,12 +2074,22 @@ struct tg3 {
/* SMP locking strategy: /* SMP locking strategy:
* *
* lock: Held during all operations except TX packet * lock: Held during reset, PHY access, timer, and when
* processing. * updating tg3_flags and tg3_flags2.
* *
* tx_lock: Held during tg3_start_xmit and tg3_tx * tx_lock: Held during tg3_start_xmit and tg3_tx only
* when calling netif_[start|stop]_queue.
* tg3_start_xmit is protected by netif_tx_lock.
* *
* Both of these locks are to be held with BH safety. * Both of these locks are to be held with BH safety.
*
* Because the IRQ handler, tg3_poll, and tg3_start_xmit
* are running lockless, it is necessary to completely
* quiesce the chip with tg3_netif_stop and tg3_full_lock
* before reconfiguring the device.
*
* indirect_lock: Held when accessing registers indirectly
* with IRQ disabling.
*/ */
spinlock_t lock; spinlock_t lock;
spinlock_t indirect_lock; spinlock_t indirect_lock;
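A rough sketch of the reconfiguration sequence described in the comment above; tg3_netif_start is assumed as the counterpart of tg3_netif_stop, and this is an illustration rather than a verbatim excerpt from tg3.c:

static void example_tg3_reconfigure(struct tg3 *tp)
{
	tg3_netif_stop(tp);	/* stop NAPI polling and the tx queue */
	tg3_full_lock(tp, 1);	/* irq_sync = 1 also quiesces the IRQ handler */

	/* ... reprogram the chip, update tg3_flags/tg3_flags2 ... */

	tg3_full_unlock(tp);
	tg3_netif_start(tp);
}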
@ -2155,11 +2165,7 @@ struct tg3 {
#define TG3_FLAG_ENABLE_ASF 0x00000020 #define TG3_FLAG_ENABLE_ASF 0x00000020
#define TG3_FLAG_5701_REG_WRITE_BUG 0x00000040 #define TG3_FLAG_5701_REG_WRITE_BUG 0x00000040
#define TG3_FLAG_POLL_SERDES 0x00000080 #define TG3_FLAG_POLL_SERDES 0x00000080
#if defined(CONFIG_X86)
#define TG3_FLAG_MBOX_WRITE_REORDER 0x00000100 #define TG3_FLAG_MBOX_WRITE_REORDER 0x00000100
#else
#define TG3_FLAG_MBOX_WRITE_REORDER 0 /* disables code too */
#endif
#define TG3_FLAG_PCIX_TARGET_HWBUG 0x00000200 #define TG3_FLAG_PCIX_TARGET_HWBUG 0x00000200
#define TG3_FLAG_WOL_SPEED_100MB 0x00000400 #define TG3_FLAG_WOL_SPEED_100MB 0x00000400
#define TG3_FLAG_WOL_ENABLE 0x00000800 #define TG3_FLAG_WOL_ENABLE 0x00000800
@ -2172,6 +2178,7 @@ struct tg3 {
#define TG3_FLAG_PCI_HIGH_SPEED 0x00040000 #define TG3_FLAG_PCI_HIGH_SPEED 0x00040000
#define TG3_FLAG_PCI_32BIT 0x00080000 #define TG3_FLAG_PCI_32BIT 0x00080000
#define TG3_FLAG_SRAM_USE_CONFIG 0x00100000 #define TG3_FLAG_SRAM_USE_CONFIG 0x00100000
#define TG3_FLAG_TX_RECOVERY_PENDING 0x00200000
#define TG3_FLAG_SERDES_WOL_CAP 0x00400000 #define TG3_FLAG_SERDES_WOL_CAP 0x00400000
#define TG3_FLAG_JUMBO_RING_ENABLE 0x00800000 #define TG3_FLAG_JUMBO_RING_ENABLE 0x00800000
#define TG3_FLAG_10_100_ONLY 0x01000000 #define TG3_FLAG_10_100_ONLY 0x01000000

View File

@ -1605,11 +1605,11 @@ static void __devexit w840_remove1 (struct pci_dev *pdev)
* - get_stats: * - get_stats:
* spin_lock_irq(np->lock), doesn't touch hw if not present * spin_lock_irq(np->lock), doesn't touch hw if not present
* - hard_start_xmit: * - hard_start_xmit:
* netif_stop_queue + spin_unlock_wait(&dev->xmit_lock); * synchronize_irq + netif_tx_disable;
* - tx_timeout: * - tx_timeout:
* netif_device_detach + spin_unlock_wait(&dev->xmit_lock); * netif_device_detach + netif_tx_disable;
* - set_multicast_list * - set_multicast_list
* netif_device_detach + spin_unlock_wait(&dev->xmit_lock); * netif_device_detach + netif_tx_disable;
* - interrupt handler * - interrupt handler
* doesn't touch hw if not present, synchronize_irq waits for * doesn't touch hw if not present, synchronize_irq waits for
* running instances of the interrupt handler. * running instances of the interrupt handler.
@ -1635,11 +1635,10 @@ static int w840_suspend (struct pci_dev *pdev, pm_message_t state)
netif_device_detach(dev); netif_device_detach(dev);
update_csr6(dev, 0); update_csr6(dev, 0);
iowrite32(0, ioaddr + IntrEnable); iowrite32(0, ioaddr + IntrEnable);
netif_stop_queue(dev);
spin_unlock_irq(&np->lock); spin_unlock_irq(&np->lock);
spin_unlock_wait(&dev->xmit_lock);
synchronize_irq(dev->irq); synchronize_irq(dev->irq);
netif_tx_disable(dev);
np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff; np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;

View File

@ -1899,6 +1899,13 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
int pktlen = skb->len; int pktlen = skb->len;
#ifdef VELOCITY_ZERO_COPY_SUPPORT
if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
kfree_skb(skb);
return 0;
}
#endif
spin_lock_irqsave(&vptr->lock, flags); spin_lock_irqsave(&vptr->lock, flags);
index = vptr->td_curr[qnum]; index = vptr->td_curr[qnum];
@ -1914,8 +1921,6 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
*/ */
if (pktlen < ETH_ZLEN) { if (pktlen < ETH_ZLEN) {
/* Cannot occur until ZC support */ /* Cannot occur until ZC support */
if(skb_linearize(skb, GFP_ATOMIC))
return 0;
pktlen = ETH_ZLEN; pktlen = ETH_ZLEN;
memcpy(tdinfo->buf, skb->data, skb->len); memcpy(tdinfo->buf, skb->data, skb->len);
memset(tdinfo->buf + skb->len, 0, ETH_ZLEN - skb->len); memset(tdinfo->buf + skb->len, 0, ETH_ZLEN - skb->len);
@ -1933,7 +1938,6 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
int nfrags = skb_shinfo(skb)->nr_frags; int nfrags = skb_shinfo(skb)->nr_frags;
tdinfo->skb = skb; tdinfo->skb = skb;
if (nfrags > 6) { if (nfrags > 6) {
skb_linearize(skb, GFP_ATOMIC);
memcpy(tdinfo->buf, skb->data, skb->len); memcpy(tdinfo->buf, skb->data, skb->len);
tdinfo->skb_dma[0] = tdinfo->buf_dma; tdinfo->skb_dma[0] = tdinfo->buf_dma;
td_ptr->tdesc0.pktsize = td_ptr->tdesc0.pktsize =

View File

@ -1787,7 +1787,9 @@ static int __orinoco_program_rids(struct net_device *dev)
/* Set promiscuity / multicast*/ /* Set promiscuity / multicast*/
priv->promiscuous = 0; priv->promiscuous = 0;
priv->mc_count = 0; priv->mc_count = 0;
__orinoco_set_multicast_list(dev); /* FIXME: what about the xmit_lock */
/* FIXME: what about netif_tx_lock */
__orinoco_set_multicast_list(dev);
return 0; return 0;
} }

include/linux/dmaengine.h (new file, 359 lines)
View File

@ -0,0 +1,359 @@
/*
* Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution in the
* file called COPYING.
*/
#ifndef DMAENGINE_H
#define DMAENGINE_H
#include <linux/config.h>
#ifdef CONFIG_DMA_ENGINE
#include <linux/device.h>
#include <linux/uio.h>
#include <linux/kref.h>
#include <linux/completion.h>
#include <linux/rcupdate.h>
/**
* enum dma_event - resource PNP/power management events
* @DMA_RESOURCE_SUSPEND: DMA device going into low power state
* @DMA_RESOURCE_RESUME: DMA device returning to full power
* @DMA_RESOURCE_ADDED: DMA device added to the system
* @DMA_RESOURCE_REMOVED: DMA device removed from the system
*/
enum dma_event {
DMA_RESOURCE_SUSPEND,
DMA_RESOURCE_RESUME,
DMA_RESOURCE_ADDED,
DMA_RESOURCE_REMOVED,
};
/**
* typedef dma_cookie_t
*
* if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code
*/
typedef s32 dma_cookie_t;
#define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0)
/**
* enum dma_status - DMA transaction status
* @DMA_SUCCESS: transaction completed successfully
* @DMA_IN_PROGRESS: transaction not yet processed
* @DMA_ERROR: transaction failed
*/
enum dma_status {
DMA_SUCCESS,
DMA_IN_PROGRESS,
DMA_ERROR,
};
/**
* struct dma_chan_percpu - the per-CPU part of struct dma_chan
* @refcount: local_t used for open-coded "bigref" counting
* @memcpy_count: transaction counter
* @bytes_transferred: byte counter
*/
struct dma_chan_percpu {
local_t refcount;
/* stats */
unsigned long memcpy_count;
unsigned long bytes_transferred;
};
/**
* struct dma_chan - devices supply DMA channels, clients use them
* @client: ptr to the client user of this chan, will be NULL when unused
* @device: ptr to the dma device that supplies this channel, always !NULL
* @cookie: last cookie value returned to client
* @chan_id: channel ID for sysfs
* @class_dev: class device for sysfs
* @refcount: kref, used in "bigref" slow-mode
* @slow_ref: set once the channel has fallen back to kref-based counting
* @rcu: RCU head used to defer freeing of the channel
* @client_node: used to add this to the client chan list
* @device_node: used to add this to the device chan list
* @local: per-cpu pointer to a struct dma_chan_percpu
*/
struct dma_chan {
struct dma_client *client;
struct dma_device *device;
dma_cookie_t cookie;
/* sysfs */
int chan_id;
struct class_device class_dev;
struct kref refcount;
int slow_ref;
struct rcu_head rcu;
struct list_head client_node;
struct list_head device_node;
struct dma_chan_percpu *local;
};
void dma_chan_cleanup(struct kref *kref);
static inline void dma_chan_get(struct dma_chan *chan)
{
if (unlikely(chan->slow_ref))
kref_get(&chan->refcount);
else {
local_inc(&(per_cpu_ptr(chan->local, get_cpu())->refcount));
put_cpu();
}
}
static inline void dma_chan_put(struct dma_chan *chan)
{
if (unlikely(chan->slow_ref))
kref_put(&chan->refcount, dma_chan_cleanup);
else {
local_dec(&(per_cpu_ptr(chan->local, get_cpu())->refcount));
put_cpu();
}
}
/*
* typedef dma_event_callback - function pointer to a DMA event callback
*/
typedef void (*dma_event_callback) (struct dma_client *client,
struct dma_chan *chan, enum dma_event event);
/**
* struct dma_client - info on the entity making use of DMA services
* @event_callback: func ptr to call when a DMA resource event occurs
* @chan_count: number of chans allocated
* @chans_desired: number of chans requested. Can be +/- chan_count
* @lock: protects access to the channels list
* @channels: the list of DMA channels allocated
* @global_node: list_head for global dma_client_list
*/
struct dma_client {
dma_event_callback event_callback;
unsigned int chan_count;
unsigned int chans_desired;
spinlock_t lock;
struct list_head channels;
struct list_head global_node;
};
/**
* struct dma_device - info on the entity supplying DMA services
* @chancnt: how many DMA channels are supported
* @channels: the list of struct dma_chan
* @global_node: list_head for global dma_device_list
* @refcount: kref holding references to this device
* @done: completion signalled once the last reference is dropped
* @dev_id: unique device ID
* Other func ptrs: used to make use of this device's capabilities
*/
struct dma_device {
unsigned int chancnt;
struct list_head channels;
struct list_head global_node;
struct kref refcount;
struct completion done;
int dev_id;
int (*device_alloc_chan_resources)(struct dma_chan *chan);
void (*device_free_chan_resources)(struct dma_chan *chan);
dma_cookie_t (*device_memcpy_buf_to_buf)(struct dma_chan *chan,
void *dest, void *src, size_t len);
dma_cookie_t (*device_memcpy_buf_to_pg)(struct dma_chan *chan,
struct page *page, unsigned int offset, void *kdata,
size_t len);
dma_cookie_t (*device_memcpy_pg_to_pg)(struct dma_chan *chan,
struct page *dest_pg, unsigned int dest_off,
struct page *src_pg, unsigned int src_off, size_t len);
enum dma_status (*device_memcpy_complete)(struct dma_chan *chan,
dma_cookie_t cookie, dma_cookie_t *last,
dma_cookie_t *used);
void (*device_memcpy_issue_pending)(struct dma_chan *chan);
};
/* --- public DMA engine API --- */
struct dma_client *dma_async_client_register(dma_event_callback event_callback);
void dma_async_client_unregister(struct dma_client *client);
void dma_async_client_chan_request(struct dma_client *client,
unsigned int number);
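A minimal, hypothetical client built on the three calls above; the callback and init/exit names are illustrative:

static struct dma_client *example_client;

static void my_dma_event(struct dma_client *client, struct dma_chan *chan,
			 enum dma_event event)
{
	/* track channels as they are added, removed, suspended or resumed */
}

static int example_init(void)
{
	example_client = dma_async_client_register(my_dma_event);
	if (!example_client)
		return -ENOMEM;
	dma_async_client_chan_request(example_client, 1);	/* one channel */
	return 0;
}

static void example_exit(void)
{
	dma_async_client_unregister(example_client);
}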
/**
* dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
* @chan: DMA channel to offload copy to
* @dest: destination address (virtual)
* @src: source address (virtual)
* @len: length
*
* Both @dest and @src must be mappable to a bus address according to the
* DMA mapping API rules for streaming mappings.
* Both @dest and @src must stay memory resident (kernel memory or locked
* user space pages)
*/
static inline dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
void *dest, void *src, size_t len)
{
int cpu = get_cpu();
per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
per_cpu_ptr(chan->local, cpu)->memcpy_count++;
put_cpu();
return chan->device->device_memcpy_buf_to_buf(chan, dest, src, len);
}
/**
* dma_async_memcpy_buf_to_pg - offloaded copy
* @chan: DMA channel to offload copy to
* @page: destination page
* @offset: offset in page to copy to
* @kdata: source address (virtual)
* @len: length
*
* Both @page/@offset and @kdata must be mappable to a bus address according
* to the DMA mapping API rules for streaming mappings.
* Both @page/@offset and @kdata must stay memory resident (kernel memory or
* locked user space pages)
*/
static inline dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
struct page *page, unsigned int offset, void *kdata, size_t len)
{
int cpu = get_cpu();
per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
per_cpu_ptr(chan->local, cpu)->memcpy_count++;
put_cpu();
return chan->device->device_memcpy_buf_to_pg(chan, page, offset,
kdata, len);
}
/**
* dma_async_memcpy_pg_to_pg - offloaded page-to-page copy
* @chan: DMA channel to offload copy to
* @dest_pg: destination page
* @dest_off: offset in destination page to copy to
* @src_pg: source page
* @src_off: offset in source page to copy from
* @len: length
*
* Both @dest_pg/@dest_off and @src_pg/@src_off must be mappable to a bus
* address according to the DMA mapping API rules for streaming mappings.
* Both @dest_pg/@dest_off and @src_pg/@src_off must stay memory resident
* (kernel memory or locked user space pages)
*/
static inline dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
unsigned int src_off, size_t len)
{
int cpu = get_cpu();
per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
per_cpu_ptr(chan->local, cpu)->memcpy_count++;
put_cpu();
return chan->device->device_memcpy_pg_to_pg(chan, dest_pg, dest_off,
src_pg, src_off, len);
}
/**
* dma_async_memcpy_issue_pending - flush pending copies to HW
* @chan: DMA channel to flush pending copies on
*
* This allows drivers to push copies to HW in batches,
* reducing MMIO writes where possible.
*/
static inline void dma_async_memcpy_issue_pending(struct dma_chan *chan)
{
return chan->device->device_memcpy_issue_pending(chan);
}
/**
* dma_async_memcpy_complete - poll for transaction completion
* @chan: DMA channel
* @cookie: transaction identifier to check status of
* @last: returns last completed cookie, can be NULL
* @used: returns last issued cookie, can be NULL
*
* If @last and @used are passed in, upon return they reflect the driver
* internal state and can be used with dma_async_is_complete() to check
* the status of multiple cookies without re-checking hardware state.
*/
static inline enum dma_status dma_async_memcpy_complete(struct dma_chan *chan,
dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
{
return chan->device->device_memcpy_complete(chan, cookie, last, used);
}
/**
* dma_async_is_complete - test a cookie against chan state
* @cookie: transaction identifier to test status of
* @last_complete: last known completed transaction
* @last_used: last cookie value handed out
*
* dma_async_is_complete() is used inside dma_async_memcpy_complete();
* the test logic is separated out for lightweight testing of multiple cookies
*/
static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
dma_cookie_t last_complete, dma_cookie_t last_used)
{
if (last_complete <= last_used) {
if ((cookie <= last_complete) || (cookie > last_used))
return DMA_SUCCESS;
} else {
if ((cookie <= last_complete) && (cookie > last_used))
return DMA_SUCCESS;
}
return DMA_IN_PROGRESS;
}
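A hypothetical caller combining the helpers above; the busy-poll is only for brevity, and example_copy_and_wait is not a real kernel function:

static int example_copy_and_wait(struct dma_chan *chan, void *dst, void *src,
				 size_t len)
{
	dma_cookie_t cookie, last, used;

	cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
	if (dma_submit_error(cookie))
		return -EIO;			/* illustrative error handling */

	dma_async_memcpy_issue_pending(chan);	/* push the request to hardware */

	/* busy-poll for brevity; real users would do useful work instead */
	while (dma_async_memcpy_complete(chan, cookie, &last, &used) ==
	       DMA_IN_PROGRESS)
		cpu_relax();

	/* 'last'/'used' can now classify other cookies via
	 * dma_async_is_complete() without touching hardware again */
	return 0;
}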
/* --- DMA device --- */
int dma_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);
/* --- Helper iov-locking functions --- */
struct dma_page_list {
char *base_address;
int nr_pages;
struct page **pages;
};
struct dma_pinned_list {
int nr_iovecs;
struct dma_page_list page_list[0];
};
struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
struct dma_pinned_list *pinned_list, struct page *page,
unsigned int offset, size_t len);
#endif /* CONFIG_DMA_ENGINE */
#endif /* DMAENGINE_H */

View File

@ -169,7 +169,7 @@ struct ip_sf_list
struct ip_mc_list struct ip_mc_list
{ {
struct in_device *interface; struct in_device *interface;
unsigned long multiaddr; __be32 multiaddr;
struct ip_sf_list *sources; struct ip_sf_list *sources;
struct ip_sf_list *tomb; struct ip_sf_list *tomb;
unsigned int sfmode; unsigned int sfmode;

View File

@ -37,6 +37,7 @@
#include <linux/config.h> #include <linux/config.h>
#include <linux/device.h> #include <linux/device.h>
#include <linux/percpu.h> #include <linux/percpu.h>
#include <linux/dmaengine.h>
struct divert_blk; struct divert_blk;
struct vlan_group; struct vlan_group;
@ -311,6 +312,9 @@ struct net_device
#define NETIF_F_LLTX 4096 /* LockLess TX */ #define NETIF_F_LLTX 4096 /* LockLess TX */
#define NETIF_F_UFO 8192 /* Can offload UDP Large Send*/ #define NETIF_F_UFO 8192 /* Can offload UDP Large Send*/
#define NETIF_F_GEN_CSUM (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
#define NETIF_F_ALL_CSUM (NETIF_F_IP_CSUM | NETIF_F_GEN_CSUM)
struct net_device *next_sched; struct net_device *next_sched;
/* Interface index. Unique device identifier */ /* Interface index. Unique device identifier */
@ -406,7 +410,7 @@ struct net_device
* One part is mostly used on xmit path (device) * One part is mostly used on xmit path (device)
*/ */
/* hard_start_xmit synchronizer */ /* hard_start_xmit synchronizer */
spinlock_t xmit_lock ____cacheline_aligned_in_smp; spinlock_t _xmit_lock ____cacheline_aligned_in_smp;
/* cpu id of processor entered to hard_start_xmit or -1, /* cpu id of processor entered to hard_start_xmit or -1,
if nobody entered there. if nobody entered there.
*/ */
@ -593,6 +597,9 @@ struct softnet_data
struct sk_buff *completion_queue; struct sk_buff *completion_queue;
struct net_device backlog_dev; /* Sorry. 8) */ struct net_device backlog_dev; /* Sorry. 8) */
#ifdef CONFIG_NET_DMA
struct dma_chan *net_dma;
#endif
}; };
DECLARE_PER_CPU(struct softnet_data,softnet_data); DECLARE_PER_CPU(struct softnet_data,softnet_data);
@ -889,11 +896,43 @@ static inline void __netif_rx_complete(struct net_device *dev)
clear_bit(__LINK_STATE_RX_SCHED, &dev->state); clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
} }
static inline void netif_tx_lock(struct net_device *dev)
{
spin_lock(&dev->_xmit_lock);
dev->xmit_lock_owner = smp_processor_id();
}
static inline void netif_tx_lock_bh(struct net_device *dev)
{
spin_lock_bh(&dev->_xmit_lock);
dev->xmit_lock_owner = smp_processor_id();
}
static inline int netif_tx_trylock(struct net_device *dev)
{
int err = spin_trylock(&dev->_xmit_lock);
if (!err)
dev->xmit_lock_owner = smp_processor_id();
return err;
}
static inline void netif_tx_unlock(struct net_device *dev)
{
dev->xmit_lock_owner = -1;
spin_unlock(&dev->_xmit_lock);
}
static inline void netif_tx_unlock_bh(struct net_device *dev)
{
dev->xmit_lock_owner = -1;
spin_unlock_bh(&dev->_xmit_lock);
}
static inline void netif_tx_disable(struct net_device *dev) static inline void netif_tx_disable(struct net_device *dev)
{ {
spin_lock_bh(&dev->xmit_lock); netif_tx_lock_bh(dev);
netif_stop_queue(dev); netif_stop_queue(dev);
spin_unlock_bh(&dev->xmit_lock); netif_tx_unlock_bh(dev);
} }
/* These functions live elsewhere (drivers/net/net_init.c, but related) */ /* These functions live elsewhere (drivers/net/net_init.c, but related) */
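A short, hypothetical example of using these helpers to keep hard_start_xmit out while reconfiguring tx state:

static void example_tx_reconfigure(struct net_device *dev)
{
	netif_tx_lock_bh(dev);		/* excludes dev->hard_start_xmit */
	netif_stop_queue(dev);
	/* ... reprogram tx-related hardware state ... */
	netif_tx_unlock_bh(dev);
	netif_wake_queue(dev);		/* restart the queue once ready */
}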

View File

@ -69,6 +69,10 @@ enum ip_conntrack_status {
/* Connection is dying (removed from lists), can not be unset. */ /* Connection is dying (removed from lists), can not be unset. */
IPS_DYING_BIT = 9, IPS_DYING_BIT = 9,
IPS_DYING = (1 << IPS_DYING_BIT), IPS_DYING = (1 << IPS_DYING_BIT),
/* Connection has fixed timeout. */
IPS_FIXED_TIMEOUT_BIT = 10,
IPS_FIXED_TIMEOUT = (1 << IPS_FIXED_TIMEOUT_BIT),
}; };
/* Connection tracking event bits */ /* Connection tracking event bits */

View File

@ -27,13 +27,15 @@ enum ctattr_type {
CTA_STATUS, CTA_STATUS,
CTA_PROTOINFO, CTA_PROTOINFO,
CTA_HELP, CTA_HELP,
CTA_NAT, CTA_NAT_SRC,
#define CTA_NAT CTA_NAT_SRC /* backwards compatibility */
CTA_TIMEOUT, CTA_TIMEOUT,
CTA_MARK, CTA_MARK,
CTA_COUNTERS_ORIG, CTA_COUNTERS_ORIG,
CTA_COUNTERS_REPLY, CTA_COUNTERS_REPLY,
CTA_USE, CTA_USE,
CTA_ID, CTA_ID,
CTA_NAT_DST,
__CTA_MAX __CTA_MAX
}; };
#define CTA_MAX (__CTA_MAX - 1) #define CTA_MAX (__CTA_MAX - 1)

View File

@ -0,0 +1,13 @@
#ifndef _XT_CONNSECMARK_H_target
#define _XT_CONNSECMARK_H_target
enum {
CONNSECMARK_SAVE = 1,
CONNSECMARK_RESTORE,
};
struct xt_connsecmark_target_info {
u_int8_t mode;
};
#endif /*_XT_CONNSECMARK_H_target */

View File

@ -0,0 +1,26 @@
#ifndef _XT_SECMARK_H_target
#define _XT_SECMARK_H_target
/*
* This is intended for use by various security subsystems (but not
* at the same time).
*
* 'mode' selects the specific security subsystem for which the
* packets are being marked.
*/
#define SECMARK_MODE_SEL 0x01 /* SELinux */
#define SECMARK_SELCTX_MAX 256
struct xt_secmark_target_selinux_info {
u_int32_t selsid;
char selctx[SECMARK_SELCTX_MAX];
};
struct xt_secmark_target_info {
u_int8_t mode;
union {
struct xt_secmark_target_selinux_info sel;
} u;
};
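A sketch of how the SELinux mode might be validated at rule load time, using only the selinux_string_to_sid() and selinux_relabel_packet_permission() helpers added elsewhere in this merge; this is a hypothetical function, not the actual SECMARK target code:

static int example_secmark_checkentry(struct xt_secmark_target_info *info)
{
	int err;

	if (info->mode != SECMARK_MODE_SEL)
		return -EINVAL;		/* only the SELinux mode exists so far */

	err = selinux_string_to_sid(info->u.sel.selctx, &info->u.sel.selsid);
	if (err)
		return err;

	/* may the current task label packets with this SID? */
	return selinux_relabel_packet_permission(info->u.sel.selsid);
}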
#endif /*_XT_SECMARK_H_target */

View File

@ -0,0 +1,16 @@
#ifndef _XT_QUOTA_H
#define _XT_QUOTA_H
enum xt_quota_flags {
XT_QUOTA_INVERT = 0x1,
};
#define XT_QUOTA_MASK 0x1
struct xt_quota_info {
u_int32_t flags;
u_int32_t pad;
aligned_u64 quota;
struct xt_quota_info *master;
};
#endif /* _XT_QUOTA_H */

View File

@ -0,0 +1,32 @@
#ifndef _XT_STATISTIC_H
#define _XT_STATISTIC_H
enum xt_statistic_mode {
XT_STATISTIC_MODE_RANDOM,
XT_STATISTIC_MODE_NTH,
__XT_STATISTIC_MODE_MAX
};
#define XT_STATISTIC_MODE_MAX (__XT_STATISTIC_MODE_MAX - 1)
enum xt_statistic_flags {
XT_STATISTIC_INVERT = 0x1,
};
#define XT_STATISTIC_MASK 0x1
struct xt_statistic_info {
u_int16_t mode;
u_int16_t flags;
union {
struct {
u_int32_t probability;
} random;
struct {
u_int32_t every;
u_int32_t packet;
u_int32_t count;
} nth;
} u;
struct xt_statistic_info *master __attribute__((aligned(8)));
};
#endif /* _XT_STATISTIC_H */

View File

@ -121,6 +121,10 @@ struct ip_conntrack
u_int32_t mark; u_int32_t mark;
#endif #endif
#ifdef CONFIG_IP_NF_CONNTRACK_SECMARK
u_int32_t secmark;
#endif
/* Traversed often, so hopefully in different cacheline to top */ /* Traversed often, so hopefully in different cacheline to top */
/* These are my tuples; original and reply */ /* These are my tuples; original and reply */
struct ip_conntrack_tuple_hash tuplehash[IP_CT_DIR_MAX]; struct ip_conntrack_tuple_hash tuplehash[IP_CT_DIR_MAX];
@ -154,6 +158,7 @@ struct ip_conntrack_expect
unsigned int flags; unsigned int flags;
#ifdef CONFIG_IP_NF_NAT_NEEDED #ifdef CONFIG_IP_NF_NAT_NEEDED
u_int32_t saved_ip;
/* This is the original per-proto part, used to map the /* This is the original per-proto part, used to map the
* expected connection the way the recipient expects. */ * expected connection the way the recipient expects. */
union ip_conntrack_manip_proto saved_proto; union ip_conntrack_manip_proto saved_proto;
@ -293,6 +298,7 @@ static inline int is_dying(struct ip_conntrack *ct)
} }
extern unsigned int ip_conntrack_htable_size; extern unsigned int ip_conntrack_htable_size;
extern int ip_conntrack_checksum;
#define CONNTRACK_STAT_INC(count) (__get_cpu_var(ip_conntrack_stat).count++) #define CONNTRACK_STAT_INC(count) (__get_cpu_var(ip_conntrack_stat).count++)

View File

@ -71,6 +71,13 @@ extern int (*nat_h245_hook) (struct sk_buff ** pskb, struct ip_conntrack * ct,
unsigned char **data, int dataoff, unsigned char **data, int dataoff,
TransportAddress * addr, u_int16_t port, TransportAddress * addr, u_int16_t port,
struct ip_conntrack_expect * exp); struct ip_conntrack_expect * exp);
extern int (*nat_callforwarding_hook) (struct sk_buff ** pskb,
struct ip_conntrack * ct,
enum ip_conntrack_info ctinfo,
unsigned char **data, int dataoff,
TransportAddress * addr,
u_int16_t port,
struct ip_conntrack_expect * exp);
extern int (*nat_q931_hook) (struct sk_buff ** pskb, struct ip_conntrack * ct, extern int (*nat_q931_hook) (struct sk_buff ** pskb, struct ip_conntrack * ct,
enum ip_conntrack_info ctinfo, enum ip_conntrack_info ctinfo,
unsigned char **data, TransportAddress * addr, unsigned char **data, TransportAddress * addr,

View File

@ -1,4 +1,4 @@
/* Generated by Jing Min Zhao's ASN.1 parser, Mar 15 2006 /* Generated by Jing Min Zhao's ASN.1 parser, Apr 20 2006
* *
* Copyright (c) 2006 Jing Min Zhao <zhaojingmin@users.sourceforge.net> * Copyright (c) 2006 Jing Min Zhao <zhaojingmin@users.sourceforge.net>
* *
@ -412,6 +412,7 @@ typedef struct Facility_UUIE { /* SEQUENCE */
eFacility_UUIE_destinationInfo = (1 << 14), eFacility_UUIE_destinationInfo = (1 << 14),
eFacility_UUIE_h245SecurityMode = (1 << 13), eFacility_UUIE_h245SecurityMode = (1 << 13),
} options; } options;
TransportAddress alternativeAddress;
FacilityReason reason; FacilityReason reason;
TransportAddress h245Address; TransportAddress h245Address;
Facility_UUIE_fastStart fastStart; Facility_UUIE_fastStart fastStart;

View File

@ -0,0 +1,44 @@
#ifndef __IP_CONNTRACK_SIP_H__
#define __IP_CONNTRACK_SIP_H__
#ifdef __KERNEL__
#define SIP_PORT 5060
#define SIP_TIMEOUT 3600
#define POS_VIA 0
#define POS_CONTACT 1
#define POS_CONTENT 2
#define POS_MEDIA 3
#define POS_OWNER 4
#define POS_CONNECTION 5
#define POS_REQ_HEADER 6
#define POS_SDP_HEADER 7
struct sip_header_nfo {
const char *lname;
const char *sname;
const char *ln_str;
size_t lnlen;
size_t snlen;
size_t ln_strlen;
int (*match_len)(const char *, const char *, int *);
};
extern unsigned int (*ip_nat_sip_hook)(struct sk_buff **pskb,
enum ip_conntrack_info ctinfo,
struct ip_conntrack *ct,
const char **dptr);
extern unsigned int (*ip_nat_sdp_hook)(struct sk_buff **pskb,
enum ip_conntrack_info ctinfo,
struct ip_conntrack_expect *exp,
const char *dptr);
extern int ct_sip_get_info(const char *dptr, size_t dlen,
unsigned int *matchoff,
unsigned int *matchlen,
struct sip_header_nfo *hnfo);
extern int ct_sip_lnlen(const char *line, const char *limit);
extern const char *ct_sip_search(const char *needle, const char *haystack,
size_t needle_len, size_t haystack_len);
#endif /* __KERNEL__ */
#endif /* __IP_CONNTRACK_SIP_H__ */

View File

@ -1897,6 +1897,7 @@
#define PCI_DEVICE_ID_TIGON3_5751F 0x167e #define PCI_DEVICE_ID_TIGON3_5751F 0x167e
#define PCI_DEVICE_ID_TIGON3_5787M 0x1693 #define PCI_DEVICE_ID_TIGON3_5787M 0x1693
#define PCI_DEVICE_ID_TIGON3_5782 0x1696 #define PCI_DEVICE_ID_TIGON3_5782 0x1696
#define PCI_DEVICE_ID_TIGON3_5786 0x169a
#define PCI_DEVICE_ID_TIGON3_5787 0x169b #define PCI_DEVICE_ID_TIGON3_5787 0x169b
#define PCI_DEVICE_ID_TIGON3_5788 0x169c #define PCI_DEVICE_ID_TIGON3_5788 0x169c
#define PCI_DEVICE_ID_TIGON3_5789 0x169d #define PCI_DEVICE_ID_TIGON3_5789 0x169d
@ -2053,6 +2054,7 @@
#define PCI_DEVICE_ID_INTEL_80960_RP 0x1960 #define PCI_DEVICE_ID_INTEL_80960_RP 0x1960
#define PCI_DEVICE_ID_INTEL_82840_HB 0x1a21 #define PCI_DEVICE_ID_INTEL_82840_HB 0x1a21
#define PCI_DEVICE_ID_INTEL_82845_HB 0x1a30 #define PCI_DEVICE_ID_INTEL_82845_HB 0x1a30
#define PCI_DEVICE_ID_INTEL_IOAT 0x1a38
#define PCI_DEVICE_ID_INTEL_82801AA_0 0x2410 #define PCI_DEVICE_ID_INTEL_82801AA_0 0x2410
#define PCI_DEVICE_ID_INTEL_82801AA_1 0x2411 #define PCI_DEVICE_ID_INTEL_82801AA_1 0x2411
#define PCI_DEVICE_ID_INTEL_82801AA_3 0x2413 #define PCI_DEVICE_ID_INTEL_82801AA_3 0x2413

View File

@ -159,7 +159,7 @@ struct sadb_spirange {
struct sadb_x_kmprivate { struct sadb_x_kmprivate {
uint16_t sadb_x_kmprivate_len; uint16_t sadb_x_kmprivate_len;
uint16_t sadb_x_kmprivate_exttype; uint16_t sadb_x_kmprivate_exttype;
u_int32_t sadb_x_kmprivate_reserved; uint32_t sadb_x_kmprivate_reserved;
} __attribute__((packed)); } __attribute__((packed));
/* sizeof(struct sadb_x_kmprivate) == 8 */ /* sizeof(struct sadb_x_kmprivate) == 8 */

View File

@ -805,31 +805,37 @@ struct swap_info_struct;
* used by the XFRM system. * used by the XFRM system.
* @sec_ctx contains the security context information being provided by * @sec_ctx contains the security context information being provided by
* the user-level policy update program (e.g., setkey). * the user-level policy update program (e.g., setkey).
* Allocate a security structure to the xp->selector.security field. * Allocate a security structure to the xp->security field.
* The security field is initialized to NULL when the xfrm_policy is * The security field is initialized to NULL when the xfrm_policy is
* allocated. * allocated.
* Return 0 if operation was successful (memory to allocate, legal context) * Return 0 if operation was successful (memory to allocate, legal context)
* @xfrm_policy_clone_security: * @xfrm_policy_clone_security:
* @old contains an existing xfrm_policy in the SPD. * @old contains an existing xfrm_policy in the SPD.
* @new contains a new xfrm_policy being cloned from old. * @new contains a new xfrm_policy being cloned from old.
* Allocate a security structure to the new->selector.security field * Allocate a security structure to the new->security field
* that contains the information from the old->selector.security field. * that contains the information from the old->security field.
* Return 0 if operation was successful (memory to allocate). * Return 0 if operation was successful (memory to allocate).
* @xfrm_policy_free_security: * @xfrm_policy_free_security:
* @xp contains the xfrm_policy * @xp contains the xfrm_policy
* Deallocate xp->selector.security. * Deallocate xp->security.
* @xfrm_policy_delete_security:
* @xp contains the xfrm_policy.
* Authorize deletion of xp->security.
* @xfrm_state_alloc_security: * @xfrm_state_alloc_security:
* @x contains the xfrm_state being added to the Security Association * @x contains the xfrm_state being added to the Security Association
* Database by the XFRM system. * Database by the XFRM system.
* @sec_ctx contains the security context information being provided by * @sec_ctx contains the security context information being provided by
* the user-level SA generation program (e.g., setkey or racoon). * the user-level SA generation program (e.g., setkey or racoon).
* Allocate a security structure to the x->sel.security field. The * Allocate a security structure to the x->security field. The
* security field is initialized to NULL when the xfrm_state is * security field is initialized to NULL when the xfrm_state is
* allocated. * allocated.
* Return 0 if operation was successful (memory to allocate, legal context). * Return 0 if operation was successful (memory to allocate, legal context).
* @xfrm_state_free_security: * @xfrm_state_free_security:
* @x contains the xfrm_state. * @x contains the xfrm_state.
* Deallocate x>sel.security. * Deallocate x->security.
* @xfrm_state_delete_security:
* @x contains the xfrm_state.
* Authorize deletion of x->security.
* @xfrm_policy_lookup: * @xfrm_policy_lookup:
* @xp contains the xfrm_policy for which the access control is being * @xp contains the xfrm_policy for which the access control is being
* checked. * checked.
@ -1298,8 +1304,10 @@ struct security_operations {
int (*xfrm_policy_alloc_security) (struct xfrm_policy *xp, struct xfrm_user_sec_ctx *sec_ctx); int (*xfrm_policy_alloc_security) (struct xfrm_policy *xp, struct xfrm_user_sec_ctx *sec_ctx);
int (*xfrm_policy_clone_security) (struct xfrm_policy *old, struct xfrm_policy *new); int (*xfrm_policy_clone_security) (struct xfrm_policy *old, struct xfrm_policy *new);
void (*xfrm_policy_free_security) (struct xfrm_policy *xp); void (*xfrm_policy_free_security) (struct xfrm_policy *xp);
int (*xfrm_policy_delete_security) (struct xfrm_policy *xp);
int (*xfrm_state_alloc_security) (struct xfrm_state *x, struct xfrm_user_sec_ctx *sec_ctx); int (*xfrm_state_alloc_security) (struct xfrm_state *x, struct xfrm_user_sec_ctx *sec_ctx);
void (*xfrm_state_free_security) (struct xfrm_state *x); void (*xfrm_state_free_security) (struct xfrm_state *x);
int (*xfrm_state_delete_security) (struct xfrm_state *x);
int (*xfrm_policy_lookup)(struct xfrm_policy *xp, u32 sk_sid, u8 dir); int (*xfrm_policy_lookup)(struct xfrm_policy *xp, u32 sk_sid, u8 dir);
#endif /* CONFIG_SECURITY_NETWORK_XFRM */ #endif /* CONFIG_SECURITY_NETWORK_XFRM */
@ -2934,11 +2942,21 @@ static inline void security_xfrm_policy_free(struct xfrm_policy *xp)
security_ops->xfrm_policy_free_security(xp); security_ops->xfrm_policy_free_security(xp);
} }
static inline int security_xfrm_policy_delete(struct xfrm_policy *xp)
{
return security_ops->xfrm_policy_delete_security(xp);
}
static inline int security_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *sec_ctx) static inline int security_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *sec_ctx)
{ {
return security_ops->xfrm_state_alloc_security(x, sec_ctx); return security_ops->xfrm_state_alloc_security(x, sec_ctx);
} }
static inline int security_xfrm_state_delete(struct xfrm_state *x)
{
return security_ops->xfrm_state_delete_security(x);
}
static inline void security_xfrm_state_free(struct xfrm_state *x) static inline void security_xfrm_state_free(struct xfrm_state *x)
{ {
security_ops->xfrm_state_free_security(x); security_ops->xfrm_state_free_security(x);
@ -2963,6 +2981,11 @@ static inline void security_xfrm_policy_free(struct xfrm_policy *xp)
{ {
} }
static inline int security_xfrm_policy_delete(struct xfrm_policy *xp)
{
return 0;
}
static inline int security_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *sec_ctx) static inline int security_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *sec_ctx)
{ {
return 0; return 0;
@ -2972,6 +2995,11 @@ static inline void security_xfrm_state_free(struct xfrm_state *x)
{ {
} }
static inline int security_xfrm_state_delete(struct xfrm_state *x)
{
return 0;
}
static inline int security_xfrm_policy_lookup(struct xfrm_policy *xp, u32 sk_sid, u8 dir) static inline int security_xfrm_policy_lookup(struct xfrm_policy *xp, u32 sk_sid, u8 dir)
{ {
return 0; return 0;

View File

@ -118,6 +118,27 @@ void selinux_get_ipc_sid(const struct kern_ipc_perm *ipcp, u32 *sid);
*/ */
void selinux_get_task_sid(struct task_struct *tsk, u32 *sid); void selinux_get_task_sid(struct task_struct *tsk, u32 *sid);
/**
* selinux_string_to_sid - map a security context string to a security ID
* @str: the security context string to be mapped
* @sid: the resulting security ID, returned via this pointer.
*
* Returns 0 if successful, with the SID stored in sid. A value
* of zero for sid indicates no SID could be determined (but no error
* occurred).
*/
int selinux_string_to_sid(char *str, u32 *sid);
/**
* selinux_relabel_packet_permission - check permission to relabel a packet
* @sid: ID value to be applied to a network packet (via SECMARK, most likely)
*
* Returns 0 if the current task is allowed to label packets with the
* supplied security ID. Note that it is implicit that the packet is always
* being relabeled from the default unlabeled value, and that the access
* control decision is made in the AVC.
*/
int selinux_relabel_packet_permission(u32 sid);
#else #else
@ -172,6 +193,17 @@ static inline void selinux_get_task_sid(struct task_struct *tsk, u32 *sid)
*sid = 0; *sid = 0;
} }
static inline int selinux_string_to_sid(const char *str, u32 *sid)
{
*sid = 0;
return 0;
}
static inline int selinux_relabel_packet_permission(u32 sid)
{
return 0;
}
#endif /* CONFIG_SECURITY_SELINUX */ #endif /* CONFIG_SECURITY_SELINUX */
#endif /* _LINUX_SELINUX_H */ #endif /* _LINUX_SELINUX_H */

View File

@ -29,6 +29,7 @@
#include <linux/net.h> #include <linux/net.h>
#include <linux/textsearch.h> #include <linux/textsearch.h>
#include <net/checksum.h> #include <net/checksum.h>
#include <linux/dmaengine.h>
#define HAVE_ALLOC_SKB /* For the drivers to know */ #define HAVE_ALLOC_SKB /* For the drivers to know */
#define HAVE_ALIGNABLE_SKB /* Ditto 8) */ #define HAVE_ALIGNABLE_SKB /* Ditto 8) */
@ -209,6 +210,7 @@ enum {
* @nf_bridge: Saved data about a bridged frame - see br_netfilter.c * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
* @tc_index: Traffic control index * @tc_index: Traffic control index
* @tc_verd: traffic control verdict * @tc_verd: traffic control verdict
* @secmark: security marking
*/ */
struct sk_buff { struct sk_buff {
@ -285,6 +287,12 @@ struct sk_buff {
__u16 tc_verd; /* traffic control verdict */ __u16 tc_verd; /* traffic control verdict */
#endif #endif
#endif #endif
#ifdef CONFIG_NET_DMA
dma_cookie_t dma_cookie;
#endif
#ifdef CONFIG_NETWORK_SECMARK
__u32 secmark;
#endif
/* These elements must be at the end, see alloc_skb() for details. */ /* These elements must be at the end, see alloc_skb() for details. */
@ -967,15 +975,16 @@ static inline void skb_reserve(struct sk_buff *skb, int len)
#define NET_SKB_PAD 16 #define NET_SKB_PAD 16
#endif #endif
extern int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc); extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
static inline void __skb_trim(struct sk_buff *skb, unsigned int len) static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{ {
if (!skb->data_len) { if (unlikely(skb->data_len)) {
WARN_ON(1);
return;
}
skb->len = len; skb->len = len;
skb->tail = skb->data + len; skb->tail = skb->data + len;
} else
___pskb_trim(skb, len, 0);
} }
/** /**
@ -985,6 +994,7 @@ static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
* *
* Cut the length of a buffer down by removing data from the tail. If * Cut the length of a buffer down by removing data from the tail. If
* the buffer is already under the length specified it is not modified. * the buffer is already under the length specified it is not modified.
* The skb must be linear.
*/ */
static inline void skb_trim(struct sk_buff *skb, unsigned int len) static inline void skb_trim(struct sk_buff *skb, unsigned int len)
{ {
@ -995,12 +1005,10 @@ static inline void skb_trim(struct sk_buff *skb, unsigned int len)
static inline int __pskb_trim(struct sk_buff *skb, unsigned int len) static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{ {
if (!skb->data_len) { if (skb->data_len)
skb->len = len; return ___pskb_trim(skb, len);
skb->tail = skb->data+len; __skb_trim(skb, len);
return 0; return 0;
}
return ___pskb_trim(skb, len, 1);
} }
static inline int pskb_trim(struct sk_buff *skb, unsigned int len) static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
@ -1161,18 +1169,34 @@ static inline int skb_can_coalesce(struct sk_buff *skb, int i,
return 0; return 0;
} }
static inline int __skb_linearize(struct sk_buff *skb)
{
return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
}
/** /**
* skb_linearize - convert paged skb to linear one * skb_linearize - convert paged skb to linear one
* @skb: buffer to linarize * @skb: buffer to linarize
* @gfp: allocation mode
* *
* If there is no free memory -ENOMEM is returned, otherwise zero * If there is no free memory -ENOMEM is returned, otherwise zero
* is returned and the old skb data released. * is returned and the old skb data released.
*/ */
extern int __skb_linearize(struct sk_buff *skb, gfp_t gfp); static inline int skb_linearize(struct sk_buff *skb)
static inline int skb_linearize(struct sk_buff *skb, gfp_t gfp)
{ {
return __skb_linearize(skb, gfp); return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}
/**
* skb_linearize_cow - make sure skb is linear and writable
* @skb: buffer to process
*
* If there is no free memory -ENOMEM is returned, otherwise zero
* is returned and the old skb data released.
*/
static inline int skb_linearize_cow(struct sk_buff *skb)
{
return skb_is_nonlinear(skb) || skb_cloned(skb) ?
__skb_linearize(skb) : 0;
} }
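Typical driver usage, sketched from the mv643xx_eth and via-velocity hunks earlier in this merge; MAX_HW_FRAGS and stats are placeholders inside a hypothetical hard_start_xmit:

/* fragment from a hypothetical hard_start_xmit path */
if (skb_shinfo(skb)->nr_frags > MAX_HW_FRAGS && __skb_linearize(skb)) {
	/* could not make the skb linear: drop it rather than feed the
	 * hardware more fragments than it can handle */
	dev_kfree_skb(skb);
	stats->tx_dropped++;
	return NETDEV_TX_OK;
}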
/** /**
@ -1396,5 +1420,23 @@ static inline void nf_reset(struct sk_buff *skb)
static inline void nf_reset(struct sk_buff *skb) {} static inline void nf_reset(struct sk_buff *skb) {}
#endif /* CONFIG_NETFILTER */ #endif /* CONFIG_NETFILTER */
#ifdef CONFIG_NETWORK_SECMARK
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
to->secmark = from->secmark;
}
static inline void skb_init_secmark(struct sk_buff *skb)
{
skb->secmark = 0;
}
#else
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }
static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif
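The secmark helpers above compile to empty stubs when CONFIG_NETWORK_SECMARK is off, so callers never need their own #ifdefs. A hedged illustration (not from this commit; the helper name is made up) of the intended pattern when one skb is derived from another:

    #include <linux/skbuff.h>

    /* Hypothetical helper: carry per-packet metadata over to a new skb. */
    static void example_copy_skb_metadata(struct sk_buff *to,
                                          const struct sk_buff *from)
    {
            to->priority = from->priority;
            to->protocol = from->protocol;
            skb_copy_secmark(to, from);  /* no-op without CONFIG_NETWORK_SECMARK */
    }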
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif /* _LINUX_SKBUFF_H */ #endif /* _LINUX_SKBUFF_H */


@ -313,6 +313,7 @@ enum
NET_NF_CONNTRACK_FRAG6_TIMEOUT=29, NET_NF_CONNTRACK_FRAG6_TIMEOUT=29,
NET_NF_CONNTRACK_FRAG6_LOW_THRESH=30, NET_NF_CONNTRACK_FRAG6_LOW_THRESH=30,
NET_NF_CONNTRACK_FRAG6_HIGH_THRESH=31, NET_NF_CONNTRACK_FRAG6_HIGH_THRESH=31,
NET_NF_CONNTRACK_CHECKSUM=32,
}; };
/* /proc/sys/net/ipv4 */ /* /proc/sys/net/ipv4 */
@ -403,6 +404,8 @@ enum
NET_TCP_MTU_PROBING=113, NET_TCP_MTU_PROBING=113,
NET_TCP_BASE_MSS=114, NET_TCP_BASE_MSS=114,
NET_IPV4_TCP_WORKAROUND_SIGNED_WINDOWS=115, NET_IPV4_TCP_WORKAROUND_SIGNED_WINDOWS=115,
NET_TCP_DMA_COPYBREAK=116,
NET_TCP_SLOW_START_AFTER_IDLE=117,
}; };
enum { enum {
@ -491,6 +494,7 @@ enum
NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD=25, NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD=25,
NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT=26, NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT=26,
NET_IPV4_NF_CONNTRACK_COUNT=27, NET_IPV4_NF_CONNTRACK_COUNT=27,
NET_IPV4_NF_CONNTRACK_CHECKSUM=28,
}; };
/* /proc/sys/net/ipv6 */ /* /proc/sys/net/ipv6 */


@ -18,6 +18,7 @@
#define _LINUX_TCP_H #define _LINUX_TCP_H
#include <linux/types.h> #include <linux/types.h>
#include <linux/dmaengine.h>
#include <asm/byteorder.h> #include <asm/byteorder.h>
struct tcphdr { struct tcphdr {
@ -233,6 +234,13 @@ struct tcp_sock {
struct iovec *iov; struct iovec *iov;
int memory; int memory;
int len; int len;
#ifdef CONFIG_NET_DMA
/* members for async copy */
struct dma_chan *dma_chan;
int wakeup;
struct dma_pinned_list *pinned_list;
dma_cookie_t dma_cookie;
#endif
} ucopy; } ucopy;
__u32 snd_wl1; /* Sequence for window update */ __u32 snd_wl1; /* Sequence for window update */


@ -118,6 +118,10 @@ enum
XFRM_SHARE_UNIQUE /* Use once */ XFRM_SHARE_UNIQUE /* Use once */
}; };
#define XFRM_MODE_TRANSPORT 0
#define XFRM_MODE_TUNNEL 1
#define XFRM_MODE_MAX 2
/* Netlink configuration messages. */ /* Netlink configuration messages. */
enum { enum {
XFRM_MSG_BASE = 0x10, XFRM_MSG_BASE = 0x10,


@ -147,7 +147,6 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar
struct ipv4_config struct ipv4_config
{ {
int log_martians; int log_martians;
int autoconfig;
int no_pmtu_disc; int no_pmtu_disc;
}; };


@ -16,6 +16,7 @@
#include <linux/if.h> #include <linux/if.h>
#include <linux/if_arp.h> #include <linux/if_arp.h>
#include <linux/llc.h> #include <linux/llc.h>
#include <linux/etherdevice.h>
#include <net/llc.h> #include <net/llc.h>
#define LLC_DATAUNIT_PRIM 1 #define LLC_DATAUNIT_PRIM 1
@ -61,8 +62,6 @@
#define LLC_STATUS_CONFLICT 7 /* disconnect conn */ #define LLC_STATUS_CONFLICT 7 /* disconnect conn */
#define LLC_STATUS_RESET_DONE 8 /* */ #define LLC_STATUS_RESET_DONE 8 /* */
extern u8 llc_mac_null_var[IFHWADDRLEN];
/** /**
* llc_mac_null - determines if a address is a null mac address * llc_mac_null - determines if a address is a null mac address
* @mac: Mac address to test if null. * @mac: Mac address to test if null.
@ -70,16 +69,20 @@ extern u8 llc_mac_null_var[IFHWADDRLEN];
* Determines if a given address is a null mac address. Returns 0 if the * Determines if a given address is a null mac address. Returns 0 if the
* address is not a null mac, 1 if the address is a null mac. * address is not a null mac, 1 if the address is a null mac.
*/ */
static __inline__ int llc_mac_null(u8 *mac) static inline int llc_mac_null(const u8 *mac)
{ {
return !memcmp(mac, llc_mac_null_var, IFHWADDRLEN); return is_zero_ether_addr(mac);
} }
static __inline__ int llc_addrany(struct llc_addr *addr) static inline int llc_addrany(const struct llc_addr *addr)
{ {
return llc_mac_null(addr->mac) && !addr->lsap; return llc_mac_null(addr->mac) && !addr->lsap;
} }
static inline int llc_mac_multicast(const u8 *mac)
{
return is_multicast_ether_addr(mac);
}
/** /**
* llc_mac_match - determines if two mac addresses are the same * llc_mac_match - determines if two mac addresses are the same
* @mac1: First mac address to compare. * @mac1: First mac address to compare.
@ -89,9 +92,9 @@ static __inline__ int llc_addrany(struct llc_addr *addr)
* is not a complete match up to len, 1 if a complete match up to len is * is not a complete match up to len, 1 if a complete match up to len is
* found. * found.
*/ */
static __inline__ int llc_mac_match(u8 *mac1, u8 *mac2) static inline int llc_mac_match(const u8 *mac1, const u8 *mac2)
{ {
return !memcmp(mac1, mac2, IFHWADDRLEN); return !compare_ether_addr(mac1, mac2);
} }
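With llc_mac_null_var gone, the LLC address helpers become const-correct wrappers around the <linux/etherdevice.h> predicates, and llc_mac_multicast() is new. A small hedged example of how a receive-side lookup might combine them; the function, its arguments and the header name are illustrative assumptions:

    #include <linux/etherdevice.h>
    #include <net/llc_if.h>              /* assumed home of these helpers */

    /* Hypothetical: does a frame sent to @dmac match the bound address @laddr? */
    static int example_llc_addr_ok(const struct llc_addr *laddr, const u8 *dmac)
    {
            if (llc_addrany(laddr))      /* wildcard bind: anything matches */
                    return 1;
            if (llc_mac_multicast(dmac)) /* new helper added above */
                    return 0;
            return llc_mac_match(laddr->mac, dmac);
    }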
extern int llc_establish_connection(struct sock *sk, u8 *lmac, extern int llc_establish_connection(struct sock *sk, u8 *lmac,

include/net/netdma.h (new file, 44 lines)

@ -0,0 +1,44 @@
/*
* Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution in the
* file called COPYING.
*/
#ifndef NETDMA_H
#define NETDMA_H
#include <linux/config.h>
#ifdef CONFIG_NET_DMA
#include <linux/dmaengine.h>
#include <linux/skbuff.h>
static inline struct dma_chan *get_softnet_dma(void)
{
struct dma_chan *chan;
rcu_read_lock();
chan = rcu_dereference(__get_cpu_var(softnet_data.net_dma));
if (chan)
dma_chan_get(chan);
rcu_read_unlock();
return chan;
}
int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
const struct sk_buff *skb, int offset, struct iovec *to,
size_t len, struct dma_pinned_list *pinned_list);
#endif /* CONFIG_NET_DMA */
#endif /* NETDMA_H */
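get_softnet_dma() hands back the current CPU's DMA channel, if one was assigned, with a reference held. A hedged sketch of the calling pattern a receive path would use; apart from get_softnet_dma() and dma_skb_copy_datagram_iovec(), everything here (the wrapper, its arguments, the fallback) is an assumption for illustration:

    #include <net/netdma.h>

    #ifdef CONFIG_NET_DMA
    static int example_offload_copy(const struct sk_buff *skb, struct iovec *to,
                                    size_t len,
                                    struct dma_pinned_list *pinned_list)
    {
            struct dma_chan *chan = get_softnet_dma();
            int cookie;

            if (!chan)
                    return -ENODEV;      /* caller falls back to a CPU copy */

            cookie = dma_skb_copy_datagram_iovec(chan, skb, 0, to, len,
                                                 pinned_list);
            /* A negative value means the copy could not be set up; otherwise
             * the caller keeps the cookie, waits for completion later and
             * eventually drops the channel reference taken above (omitted). */
            return cookie < 0 ? cookie : 0;
    }
    #endif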


@ -114,6 +114,10 @@ struct nf_conn
u_int32_t mark; u_int32_t mark;
#endif #endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
u_int32_t secmark;
#endif
/* Storage reserved for other modules: */ /* Storage reserved for other modules: */
union nf_conntrack_proto proto; union nf_conntrack_proto proto;
@ -285,6 +289,7 @@ static inline int nf_ct_is_dying(struct nf_conn *ct)
} }
extern unsigned int nf_conntrack_htable_size; extern unsigned int nf_conntrack_htable_size;
extern int nf_conntrack_checksum;
#define NF_CT_STAT_INC(count) (__get_cpu_var(nf_conntrack_stat).count++) #define NF_CT_STAT_INC(count) (__get_cpu_var(nf_conntrack_stat).count++)


@ -20,6 +20,19 @@ static inline u_int32_t *nf_ct_get_mark(const struct sk_buff *skb,
} }
#endif /* CONFIG_IP_NF_CONNTRACK_MARK */ #endif /* CONFIG_IP_NF_CONNTRACK_MARK */
#ifdef CONFIG_IP_NF_CONNTRACK_SECMARK
static inline u_int32_t *nf_ct_get_secmark(const struct sk_buff *skb,
u_int32_t *ctinfo)
{
struct ip_conntrack *ct = ip_conntrack_get(skb, ctinfo);
if (ct)
return &ct->secmark;
else
return NULL;
}
#endif /* CONFIG_IP_NF_CONNTRACK_SECMARK */
#ifdef CONFIG_IP_NF_CT_ACCT #ifdef CONFIG_IP_NF_CT_ACCT
static inline struct ip_conntrack_counter * static inline struct ip_conntrack_counter *
nf_ct_get_counters(const struct sk_buff *skb) nf_ct_get_counters(const struct sk_buff *skb)
@ -70,6 +83,19 @@ static inline u_int32_t *nf_ct_get_mark(const struct sk_buff *skb,
} }
#endif /* CONFIG_NF_CONNTRACK_MARK */ #endif /* CONFIG_NF_CONNTRACK_MARK */
#ifdef CONFIG_NF_CONNTRACK_SECMARK
static inline u_int32_t *nf_ct_get_secmark(const struct sk_buff *skb,
u_int32_t *ctinfo)
{
struct nf_conn *ct = nf_ct_get(skb, ctinfo);
if (ct)
return &ct->secmark;
else
return NULL;
}
#endif /* CONFIG_NF_CONNTRACK_MARK */
#ifdef CONFIG_NF_CT_ACCT #ifdef CONFIG_NF_CT_ACCT
static inline struct ip_conntrack_counter * static inline struct ip_conntrack_counter *
nf_ct_get_counters(const struct sk_buff *skb) nf_ct_get_counters(const struct sk_buff *skb)


@ -36,7 +36,7 @@ extern rwlock_t raw_v4_lock;
extern struct sock *__raw_v4_lookup(struct sock *sk, unsigned short num, extern struct sock *__raw_v4_lookup(struct sock *sk, unsigned short num,
unsigned long raddr, unsigned long laddr, __be32 raddr, __be32 laddr,
int dif); int dif);
extern int raw_v4_input(struct sk_buff *skb, struct iphdr *iph, int hash); extern int raw_v4_input(struct sk_buff *skb, struct iphdr *iph, int hash);


@ -255,7 +255,7 @@ extern int sctp_debug_flag;
#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) #define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
#define SCTP_ENABLE_DEBUG #define SCTP_ENABLE_DEBUG
#define SCTP_DISABLE_DEBUG #define SCTP_DISABLE_DEBUG
#define SCTP_ASSERT(expr, str, func) #define SCTP_ASSERT(expr, str, func) BUG_ON(!(expr))
#endif /* SCTP_DEBUG */ #endif /* SCTP_DEBUG */


@ -555,7 +555,8 @@ struct sctp_af {
int (*to_addr_param) (const union sctp_addr *, int (*to_addr_param) (const union sctp_addr *,
union sctp_addr_param *); union sctp_addr_param *);
int (*addr_valid) (union sctp_addr *, int (*addr_valid) (union sctp_addr *,
struct sctp_sock *); struct sctp_sock *,
const struct sk_buff *);
sctp_scope_t (*scope) (union sctp_addr *); sctp_scope_t (*scope) (union sctp_addr *);
void (*inaddr_any) (union sctp_addr *, unsigned short); void (*inaddr_any) (union sctp_addr *, unsigned short);
int (*is_any) (const union sctp_addr *); int (*is_any) (const union sctp_addr *);


@ -132,6 +132,7 @@ struct sock_common {
* @sk_receive_queue: incoming packets * @sk_receive_queue: incoming packets
* @sk_wmem_alloc: transmit queue bytes committed * @sk_wmem_alloc: transmit queue bytes committed
* @sk_write_queue: Packet sending queue * @sk_write_queue: Packet sending queue
* @sk_async_wait_queue: DMA copied packets
* @sk_omem_alloc: "o" is "option" or "other" * @sk_omem_alloc: "o" is "option" or "other"
* @sk_wmem_queued: persistent queue size * @sk_wmem_queued: persistent queue size
* @sk_forward_alloc: space allocated forward * @sk_forward_alloc: space allocated forward
@ -205,6 +206,7 @@ struct sock {
atomic_t sk_omem_alloc; atomic_t sk_omem_alloc;
struct sk_buff_head sk_receive_queue; struct sk_buff_head sk_receive_queue;
struct sk_buff_head sk_write_queue; struct sk_buff_head sk_write_queue;
struct sk_buff_head sk_async_wait_queue;
int sk_wmem_queued; int sk_wmem_queued;
int sk_forward_alloc; int sk_forward_alloc;
gfp_t sk_allocation; gfp_t sk_allocation;
@ -871,10 +873,7 @@ static inline int sk_filter(struct sock *sk, struct sk_buff *skb, int needlock)
if (filter) { if (filter) {
unsigned int pkt_len = sk_run_filter(skb, filter->insns, unsigned int pkt_len = sk_run_filter(skb, filter->insns,
filter->len); filter->len);
if (!pkt_len) err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
err = -EPERM;
else
skb_trim(skb, pkt_len);
} }
if (needlock) if (needlock)
@ -1271,11 +1270,22 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
* This routine must be called with interrupts disabled or with the socket * This routine must be called with interrupts disabled or with the socket
* locked so that the sk_buff queue operation is ok. * locked so that the sk_buff queue operation is ok.
*/ */
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb) #ifdef CONFIG_NET_DMA
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
{
__skb_unlink(skb, &sk->sk_receive_queue);
if (!copied_early)
__kfree_skb(skb);
else
__skb_queue_tail(&sk->sk_async_wait_queue, skb);
}
#else
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
{ {
__skb_unlink(skb, &sk->sk_receive_queue); __skb_unlink(skb, &sk->sk_receive_queue);
__kfree_skb(skb); __kfree_skb(skb);
} }
#endif
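sk_eat_skb() now takes a copied_early flag: when the payload is still in flight on a DMA channel the skb is parked on sk_async_wait_queue instead of being freed, so it can be released once the copy completes. Call sites end up looking like the hedged fragment below, where copied_early is whatever state the protocol recorded when it submitted the copy (the dccp_recvmsg() changes later in this commit simply pass 0):

    /* Sketch of a recvmsg() loop call site, not taken verbatim from this commit. */
    if (!(flags & MSG_PEEK))
            sk_eat_skb(sk, skb, copied_early);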
extern void sock_enable_timestamp(struct sock *sk); extern void sock_enable_timestamp(struct sock *sk);
extern int sock_get_timestamp(struct sock *, struct timeval __user *); extern int sock_get_timestamp(struct sock *, struct timeval __user *);


@ -28,6 +28,7 @@
#include <linux/cache.h> #include <linux/cache.h>
#include <linux/percpu.h> #include <linux/percpu.h>
#include <linux/skbuff.h> #include <linux/skbuff.h>
#include <linux/dmaengine.h>
#include <net/inet_connection_sock.h> #include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h> #include <net/inet_timewait_sock.h>
@ -218,6 +219,7 @@ extern int sysctl_tcp_adv_win_scale;
extern int sysctl_tcp_tw_reuse; extern int sysctl_tcp_tw_reuse;
extern int sysctl_tcp_frto; extern int sysctl_tcp_frto;
extern int sysctl_tcp_low_latency; extern int sysctl_tcp_low_latency;
extern int sysctl_tcp_dma_copybreak;
extern int sysctl_tcp_nometrics_save; extern int sysctl_tcp_nometrics_save;
extern int sysctl_tcp_moderate_rcvbuf; extern int sysctl_tcp_moderate_rcvbuf;
extern int sysctl_tcp_tso_win_divisor; extern int sysctl_tcp_tso_win_divisor;
@ -225,6 +227,7 @@ extern int sysctl_tcp_abc;
extern int sysctl_tcp_mtu_probing; extern int sysctl_tcp_mtu_probing;
extern int sysctl_tcp_base_mss; extern int sysctl_tcp_base_mss;
extern int sysctl_tcp_workaround_signed_windows; extern int sysctl_tcp_workaround_signed_windows;
extern int sysctl_tcp_slow_start_after_idle;
extern atomic_t tcp_memory_allocated; extern atomic_t tcp_memory_allocated;
extern atomic_t tcp_sockets_allocated; extern atomic_t tcp_sockets_allocated;
@ -293,6 +296,8 @@ extern int tcp_rcv_established(struct sock *sk,
extern void tcp_rcv_space_adjust(struct sock *sk); extern void tcp_rcv_space_adjust(struct sock *sk);
extern void tcp_cleanup_rbuf(struct sock *sk, int copied);
extern int tcp_twsk_unique(struct sock *sk, extern int tcp_twsk_unique(struct sock *sk,
struct sock *sktw, void *twp); struct sock *sktw, void *twp);
@ -628,7 +633,7 @@ struct tcp_congestion_ops {
/* return slow start threshold (required) */ /* return slow start threshold (required) */
u32 (*ssthresh)(struct sock *sk); u32 (*ssthresh)(struct sock *sk);
/* lower bound for congestion window (optional) */ /* lower bound for congestion window (optional) */
u32 (*min_cwnd)(struct sock *sk); u32 (*min_cwnd)(const struct sock *sk);
/* do new cwnd calculation (required) */ /* do new cwnd calculation (required) */
void (*cong_avoid)(struct sock *sk, u32 ack, void (*cong_avoid)(struct sock *sk, u32 ack,
u32 rtt, u32 in_flight, int good_ack); u32 rtt, u32 in_flight, int good_ack);
@ -663,7 +668,7 @@ extern struct tcp_congestion_ops tcp_init_congestion_ops;
extern u32 tcp_reno_ssthresh(struct sock *sk); extern u32 tcp_reno_ssthresh(struct sock *sk);
extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack, extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack,
u32 rtt, u32 in_flight, int flag); u32 rtt, u32 in_flight, int flag);
extern u32 tcp_reno_min_cwnd(struct sock *sk); extern u32 tcp_reno_min_cwnd(const struct sock *sk);
extern struct tcp_congestion_ops tcp_reno; extern struct tcp_congestion_ops tcp_reno;
static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state) static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
@ -817,6 +822,12 @@ static inline void tcp_prequeue_init(struct tcp_sock *tp)
tp->ucopy.len = 0; tp->ucopy.len = 0;
tp->ucopy.memory = 0; tp->ucopy.memory = 0;
skb_queue_head_init(&tp->ucopy.prequeue); skb_queue_head_init(&tp->ucopy.prequeue);
#ifdef CONFIG_NET_DMA
tp->ucopy.dma_chan = NULL;
tp->ucopy.wakeup = 0;
tp->ucopy.pinned_list = NULL;
tp->ucopy.dma_cookie = 0;
#endif
} }
/* Packet is added to VJ-style prequeue for processing in process /* Packet is added to VJ-style prequeue for processing in process


@ -20,6 +20,8 @@
#include <net/ip6_fib.h> #include <net/ip6_fib.h>
#define XFRM_ALIGN8(len) (((len) + 7) & ~7) #define XFRM_ALIGN8(len) (((len) + 7) & ~7)
#define MODULE_ALIAS_XFRM_MODE(family, encap) \
MODULE_ALIAS("xfrm-mode-" __stringify(family) "-" __stringify(encap))
extern struct sock *xfrm_nl; extern struct sock *xfrm_nl;
extern u32 sysctl_xfrm_aevent_etime; extern u32 sysctl_xfrm_aevent_etime;
@ -164,6 +166,7 @@ struct xfrm_state
/* Reference to data common to all the instances of this /* Reference to data common to all the instances of this
* transformer. */ * transformer. */
struct xfrm_type *type; struct xfrm_type *type;
struct xfrm_mode *mode;
/* Security context */ /* Security context */
struct xfrm_sec_ctx *security; struct xfrm_sec_ctx *security;
@ -204,8 +207,8 @@ struct xfrm_type;
struct xfrm_dst; struct xfrm_dst;
struct xfrm_policy_afinfo { struct xfrm_policy_afinfo {
unsigned short family; unsigned short family;
rwlock_t lock; struct xfrm_type *type_map[IPPROTO_MAX];
struct xfrm_type_map *type_map; struct xfrm_mode *mode_map[XFRM_MODE_MAX];
struct dst_ops *dst_ops; struct dst_ops *dst_ops;
void (*garbage_collect)(void); void (*garbage_collect)(void);
int (*dst_lookup)(struct xfrm_dst **dst, struct flowi *fl); int (*dst_lookup)(struct xfrm_dst **dst, struct flowi *fl);
@ -232,7 +235,6 @@ extern int __xfrm_state_delete(struct xfrm_state *x);
struct xfrm_state_afinfo { struct xfrm_state_afinfo {
unsigned short family; unsigned short family;
rwlock_t lock;
struct list_head *state_bydst; struct list_head *state_bydst;
struct list_head *state_byspi; struct list_head *state_byspi;
int (*init_flags)(struct xfrm_state *x); int (*init_flags)(struct xfrm_state *x);
@ -264,16 +266,24 @@ struct xfrm_type
u32 (*get_max_size)(struct xfrm_state *, int size); u32 (*get_max_size)(struct xfrm_state *, int size);
}; };
struct xfrm_type_map {
rwlock_t lock;
struct xfrm_type *map[256];
};
extern int xfrm_register_type(struct xfrm_type *type, unsigned short family); extern int xfrm_register_type(struct xfrm_type *type, unsigned short family);
extern int xfrm_unregister_type(struct xfrm_type *type, unsigned short family); extern int xfrm_unregister_type(struct xfrm_type *type, unsigned short family);
extern struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family); extern struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family);
extern void xfrm_put_type(struct xfrm_type *type); extern void xfrm_put_type(struct xfrm_type *type);
struct xfrm_mode {
int (*input)(struct xfrm_state *x, struct sk_buff *skb);
int (*output)(struct sk_buff *skb);
struct module *owner;
unsigned int encap;
};
extern int xfrm_register_mode(struct xfrm_mode *mode, int family);
extern int xfrm_unregister_mode(struct xfrm_mode *mode, int family);
extern struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family);
extern void xfrm_put_mode(struct xfrm_mode *mode);
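struct xfrm_mode factors the transport/tunnel encapsulation step out of struct xfrm_type, and modes register per address family much like types do. A hedged sketch of what a minimal IPv4 transport-mode module would look like against this interface; the function bodies are stubs and all example_* names are made up:

    #include <linux/module.h>
    #include <linux/skbuff.h>
    #include <net/xfrm.h>

    static int example_transport_input(struct xfrm_state *x, struct sk_buff *skb)
    {
            /* A real mode would fix up the inner header pointers here. */
            return 0;
    }

    static int example_transport_output(struct sk_buff *skb)
    {
            /* A real mode would build the outer headers here. */
            return 0;
    }

    static struct xfrm_mode example_transport_mode = {
            .input  = example_transport_input,
            .output = example_transport_output,
            .owner  = THIS_MODULE,
            .encap  = XFRM_MODE_TRANSPORT,
    };

    static int __init example_mode_init(void)
    {
            return xfrm_register_mode(&example_transport_mode, AF_INET);
    }

    static void __exit example_mode_exit(void)
    {
            xfrm_unregister_mode(&example_transport_mode, AF_INET);
    }

    module_init(example_mode_init);
    module_exit(example_mode_exit);
    MODULE_LICENSE("GPL");
    MODULE_ALIAS_XFRM_MODE(AF_INET, XFRM_MODE_TRANSPORT);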
struct xfrm_tmpl struct xfrm_tmpl
{ {
/* id in template is interpreted as: /* id in template is interpreted as:


@ -66,6 +66,13 @@ source "net/ipv6/Kconfig"
endif # if INET endif # if INET
config NETWORK_SECMARK
bool "Security Marking"
help
This enables security marking of network packets, similar
to nfmark, but designated for security purposes.
If you are unsure how to answer this question, answer N.
menuconfig NETFILTER menuconfig NETFILTER
bool "Network packet filtering (replaces ipchains)" bool "Network packet filtering (replaces ipchains)"
---help--- ---help---
@ -215,6 +222,21 @@ config NET_PKTGEN
To compile this code as a module, choose M here: the To compile this code as a module, choose M here: the
module will be called pktgen. module will be called pktgen.
config NET_TCPPROBE
tristate "TCP connection probing"
depends on INET && EXPERIMENTAL && PROC_FS && KPROBES
---help---
This module allows for capturing the changes to TCP connection
state in response to incoming packets. It is used for debugging
TCP congestion avoidance modules. If you don't understand
what was just said, you don't need it: say N.
Documentation on how to use TCP connection probing can be found
at http://linux-net.osdl.org/index.php/TcpProbe
To compile this code as a module, choose M here: the
module will be called tcp_probe.
endmenu endmenu
endmenu endmenu


@ -98,7 +98,7 @@ static void unlink_clip_vcc(struct clip_vcc *clip_vcc)
printk(KERN_CRIT "!clip_vcc->entry (clip_vcc %p)\n", clip_vcc); printk(KERN_CRIT "!clip_vcc->entry (clip_vcc %p)\n", clip_vcc);
return; return;
} }
spin_lock_bh(&entry->neigh->dev->xmit_lock); /* block clip_start_xmit() */ netif_tx_lock_bh(entry->neigh->dev); /* block clip_start_xmit() */
entry->neigh->used = jiffies; entry->neigh->used = jiffies;
for (walk = &entry->vccs; *walk; walk = &(*walk)->next) for (walk = &entry->vccs; *walk; walk = &(*walk)->next)
if (*walk == clip_vcc) { if (*walk == clip_vcc) {
@ -122,7 +122,7 @@ static void unlink_clip_vcc(struct clip_vcc *clip_vcc)
printk(KERN_CRIT "ATMARP: unlink_clip_vcc failed (entry %p, vcc " printk(KERN_CRIT "ATMARP: unlink_clip_vcc failed (entry %p, vcc "
"0x%p)\n", entry, clip_vcc); "0x%p)\n", entry, clip_vcc);
out: out:
spin_unlock_bh(&entry->neigh->dev->xmit_lock); netif_tx_unlock_bh(entry->neigh->dev);
} }
/* The neighbour entry n->lock is held. */ /* The neighbour entry n->lock is held. */


@ -6,7 +6,7 @@ obj-$(CONFIG_BRIDGE) += bridge.o
bridge-y := br.o br_device.o br_fdb.o br_forward.o br_if.o br_input.o \ bridge-y := br.o br_device.o br_fdb.o br_forward.o br_if.o br_input.o \
br_ioctl.o br_notify.o br_stp.o br_stp_bpdu.o \ br_ioctl.o br_notify.o br_stp.o br_stp_bpdu.o \
br_stp_if.o br_stp_timer.o br_stp_if.o br_stp_timer.o br_netlink.o
bridge-$(CONFIG_SYSFS) += br_sysfs_if.o br_sysfs_br.o bridge-$(CONFIG_SYSFS) += br_sysfs_if.o br_sysfs_br.o


@ -30,36 +30,46 @@ static struct llc_sap *br_stp_sap;
static int __init br_init(void) static int __init br_init(void)
{ {
int err;
br_stp_sap = llc_sap_open(LLC_SAP_BSPAN, br_stp_rcv); br_stp_sap = llc_sap_open(LLC_SAP_BSPAN, br_stp_rcv);
if (!br_stp_sap) { if (!br_stp_sap) {
printk(KERN_ERR "bridge: can't register sap for STP\n"); printk(KERN_ERR "bridge: can't register sap for STP\n");
return -EBUSY; return -EADDRINUSE;
} }
br_fdb_init(); br_fdb_init();
#ifdef CONFIG_BRIDGE_NETFILTER err = br_netfilter_init();
if (br_netfilter_init()) if (err)
return 1; goto err_out1;
#endif
err = register_netdevice_notifier(&br_device_notifier);
if (err)
goto err_out2;
br_netlink_init();
brioctl_set(br_ioctl_deviceless_stub); brioctl_set(br_ioctl_deviceless_stub);
br_handle_frame_hook = br_handle_frame; br_handle_frame_hook = br_handle_frame;
br_fdb_get_hook = br_fdb_get; br_fdb_get_hook = br_fdb_get;
br_fdb_put_hook = br_fdb_put; br_fdb_put_hook = br_fdb_put;
register_netdevice_notifier(&br_device_notifier);
return 0; return 0;
err_out2:
br_netfilter_fini();
err_out1:
llc_sap_put(br_stp_sap);
return err;
} }
static void __exit br_deinit(void) static void __exit br_deinit(void)
{ {
rcu_assign_pointer(br_stp_sap->rcv_func, NULL); rcu_assign_pointer(br_stp_sap->rcv_func, NULL);
#ifdef CONFIG_BRIDGE_NETFILTER br_netlink_fini();
br_netfilter_fini(); br_netfilter_fini();
#endif
unregister_netdevice_notifier(&br_device_notifier); unregister_netdevice_notifier(&br_device_notifier);
brioctl_set(NULL); brioctl_set(NULL);


@ -145,9 +145,9 @@ static int br_set_tx_csum(struct net_device *dev, u32 data)
struct net_bridge *br = netdev_priv(dev); struct net_bridge *br = netdev_priv(dev);
if (data) if (data)
br->feature_mask |= NETIF_F_IP_CSUM; br->feature_mask |= NETIF_F_NO_CSUM;
else else
br->feature_mask &= ~NETIF_F_IP_CSUM; br->feature_mask &= ~NETIF_F_ALL_CSUM;
br_features_recompute(br); br_features_recompute(br);
return 0; return 0;
@ -185,5 +185,5 @@ void br_dev_setup(struct net_device *dev)
dev->priv_flags = IFF_EBRIDGE; dev->priv_flags = IFF_EBRIDGE;
dev->features = NETIF_F_SG | NETIF_F_FRAGLIST dev->features = NETIF_F_SG | NETIF_F_FRAGLIST
| NETIF_F_HIGHDMA | NETIF_F_TSO | NETIF_F_IP_CSUM; | NETIF_F_HIGHDMA | NETIF_F_TSO | NETIF_F_NO_CSUM;
} }


@ -20,14 +20,11 @@
#include <linux/netfilter_bridge.h> #include <linux/netfilter_bridge.h>
#include "br_private.h" #include "br_private.h"
/* Don't forward packets to originating port or forwarding disabled */
static inline int should_deliver(const struct net_bridge_port *p, static inline int should_deliver(const struct net_bridge_port *p,
const struct sk_buff *skb) const struct sk_buff *skb)
{ {
if (skb->dev == p->dev || return (skb->dev != p->dev && p->state == BR_STATE_FORWARDING);
p->state != BR_STATE_FORWARDING)
return 0;
return 1;
} }
static inline unsigned packet_length(const struct sk_buff *skb) static inline unsigned packet_length(const struct sk_buff *skb)
@ -55,10 +52,9 @@ int br_dev_queue_push_xmit(struct sk_buff *skb)
int br_forward_finish(struct sk_buff *skb) int br_forward_finish(struct sk_buff *skb)
{ {
NF_HOOK(PF_BRIDGE, NF_BR_POST_ROUTING, skb, NULL, skb->dev, return NF_HOOK(PF_BRIDGE, NF_BR_POST_ROUTING, skb, NULL, skb->dev,
br_dev_queue_push_xmit); br_dev_queue_push_xmit);
return 0;
} }
static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb) static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)


@ -372,12 +372,17 @@ void br_features_recompute(struct net_bridge *br)
struct net_bridge_port *p; struct net_bridge_port *p;
unsigned long features, checksum; unsigned long features, checksum;
features = br->feature_mask &~ NETIF_F_IP_CSUM; checksum = br->feature_mask & NETIF_F_ALL_CSUM ? NETIF_F_NO_CSUM : 0;
checksum = br->feature_mask & NETIF_F_IP_CSUM; features = br->feature_mask & ~NETIF_F_ALL_CSUM;
list_for_each_entry(p, &br->port_list, list) { list_for_each_entry(p, &br->port_list, list) {
if (!(p->dev->features if (checksum & NETIF_F_NO_CSUM &&
& (NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM))) !(p->dev->features & NETIF_F_NO_CSUM))
checksum ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM;
if (checksum & NETIF_F_HW_CSUM &&
!(p->dev->features & NETIF_F_HW_CSUM))
checksum ^= NETIF_F_HW_CSUM | NETIF_F_IP_CSUM;
if (!(p->dev->features & NETIF_F_IP_CSUM))
checksum = 0; checksum = 0;
features &= p->dev->features; features &= p->dev->features;
} }
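The recompute loop above steps the bridge's advertised checksum capability down from NETIF_F_NO_CSUM to NETIF_F_HW_CSUM to NETIF_F_IP_CSUM to none as less capable ports join. The two new feature masks it relies on are added in the linux/netdevice.h part of this series; as an assumption (not shown in these hunks) they amount to:

    #define NETIF_F_GEN_CSUM  (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)   /* protocol-independent offloads */
    #define NETIF_F_ALL_CSUM  (NETIF_F_IP_CSUM | NETIF_F_GEN_CSUM)  /* plus the IPv4-only offload */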


@ -407,12 +407,8 @@ static unsigned int br_nf_pre_routing_ipv6(unsigned int hook,
if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) { if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) {
if (pkt_len + sizeof(struct ipv6hdr) > skb->len) if (pkt_len + sizeof(struct ipv6hdr) > skb->len)
goto inhdr_error; goto inhdr_error;
if (pkt_len + sizeof(struct ipv6hdr) < skb->len) { if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
if (__pskb_trim(skb, pkt_len + sizeof(struct ipv6hdr)))
goto inhdr_error; goto inhdr_error;
if (skb->ip_summed == CHECKSUM_HW)
skb->ip_summed = CHECKSUM_NONE;
}
} }
if (hdr->nexthdr == NEXTHDR_HOP && check_hbh_len(skb)) if (hdr->nexthdr == NEXTHDR_HOP && check_hbh_len(skb))
goto inhdr_error; goto inhdr_error;
@ -495,11 +491,7 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb,
if (skb->len < len || len < 4 * iph->ihl) if (skb->len < len || len < 4 * iph->ihl)
goto inhdr_error; goto inhdr_error;
if (skb->len > len) { pskb_trim_rcsum(skb, len);
__pskb_trim(skb, len);
if (skb->ip_summed == CHECKSUM_HW)
skb->ip_summed = CHECKSUM_NONE;
}
nf_bridge_put(skb->nf_bridge); nf_bridge_put(skb->nf_bridge);
if (!nf_bridge_alloc(skb)) if (!nf_bridge_alloc(skb))
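Both hunks replace the open-coded trim-and-reset-CHECKSUM_HW sequence with pskb_trim_rcsum(). The helper itself is not in the lines shown; judging from how it is used here, it behaves roughly like the following sketch (an assumption, not a quote from the patch):

    static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
    {
            if (likely(len >= skb->len))
                    return 0;
            /* Trimming invalidates a hardware checksum taken over the old length. */
            if (skb->ip_summed == CHECKSUM_HW)
                    skb->ip_summed = CHECKSUM_NONE;
            return __pskb_trim(skb, len);
    }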

net/bridge/br_netlink.c (new file, 199 lines)

@ -0,0 +1,199 @@
/*
* Bridge netlink control interface
*
* Authors:
* Stephen Hemminger <shemminger@osdl.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include "br_private.h"
/*
* Create one netlink message for one interface
* Contains port and master info as well as carrier and bridge state.
*/
static int br_fill_ifinfo(struct sk_buff *skb, const struct net_bridge_port *port,
u32 pid, u32 seq, int event, unsigned int flags)
{
const struct net_bridge *br = port->br;
const struct net_device *dev = port->dev;
struct ifinfomsg *r;
struct nlmsghdr *nlh;
unsigned char *b = skb->tail;
u32 mtu = dev->mtu;
u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
u8 portstate = port->state;
pr_debug("br_fill_info event %d port %s master %s\n",
event, dev->name, br->dev->name);
nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*r), flags);
r = NLMSG_DATA(nlh);
r->ifi_family = AF_BRIDGE;
r->__ifi_pad = 0;
r->ifi_type = dev->type;
r->ifi_index = dev->ifindex;
r->ifi_flags = dev_get_flags(dev);
r->ifi_change = 0;
RTA_PUT(skb, IFLA_IFNAME, strlen(dev->name)+1, dev->name);
RTA_PUT(skb, IFLA_MASTER, sizeof(int), &br->dev->ifindex);
if (dev->addr_len)
RTA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr);
RTA_PUT(skb, IFLA_MTU, sizeof(mtu), &mtu);
if (dev->ifindex != dev->iflink)
RTA_PUT(skb, IFLA_LINK, sizeof(int), &dev->iflink);
RTA_PUT(skb, IFLA_OPERSTATE, sizeof(operstate), &operstate);
if (event == RTM_NEWLINK)
RTA_PUT(skb, IFLA_PROTINFO, sizeof(portstate), &portstate);
nlh->nlmsg_len = skb->tail - b;
return skb->len;
nlmsg_failure:
rtattr_failure:
skb_trim(skb, b - skb->data);
return -EINVAL;
}
/*
* Notify listeners of a change in port information
*/
void br_ifinfo_notify(int event, struct net_bridge_port *port)
{
struct sk_buff *skb;
int err = -ENOMEM;
pr_debug("bridge notify event=%d\n", event);
skb = alloc_skb(NLMSG_SPACE(sizeof(struct ifinfomsg) + 128),
GFP_ATOMIC);
if (!skb)
goto err_out;
err = br_fill_ifinfo(skb, port, current->pid, 0, event, 0);
if (err)
goto err_kfree;
NETLINK_CB(skb).dst_group = RTNLGRP_LINK;
netlink_broadcast(rtnl, skb, 0, RTNLGRP_LINK, GFP_ATOMIC);
return;
err_kfree:
kfree_skb(skb);
err_out:
netlink_set_err(rtnl, 0, RTNLGRP_LINK, err);
}
/*
* Dump information about all ports, in response to GETLINK
*/
static int br_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net_device *dev;
int idx;
int s_idx = cb->args[0];
int err = 0;
read_lock(&dev_base_lock);
for (dev = dev_base, idx = 0; dev; dev = dev->next) {
struct net_bridge_port *p = dev->br_port;
/* not a bridge port */
if (!p)
continue;
if (idx < s_idx)
continue;
err = br_fill_ifinfo(skb, p, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, RTM_NEWLINK, NLM_F_MULTI);
if (err <= 0)
break;
++idx;
}
read_unlock(&dev_base_lock);
cb->args[0] = idx;
return skb->len;
}
/*
* Change state of port (ie from forwarding to blocking etc)
* Used by spanning tree in user space.
*/
static int br_rtm_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
struct rtattr **rta = arg;
struct ifinfomsg *ifm = NLMSG_DATA(nlh);
struct net_device *dev;
struct net_bridge_port *p;
u8 new_state;
if (ifm->ifi_family != AF_BRIDGE)
return -EPFNOSUPPORT;
/* Must pass valid state as PROTINFO */
if (rta[IFLA_PROTINFO-1]) {
u8 *pstate = RTA_DATA(rta[IFLA_PROTINFO-1]);
new_state = *pstate;
} else
return -EINVAL;
if (new_state > BR_STATE_BLOCKING)
return -EINVAL;
/* Find bridge port */
dev = __dev_get_by_index(ifm->ifi_index);
if (!dev)
return -ENODEV;
p = dev->br_port;
if (!p)
return -EINVAL;
/* if kernel STP is running, don't allow changes */
if (p->br->stp_enabled)
return -EBUSY;
if (!netif_running(dev))
return -ENETDOWN;
if (!netif_carrier_ok(dev) && new_state != BR_STATE_DISABLED)
return -ENETDOWN;
p->state = new_state;
br_log_state(p);
return 0;
}
static struct rtnetlink_link bridge_rtnetlink_table[RTM_NR_MSGTYPES] = {
[RTM_GETLINK - RTM_BASE] = { .dumpit = br_dump_ifinfo, },
[RTM_SETLINK - RTM_BASE] = { .doit = br_rtm_setlink, },
};
void __init br_netlink_init(void)
{
rtnetlink_links[PF_BRIDGE] = bridge_rtnetlink_table;
}
void __exit br_netlink_fini(void)
{
rtnetlink_links[PF_BRIDGE] = NULL;
}
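br_rtm_setlink() accepts an AF_BRIDGE RTM_SETLINK message whose IFLA_PROTINFO attribute carries the desired port state, which is how a user-space STP daemon is meant to drive ports once kernel STP is switched off. A hedged user-space sketch of such a request follows; the helper name, error handling and hand-rolled message layout are illustrative only, not part of this commit:

    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/netlink.h>
    #include <linux/rtnetlink.h>
    #include <linux/if_bridge.h>        /* BR_STATE_* */

    static int example_set_port_state(int ifindex, unsigned char state)
    {
            struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
            struct {
                    struct nlmsghdr  nlh;
                    struct ifinfomsg ifi;
                    struct rtattr    rta;
                    unsigned char    state;
            } req;
            int fd, err;

            fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
            if (fd < 0)
                    return -1;

            memset(&req, 0, sizeof(req));
            req.nlh.nlmsg_len   = NLMSG_LENGTH(sizeof(struct ifinfomsg)) +
                                  RTA_LENGTH(sizeof(state));
            req.nlh.nlmsg_type  = RTM_SETLINK;
            req.nlh.nlmsg_flags = NLM_F_REQUEST;
            req.ifi.ifi_family  = AF_BRIDGE;        /* checked by br_rtm_setlink() */
            req.ifi.ifi_index   = ifindex;
            req.rta.rta_type    = IFLA_PROTINFO;    /* carries the new port state */
            req.rta.rta_len     = RTA_LENGTH(sizeof(state));
            req.state           = state;            /* e.g. BR_STATE_FORWARDING */

            err = sendto(fd, &req, req.nlh.nlmsg_len, 0,
                         (struct sockaddr *)&kernel, sizeof(kernel));
            close(fd);
            return err < 0 ? -1 : 0;
    }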


@ -14,6 +14,7 @@
*/ */
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include "br_private.h" #include "br_private.h"
@ -49,6 +50,7 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
case NETDEV_CHANGEADDR: case NETDEV_CHANGEADDR:
br_fdb_changeaddr(p, dev->dev_addr); br_fdb_changeaddr(p, dev->dev_addr);
br_ifinfo_notify(RTM_NEWLINK, p);
br_stp_recalculate_bridge_id(br); br_stp_recalculate_bridge_id(br);
break; break;


@ -29,7 +29,7 @@
#define BR_PORT_DEBOUNCE (HZ/10) #define BR_PORT_DEBOUNCE (HZ/10)
#define BR_VERSION "2.1" #define BR_VERSION "2.2"
typedef struct bridge_id bridge_id; typedef struct bridge_id bridge_id;
typedef struct mac_addr mac_addr; typedef struct mac_addr mac_addr;
@ -192,8 +192,13 @@ extern int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
extern int br_ioctl_deviceless_stub(unsigned int cmd, void __user *arg); extern int br_ioctl_deviceless_stub(unsigned int cmd, void __user *arg);
/* br_netfilter.c */ /* br_netfilter.c */
#ifdef CONFIG_BRIDGE_NETFILTER
extern int br_netfilter_init(void); extern int br_netfilter_init(void);
extern void br_netfilter_fini(void); extern void br_netfilter_fini(void);
#else
#define br_netfilter_init() (0)
#define br_netfilter_fini() do { } while(0)
#endif
/* br_stp.c */ /* br_stp.c */
extern void br_log_state(const struct net_bridge_port *p); extern void br_log_state(const struct net_bridge_port *p);
@ -232,6 +237,11 @@ extern struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
extern void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent); extern void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent);
/* br_netlink.c */
extern void br_netlink_init(void);
extern void br_netlink_fini(void);
extern void br_ifinfo_notify(int event, struct net_bridge_port *port);
#ifdef CONFIG_SYSFS #ifdef CONFIG_SYSFS
/* br_sysfs_if.c */ /* br_sysfs_if.c */
extern struct sysfs_ops brport_sysfs_ops; extern struct sysfs_ops brport_sysfs_ops;


@ -16,6 +16,7 @@
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/smp_lock.h> #include <linux/smp_lock.h>
#include <linux/etherdevice.h> #include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include "br_private.h" #include "br_private.h"
#include "br_private_stp.h" #include "br_private_stp.h"
@ -86,6 +87,7 @@ void br_stp_disable_bridge(struct net_bridge *br)
void br_stp_enable_port(struct net_bridge_port *p) void br_stp_enable_port(struct net_bridge_port *p)
{ {
br_init_port(p); br_init_port(p);
br_ifinfo_notify(RTM_NEWLINK, p);
br_port_state_selection(p->br); br_port_state_selection(p->br);
} }
@ -99,6 +101,8 @@ void br_stp_disable_port(struct net_bridge_port *p)
printk(KERN_INFO "%s: port %i(%s) entering %s state\n", printk(KERN_INFO "%s: port %i(%s) entering %s state\n",
br->dev->name, p->port_no, p->dev->name, "disabled"); br->dev->name, p->port_no, p->dev->name, "disabled");
br_ifinfo_notify(RTM_DELLINK, p);
wasroot = br_is_root_bridge(br); wasroot = br_is_root_bridge(br);
br_become_designated_port(p); br_become_designated_port(p);
p->state = BR_STATE_DISABLED; p->state = BR_STATE_DISABLED;


@ -16,3 +16,4 @@ obj-$(CONFIG_NET_DIVERT) += dv.o
obj-$(CONFIG_NET_PKTGEN) += pktgen.o obj-$(CONFIG_NET_PKTGEN) += pktgen.o
obj-$(CONFIG_WIRELESS_EXT) += wireless.o obj-$(CONFIG_WIRELESS_EXT) += wireless.o
obj-$(CONFIG_NETPOLL) += netpoll.o obj-$(CONFIG_NETPOLL) += netpoll.o
obj-$(CONFIG_NET_DMA) += user_dma.o


@ -115,6 +115,7 @@
#include <net/iw_handler.h> #include <net/iw_handler.h>
#include <asm/current.h> #include <asm/current.h>
#include <linux/audit.h> #include <linux/audit.h>
#include <linux/dmaengine.h>
/* /*
* The list of packet types we will receive (as opposed to discard) * The list of packet types we will receive (as opposed to discard)
@ -148,6 +149,12 @@ static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[16]; /* 16 way hashed list */ static struct list_head ptype_base[16]; /* 16 way hashed list */
static struct list_head ptype_all; /* Taps */ static struct list_head ptype_all; /* Taps */
#ifdef CONFIG_NET_DMA
static struct dma_client *net_dma_client;
static unsigned int net_dma_count;
static spinlock_t net_dma_event_lock;
#endif
/* /*
* The @dev_base list is protected by @dev_base_lock and the rtnl * The @dev_base list is protected by @dev_base_lock and the rtnl
* semaphore. * semaphore.
@ -1215,75 +1222,15 @@ static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
#define illegal_highdma(dev, skb) (0) #define illegal_highdma(dev, skb) (0)
#endif #endif
/* Keep head the same: replace data */
int __skb_linearize(struct sk_buff *skb, gfp_t gfp_mask)
{
unsigned int size;
u8 *data;
long offset;
struct skb_shared_info *ninfo;
int headerlen = skb->data - skb->head;
int expand = (skb->tail + skb->data_len) - skb->end;
if (skb_shared(skb))
BUG();
if (expand <= 0)
expand = 0;
size = skb->end - skb->head + expand;
size = SKB_DATA_ALIGN(size);
data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
if (!data)
return -ENOMEM;
/* Copy entire thing */
if (skb_copy_bits(skb, -headerlen, data, headerlen + skb->len))
BUG();
/* Set up shinfo */
ninfo = (struct skb_shared_info*)(data + size);
atomic_set(&ninfo->dataref, 1);
ninfo->tso_size = skb_shinfo(skb)->tso_size;
ninfo->tso_segs = skb_shinfo(skb)->tso_segs;
ninfo->nr_frags = 0;
ninfo->frag_list = NULL;
/* Offset between the two in bytes */
offset = data - skb->head;
/* Free old data. */
skb_release_data(skb);
skb->head = data;
skb->end = data + size;
/* Set up new pointers */
skb->h.raw += offset;
skb->nh.raw += offset;
skb->mac.raw += offset;
skb->tail += offset;
skb->data += offset;
/* We are no longer a clone, even if we were. */
skb->cloned = 0;
skb->tail += skb->data_len;
skb->data_len = 0;
return 0;
}
#define HARD_TX_LOCK(dev, cpu) { \ #define HARD_TX_LOCK(dev, cpu) { \
if ((dev->features & NETIF_F_LLTX) == 0) { \ if ((dev->features & NETIF_F_LLTX) == 0) { \
spin_lock(&dev->xmit_lock); \ netif_tx_lock(dev); \
dev->xmit_lock_owner = cpu; \
} \ } \
} }
#define HARD_TX_UNLOCK(dev) { \ #define HARD_TX_UNLOCK(dev) { \
if ((dev->features & NETIF_F_LLTX) == 0) { \ if ((dev->features & NETIF_F_LLTX) == 0) { \
dev->xmit_lock_owner = -1; \ netif_tx_unlock(dev); \
spin_unlock(&dev->xmit_lock); \
} \ } \
} }
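dev->xmit_lock and its owner bookkeeping move behind netif_tx_lock()/netif_tx_unlock() (plus the _bh variants used throughout the rest of this commit). The helpers live in the linux/netdevice.h part of the series rather than in the hunks shown; as an assumption they are essentially:

    static inline void netif_tx_lock(struct net_device *dev)
    {
            spin_lock(&dev->_xmit_lock);
            dev->xmit_lock_owner = smp_processor_id();
    }

    static inline void netif_tx_unlock(struct net_device *dev)
    {
            dev->xmit_lock_owner = -1;
            spin_unlock(&dev->_xmit_lock);
    }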
@ -1321,7 +1268,7 @@ int dev_queue_xmit(struct sk_buff *skb)
if (skb_shinfo(skb)->frag_list && if (skb_shinfo(skb)->frag_list &&
!(dev->features & NETIF_F_FRAGLIST) && !(dev->features & NETIF_F_FRAGLIST) &&
__skb_linearize(skb, GFP_ATOMIC)) __skb_linearize(skb))
goto out_kfree_skb; goto out_kfree_skb;
/* Fragmented skb is linearized if device does not support SG, /* Fragmented skb is linearized if device does not support SG,
@ -1330,14 +1277,14 @@ int dev_queue_xmit(struct sk_buff *skb)
*/ */
if (skb_shinfo(skb)->nr_frags && if (skb_shinfo(skb)->nr_frags &&
(!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) && (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
__skb_linearize(skb, GFP_ATOMIC)) __skb_linearize(skb))
goto out_kfree_skb; goto out_kfree_skb;
/* If packet is not checksummed and device does not support /* If packet is not checksummed and device does not support
* checksumming for this protocol, complete checksumming here. * checksumming for this protocol, complete checksumming here.
*/ */
if (skb->ip_summed == CHECKSUM_HW && if (skb->ip_summed == CHECKSUM_HW &&
(!(dev->features & (NETIF_F_HW_CSUM | NETIF_F_NO_CSUM)) && (!(dev->features & NETIF_F_GEN_CSUM) &&
(!(dev->features & NETIF_F_IP_CSUM) || (!(dev->features & NETIF_F_IP_CSUM) ||
skb->protocol != htons(ETH_P_IP)))) skb->protocol != htons(ETH_P_IP))))
if (skb_checksum_help(skb, 0)) if (skb_checksum_help(skb, 0))
@ -1382,8 +1329,8 @@ int dev_queue_xmit(struct sk_buff *skb)
/* The device has no queue. Common case for software devices: /* The device has no queue. Common case for software devices:
loopback, all the sorts of tunnels... loopback, all the sorts of tunnels...
Really, it is unlikely that xmit_lock protection is necessary here. Really, it is unlikely that netif_tx_lock protection is necessary
(f.e. loopback and IP tunnels are clean ignoring statistics here. (f.e. loopback and IP tunnels are clean ignoring statistics
counters.) counters.)
However, it is possible, that they rely on protection However, it is possible, that they rely on protection
made by us here. made by us here.
@ -1846,6 +1793,19 @@ static void net_rx_action(struct softirq_action *h)
} }
} }
out: out:
#ifdef CONFIG_NET_DMA
/*
* There may not be any more sk_buffs coming right now, so push
* any pending DMA copies to hardware
*/
if (net_dma_client) {
struct dma_chan *chan;
rcu_read_lock();
list_for_each_entry_rcu(chan, &net_dma_client->channels, client_node)
dma_async_memcpy_issue_pending(chan);
rcu_read_unlock();
}
#endif
local_irq_enable(); local_irq_enable();
return; return;
@ -2785,7 +2745,7 @@ int register_netdevice(struct net_device *dev)
BUG_ON(dev->reg_state != NETREG_UNINITIALIZED); BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
spin_lock_init(&dev->queue_lock); spin_lock_init(&dev->queue_lock);
spin_lock_init(&dev->xmit_lock); spin_lock_init(&dev->_xmit_lock);
dev->xmit_lock_owner = -1; dev->xmit_lock_owner = -1;
#ifdef CONFIG_NET_CLS_ACT #ifdef CONFIG_NET_CLS_ACT
spin_lock_init(&dev->ingress_lock); spin_lock_init(&dev->ingress_lock);
@ -2829,9 +2789,7 @@ int register_netdevice(struct net_device *dev)
/* Fix illegal SG+CSUM combinations. */ /* Fix illegal SG+CSUM combinations. */
if ((dev->features & NETIF_F_SG) && if ((dev->features & NETIF_F_SG) &&
!(dev->features & (NETIF_F_IP_CSUM | !(dev->features & NETIF_F_ALL_CSUM)) {
NETIF_F_NO_CSUM |
NETIF_F_HW_CSUM))) {
printk("%s: Dropping NETIF_F_SG since no checksum feature.\n", printk("%s: Dropping NETIF_F_SG since no checksum feature.\n",
dev->name); dev->name);
dev->features &= ~NETIF_F_SG; dev->features &= ~NETIF_F_SG;
@ -3300,6 +3258,88 @@ static int dev_cpu_callback(struct notifier_block *nfb,
} }
#endif /* CONFIG_HOTPLUG_CPU */ #endif /* CONFIG_HOTPLUG_CPU */
#ifdef CONFIG_NET_DMA
/**
* net_dma_rebalance - redistribute the available DMA channels across the online CPUs
* This is called when the number of channels allocated to the net_dma_client
* changes. The net_dma_client tries to have one DMA channel per CPU.
*/
static void net_dma_rebalance(void)
{
unsigned int cpu, i, n;
struct dma_chan *chan;
lock_cpu_hotplug();
if (net_dma_count == 0) {
for_each_online_cpu(cpu)
rcu_assign_pointer(per_cpu(softnet_data.net_dma, cpu), NULL);
unlock_cpu_hotplug();
return;
}
i = 0;
cpu = first_cpu(cpu_online_map);
rcu_read_lock();
list_for_each_entry(chan, &net_dma_client->channels, client_node) {
n = ((num_online_cpus() / net_dma_count)
+ (i < (num_online_cpus() % net_dma_count) ? 1 : 0));
while(n) {
per_cpu(softnet_data.net_dma, cpu) = chan;
cpu = next_cpu(cpu, cpu_online_map);
n--;
}
i++;
}
rcu_read_unlock();
unlock_cpu_hotplug();
}
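Worked example of the split above: the per-channel share is num_online_cpus() / net_dma_count, and the first num_online_cpus() % net_dma_count channels take one extra CPU. With 8 online CPUs and 3 channels the loop hands out 3, 3 and 2 CPUs respectively, so every CPU ends up with exactly one channel pointer in its softnet_data.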
/**
* netdev_dma_event - event callback for the net_dma_client
* @client: should always be net_dma_client
* @chan: DMA channel the event applies to
* @event: channel add/remove event being reported
*/
static void netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
enum dma_event event)
{
spin_lock(&net_dma_event_lock);
switch (event) {
case DMA_RESOURCE_ADDED:
net_dma_count++;
net_dma_rebalance();
break;
case DMA_RESOURCE_REMOVED:
net_dma_count--;
net_dma_rebalance();
break;
default:
break;
}
spin_unlock(&net_dma_event_lock);
}
/**
* netdev_dma_register - register the networking subsystem as a DMA client
*/
static int __init netdev_dma_register(void)
{
spin_lock_init(&net_dma_event_lock);
net_dma_client = dma_async_client_register(netdev_dma_event);
if (net_dma_client == NULL)
return -ENOMEM;
dma_async_client_chan_request(net_dma_client, num_online_cpus());
return 0;
}
#else
static int __init netdev_dma_register(void) { return -ENODEV; }
#endif /* CONFIG_NET_DMA */
/* /*
* Initialize the DEV module. At boot time this walks the device list and * Initialize the DEV module. At boot time this walks the device list and
@ -3353,6 +3393,8 @@ static int __init net_dev_init(void)
atomic_set(&queue->backlog_dev.refcnt, 1); atomic_set(&queue->backlog_dev.refcnt, 1);
} }
netdev_dma_register();
dev_boot_phase = 0; dev_boot_phase = 0;
open_softirq(NET_TX_SOFTIRQ, net_tx_action, NULL); open_softirq(NET_TX_SOFTIRQ, net_tx_action, NULL);
@ -3371,7 +3413,6 @@ subsys_initcall(net_dev_init);
EXPORT_SYMBOL(__dev_get_by_index); EXPORT_SYMBOL(__dev_get_by_index);
EXPORT_SYMBOL(__dev_get_by_name); EXPORT_SYMBOL(__dev_get_by_name);
EXPORT_SYMBOL(__dev_remove_pack); EXPORT_SYMBOL(__dev_remove_pack);
EXPORT_SYMBOL(__skb_linearize);
EXPORT_SYMBOL(dev_valid_name); EXPORT_SYMBOL(dev_valid_name);
EXPORT_SYMBOL(dev_add_pack); EXPORT_SYMBOL(dev_add_pack);
EXPORT_SYMBOL(dev_alloc_name); EXPORT_SYMBOL(dev_alloc_name);


@ -62,7 +62,7 @@
* Device mc lists are changed by bh at least if IPv6 is enabled, * Device mc lists are changed by bh at least if IPv6 is enabled,
* so that it must be bh protected. * so that it must be bh protected.
* *
* We block accesses to device mc filters with dev->xmit_lock. * We block accesses to device mc filters with netif_tx_lock.
*/ */
/* /*
@ -93,9 +93,9 @@ static void __dev_mc_upload(struct net_device *dev)
void dev_mc_upload(struct net_device *dev) void dev_mc_upload(struct net_device *dev)
{ {
spin_lock_bh(&dev->xmit_lock); netif_tx_lock_bh(dev);
__dev_mc_upload(dev); __dev_mc_upload(dev);
spin_unlock_bh(&dev->xmit_lock); netif_tx_unlock_bh(dev);
} }
/* /*
@ -107,7 +107,7 @@ int dev_mc_delete(struct net_device *dev, void *addr, int alen, int glbl)
int err = 0; int err = 0;
struct dev_mc_list *dmi, **dmip; struct dev_mc_list *dmi, **dmip;
spin_lock_bh(&dev->xmit_lock); netif_tx_lock_bh(dev);
for (dmip = &dev->mc_list; (dmi = *dmip) != NULL; dmip = &dmi->next) { for (dmip = &dev->mc_list; (dmi = *dmip) != NULL; dmip = &dmi->next) {
/* /*
@ -139,13 +139,13 @@ int dev_mc_delete(struct net_device *dev, void *addr, int alen, int glbl)
*/ */
__dev_mc_upload(dev); __dev_mc_upload(dev);
spin_unlock_bh(&dev->xmit_lock); netif_tx_unlock_bh(dev);
return 0; return 0;
} }
} }
err = -ENOENT; err = -ENOENT;
done: done:
spin_unlock_bh(&dev->xmit_lock); netif_tx_unlock_bh(dev);
return err; return err;
} }
@ -160,7 +160,7 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
dmi1 = kmalloc(sizeof(*dmi), GFP_ATOMIC); dmi1 = kmalloc(sizeof(*dmi), GFP_ATOMIC);
spin_lock_bh(&dev->xmit_lock); netif_tx_lock_bh(dev);
for (dmi = dev->mc_list; dmi != NULL; dmi = dmi->next) { for (dmi = dev->mc_list; dmi != NULL; dmi = dmi->next) {
if (memcmp(dmi->dmi_addr, addr, dmi->dmi_addrlen) == 0 && if (memcmp(dmi->dmi_addr, addr, dmi->dmi_addrlen) == 0 &&
dmi->dmi_addrlen == alen) { dmi->dmi_addrlen == alen) {
@ -176,7 +176,7 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
} }
if ((dmi = dmi1) == NULL) { if ((dmi = dmi1) == NULL) {
spin_unlock_bh(&dev->xmit_lock); netif_tx_unlock_bh(dev);
return -ENOMEM; return -ENOMEM;
} }
memcpy(dmi->dmi_addr, addr, alen); memcpy(dmi->dmi_addr, addr, alen);
@ -189,11 +189,11 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
__dev_mc_upload(dev); __dev_mc_upload(dev);
spin_unlock_bh(&dev->xmit_lock); netif_tx_unlock_bh(dev);
return 0; return 0;
done: done:
spin_unlock_bh(&dev->xmit_lock); netif_tx_unlock_bh(dev);
kfree(dmi1); kfree(dmi1);
return err; return err;
} }
@ -204,7 +204,7 @@ done:
void dev_mc_discard(struct net_device *dev) void dev_mc_discard(struct net_device *dev)
{ {
spin_lock_bh(&dev->xmit_lock); netif_tx_lock_bh(dev);
while (dev->mc_list != NULL) { while (dev->mc_list != NULL) {
struct dev_mc_list *tmp = dev->mc_list; struct dev_mc_list *tmp = dev->mc_list;
@ -215,7 +215,7 @@ void dev_mc_discard(struct net_device *dev)
} }
dev->mc_count = 0; dev->mc_count = 0;
spin_unlock_bh(&dev->xmit_lock); netif_tx_unlock_bh(dev);
} }
#ifdef CONFIG_PROC_FS #ifdef CONFIG_PROC_FS
@ -250,7 +250,7 @@ static int dev_mc_seq_show(struct seq_file *seq, void *v)
struct dev_mc_list *m; struct dev_mc_list *m;
struct net_device *dev = v; struct net_device *dev = v;
spin_lock_bh(&dev->xmit_lock); netif_tx_lock_bh(dev);
for (m = dev->mc_list; m; m = m->next) { for (m = dev->mc_list; m; m = m->next) {
int i; int i;
@ -262,7 +262,7 @@ static int dev_mc_seq_show(struct seq_file *seq, void *v)
seq_putc(seq, '\n'); seq_putc(seq, '\n');
} }
spin_unlock_bh(&dev->xmit_lock); netif_tx_unlock_bh(dev);
return 0; return 0;
} }


@ -30,7 +30,7 @@ u32 ethtool_op_get_link(struct net_device *dev)
u32 ethtool_op_get_tx_csum(struct net_device *dev) u32 ethtool_op_get_tx_csum(struct net_device *dev)
{ {
return (dev->features & (NETIF_F_IP_CSUM | NETIF_F_HW_CSUM)) != 0; return (dev->features & NETIF_F_ALL_CSUM) != 0;
} }
int ethtool_op_set_tx_csum(struct net_device *dev, u32 data) int ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
@ -551,9 +551,7 @@ static int ethtool_set_sg(struct net_device *dev, char __user *useraddr)
return -EFAULT; return -EFAULT;
if (edata.data && if (edata.data &&
!(dev->features & (NETIF_F_IP_CSUM | !(dev->features & NETIF_F_ALL_CSUM))
NETIF_F_NO_CSUM |
NETIF_F_HW_CSUM)))
return -EINVAL; return -EINVAL;
return __ethtool_set_sg(dev, edata.data); return __ethtool_set_sg(dev, edata.data);
@ -591,7 +589,7 @@ static int ethtool_set_tso(struct net_device *dev, char __user *useraddr)
static int ethtool_get_ufo(struct net_device *dev, char __user *useraddr) static int ethtool_get_ufo(struct net_device *dev, char __user *useraddr)
{ {
struct ethtool_value edata = { ETHTOOL_GTSO }; struct ethtool_value edata = { ETHTOOL_GUFO };
if (!dev->ethtool_ops->get_ufo) if (!dev->ethtool_ops->get_ufo)
return -EOPNOTSUPP; return -EOPNOTSUPP;
@ -600,6 +598,7 @@ static int ethtool_get_ufo(struct net_device *dev, char __user *useraddr)
return -EFAULT; return -EFAULT;
return 0; return 0;
} }
static int ethtool_set_ufo(struct net_device *dev, char __user *useraddr) static int ethtool_set_ufo(struct net_device *dev, char __user *useraddr)
{ {
struct ethtool_value edata; struct ethtool_value edata;


@ -273,24 +273,21 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
do { do {
npinfo->tries--; npinfo->tries--;
spin_lock(&np->dev->xmit_lock); netif_tx_lock(np->dev);
np->dev->xmit_lock_owner = smp_processor_id();
/* /*
* network drivers do not expect to be called if the queue is * network drivers do not expect to be called if the queue is
* stopped. * stopped.
*/ */
if (netif_queue_stopped(np->dev)) { if (netif_queue_stopped(np->dev)) {
np->dev->xmit_lock_owner = -1; netif_tx_unlock(np->dev);
spin_unlock(&np->dev->xmit_lock);
netpoll_poll(np); netpoll_poll(np);
udelay(50); udelay(50);
continue; continue;
} }
status = np->dev->hard_start_xmit(skb, np->dev); status = np->dev->hard_start_xmit(skb, np->dev);
np->dev->xmit_lock_owner = -1; netif_tx_unlock(np->dev);
spin_unlock(&np->dev->xmit_lock);
/* success */ /* success */
if(!status) { if(!status) {


@ -2897,7 +2897,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
} }
} }
spin_lock_bh(&odev->xmit_lock); netif_tx_lock_bh(odev);
if (!netif_queue_stopped(odev)) { if (!netif_queue_stopped(odev)) {
atomic_inc(&(pkt_dev->skb->users)); atomic_inc(&(pkt_dev->skb->users));
@ -2942,7 +2942,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
pkt_dev->next_tx_ns = 0; pkt_dev->next_tx_ns = 0;
} }
spin_unlock_bh(&odev->xmit_lock); netif_tx_unlock_bh(odev);
/* If pkt_dev->count is zero, then run forever */ /* If pkt_dev->count is zero, then run forever */
if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) { if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) {


@ -464,7 +464,7 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
n->tc_verd = CLR_TC_MUNGED(n->tc_verd); n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
C(input_dev); C(input_dev);
#endif #endif
skb_copy_secmark(n, skb);
#endif #endif
C(truesize); C(truesize);
atomic_set(&n->users, 1); atomic_set(&n->users, 1);
@ -526,6 +526,7 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
#endif #endif
new->tc_index = old->tc_index; new->tc_index = old->tc_index;
#endif #endif
skb_copy_secmark(new, old);
atomic_set(&new->users, 1); atomic_set(&new->users, 1);
skb_shinfo(new)->tso_size = skb_shinfo(old)->tso_size; skb_shinfo(new)->tso_size = skb_shinfo(old)->tso_size;
skb_shinfo(new)->tso_segs = skb_shinfo(old)->tso_segs; skb_shinfo(new)->tso_segs = skb_shinfo(old)->tso_segs;
@ -800,12 +801,10 @@ struct sk_buff *skb_pad(struct sk_buff *skb, int pad)
return nskb; return nskb;
} }
/* Trims skb to length len. It can change skb pointers, if "realloc" is 1. /* Trims skb to length len. It can change skb pointers.
* If realloc==0 and trimming is impossible without change of data,
* it is BUG().
*/ */
int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc) int ___pskb_trim(struct sk_buff *skb, unsigned int len)
{ {
int offset = skb_headlen(skb); int offset = skb_headlen(skb);
int nfrags = skb_shinfo(skb)->nr_frags; int nfrags = skb_shinfo(skb)->nr_frags;
@ -815,7 +814,6 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc)
int end = offset + skb_shinfo(skb)->frags[i].size; int end = offset + skb_shinfo(skb)->frags[i].size;
if (end > len) { if (end > len) {
if (skb_cloned(skb)) { if (skb_cloned(skb)) {
BUG_ON(!realloc);
if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
return -ENOMEM; return -ENOMEM;
} }


@ -832,6 +832,9 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
atomic_set(&newsk->sk_omem_alloc, 0); atomic_set(&newsk->sk_omem_alloc, 0);
skb_queue_head_init(&newsk->sk_receive_queue); skb_queue_head_init(&newsk->sk_receive_queue);
skb_queue_head_init(&newsk->sk_write_queue); skb_queue_head_init(&newsk->sk_write_queue);
#ifdef CONFIG_NET_DMA
skb_queue_head_init(&newsk->sk_async_wait_queue);
#endif
rwlock_init(&newsk->sk_dst_lock); rwlock_init(&newsk->sk_dst_lock);
rwlock_init(&newsk->sk_callback_lock); rwlock_init(&newsk->sk_callback_lock);
@ -1383,6 +1386,9 @@ void sock_init_data(struct socket *sock, struct sock *sk)
skb_queue_head_init(&sk->sk_receive_queue); skb_queue_head_init(&sk->sk_receive_queue);
skb_queue_head_init(&sk->sk_write_queue); skb_queue_head_init(&sk->sk_write_queue);
skb_queue_head_init(&sk->sk_error_queue); skb_queue_head_init(&sk->sk_error_queue);
#ifdef CONFIG_NET_DMA
skb_queue_head_init(&sk->sk_async_wait_queue);
#endif
sk->sk_send_head = NULL; sk->sk_send_head = NULL;

net/core/user_dma.c (new file, 131 lines)

@ -0,0 +1,131 @@
/*
* Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
* Portions based on net/core/datagram.c and copyrighted by their authors.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution in the
* file called COPYING.
*/
/*
* This code allows the net stack to make use of a DMA engine for
* skb to iovec copies.
*/
#include <linux/dmaengine.h>
#include <linux/socket.h>
#include <linux/rtnetlink.h> /* for BUG_TRAP */
#include <net/tcp.h>
#define NET_DMA_DEFAULT_COPYBREAK 4096
int sysctl_tcp_dma_copybreak = NET_DMA_DEFAULT_COPYBREAK;
/**
* dma_skb_copy_datagram_iovec - Copy a datagram to an iovec.
* @chan - DMA channel to use for the copy
* @skb - buffer to copy
* @offset - offset in the buffer to start copying from
* @iovec - io vector to copy to
* @len - amount of data to copy from buffer to iovec
* @pinned_list - locked iovec buffer data
*
* Note: the iovec is modified during the copy.
*/
int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
struct sk_buff *skb, int offset, struct iovec *to,
size_t len, struct dma_pinned_list *pinned_list)
{
int start = skb_headlen(skb);
int i, copy = start - offset;
dma_cookie_t cookie = 0;
/* Copy header. */
if (copy > 0) {
if (copy > len)
copy = len;
cookie = dma_memcpy_to_iovec(chan, to, pinned_list,
skb->data + offset, copy);
if (cookie < 0)
goto fault;
len -= copy;
if (len == 0)
goto end;
offset += copy;
}
/* Copy paged appendix. Hmm... why does this look so complicated? */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
BUG_TRAP(start <= offset + len);
end = start + skb_shinfo(skb)->frags[i].size;
copy = end - offset;
if ((copy = end - offset) > 0) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
struct page *page = frag->page;
if (copy > len)
copy = len;
cookie = dma_memcpy_pg_to_iovec(chan, to, pinned_list, page,
frag->page_offset + offset - start, copy);
if (cookie < 0)
goto fault;
len -= copy;
if (len == 0)
goto end;
offset += copy;
}
start = end;
}
if (skb_shinfo(skb)->frag_list) {
struct sk_buff *list = skb_shinfo(skb)->frag_list;
for (; list; list = list->next) {
int end;
BUG_TRAP(start <= offset + len);
end = start + list->len;
copy = end - offset;
if (copy > 0) {
if (copy > len)
copy = len;
cookie = dma_skb_copy_datagram_iovec(chan, list,
offset - start, to, copy,
pinned_list);
if (cookie < 0)
goto fault;
len -= copy;
if (len == 0)
goto end;
offset += copy;
}
start = end;
}
}
end:
if (!len) {
skb->dma_cookie = cookie;
return cookie;
}
fault:
return -EFAULT;
}


@ -719,7 +719,7 @@ int dccp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
} }
dccp_pr_debug("packet_type=%s\n", dccp_pr_debug("packet_type=%s\n",
dccp_packet_name(dh->dccph_type)); dccp_packet_name(dh->dccph_type));
sk_eat_skb(sk, skb); sk_eat_skb(sk, skb, 0);
verify_sock_status: verify_sock_status:
if (sock_flag(sk, SOCK_DONE)) { if (sock_flag(sk, SOCK_DONE)) {
len = 0; len = 0;
@ -773,7 +773,7 @@ verify_sock_status:
} }
found_fin_ok: found_fin_ok:
if (!(flags & MSG_PEEK)) if (!(flags & MSG_PEEK))
sk_eat_skb(sk, skb); sk_eat_skb(sk, skb, 0);
break; break;
} while (1); } while (1);
out: out:


@ -801,8 +801,7 @@ got_it:
* We linearize everything except data segments here. * We linearize everything except data segments here.
*/ */
if (cb->nsp_flags & ~0x60) { if (cb->nsp_flags & ~0x60) {
if (unlikely(skb_is_nonlinear(skb)) && if (unlikely(skb_linearize(skb)))
skb_linearize(skb, GFP_ATOMIC) != 0)
goto free_out; goto free_out;
} }


@ -629,8 +629,7 @@ int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type
padlen); padlen);
if (flags & DN_RT_PKT_CNTL) { if (flags & DN_RT_PKT_CNTL) {
if (unlikely(skb_is_nonlinear(skb)) && if (unlikely(skb_linearize(skb)))
skb_linearize(skb, GFP_ATOMIC) != 0)
goto dump_it; goto dump_it;
switch(flags & DN_RT_CNTL_MSK) { switch(flags & DN_RT_CNTL_MSK) {

Some files were not shown because too many files have changed in this diff.