OpenCloudOS-Kernel/drivers/firewire/net.c

// SPDX-License-Identifier: GPL-2.0-only
/*
* IPv4 over IEEE 1394, per RFC 2734
* IPv6 over IEEE 1394, per RFC 3146
*
* Copyright (C) 2009 Jay Fenlason <fenlason@redhat.com>
*
* based on eth1394 by Ben Collins et al
*/
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/ethtool.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/highmem.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/jiffies.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <net/arp.h>
#include <net/firewire.h>
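/*
 * <net/firewire.h> provides struct fwnet_header and the 16-byte
 * union fwnet_hwaddr used as this interface's hardware address:
 * the peer's EUI-64 plus max_rec, sspd and the hi/lo halves of its
 * FIFO address (see fwnet_header_create() and fwnet_tx() below).
 */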
/* rx limits */
#define FWNET_MAX_FRAGMENTS 30 /* arbitrary, > TX queue depth */
#define FWNET_ISO_PAGE_COUNT (PAGE_SIZE < 16*1024 ? 4 : 2)
/* tx limits */
#define FWNET_MAX_QUEUED_DATAGRAMS 20 /* < 64 = number of tlabels */
#define FWNET_MIN_QUEUED_DATAGRAMS 10 /* should keep AT DMA busy enough */
#define FWNET_TX_QUEUE_LEN FWNET_MAX_QUEUED_DATAGRAMS /* ? */
#define IEEE1394_BROADCAST_CHANNEL 31
#define IEEE1394_ALL_NODES (0xffc0 | 0x003f)
#define IEEE1394_MAX_PAYLOAD_S100 512
#define FWNET_NO_FIFO_ADDR (~0ULL)
#define IANA_SPECIFIER_ID 0x00005eU
#define RFC2734_SW_VERSION 0x000001U
#define RFC3146_SW_VERSION 0x000002U
#define IEEE1394_GASP_HDR_SIZE 8
#define RFC2374_UNFRAG_HDR_SIZE 4
#define RFC2374_FRAG_HDR_SIZE 8
#define RFC2374_FRAG_OVERHEAD 4
#define RFC2374_HDR_UNFRAG 0 /* unfragmented */
#define RFC2374_HDR_FIRSTFRAG 1 /* first fragment */
#define RFC2374_HDR_LASTFRAG 2 /* last fragment */
#define RFC2374_HDR_INTFRAG 3 /* interior fragment */
static bool fwnet_hwaddr_is_multicast(u8 *ha)
{
return !!(*ha & 1);
}
/* IPv4 and IPv6 encapsulation header */
struct rfc2734_header {
u32 w0;
u32 w1;
};
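/*
 * Field layout implied by the accessor macros below (RFC 2734/3146
 * encapsulation header):
 * w0: lf in bits 31-30; dg_size - 1 in bits 27-16 (fragments);
 *     ether_type in bits 15-0 (unfragmented and first fragments);
 *     fg_off in bits 11-0 (interior and last fragments)
 * w1: dgl in bits 31-16 (fragments only)
 */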
#define fwnet_get_hdr_lf(h) (((h)->w0 & 0xc0000000) >> 30)
#define fwnet_get_hdr_ether_type(h) (((h)->w0 & 0x0000ffff))
#define fwnet_get_hdr_dg_size(h) ((((h)->w0 & 0x0fff0000) >> 16) + 1)
#define fwnet_get_hdr_fg_off(h) (((h)->w0 & 0x00000fff))
#define fwnet_get_hdr_dgl(h) (((h)->w1 & 0xffff0000) >> 16)
#define fwnet_set_hdr_lf(lf) ((lf) << 30)
#define fwnet_set_hdr_ether_type(et) (et)
#define fwnet_set_hdr_dg_size(dgs) (((dgs) - 1) << 16)
#define fwnet_set_hdr_fg_off(fgo) (fgo)
#define fwnet_set_hdr_dgl(dgl) ((dgl) << 16)
static inline void fwnet_make_uf_hdr(struct rfc2734_header *hdr,
unsigned ether_type)
{
hdr->w0 = fwnet_set_hdr_lf(RFC2374_HDR_UNFRAG)
| fwnet_set_hdr_ether_type(ether_type);
}
static inline void fwnet_make_ff_hdr(struct rfc2734_header *hdr,
unsigned ether_type, unsigned dg_size, unsigned dgl)
{
hdr->w0 = fwnet_set_hdr_lf(RFC2374_HDR_FIRSTFRAG)
| fwnet_set_hdr_dg_size(dg_size)
| fwnet_set_hdr_ether_type(ether_type);
hdr->w1 = fwnet_set_hdr_dgl(dgl);
}
static inline void fwnet_make_sf_hdr(struct rfc2734_header *hdr,
unsigned lf, unsigned dg_size, unsigned fg_off, unsigned dgl)
{
hdr->w0 = fwnet_set_hdr_lf(lf)
| fwnet_set_hdr_dg_size(dg_size)
| fwnet_set_hdr_fg_off(fg_off);
hdr->w1 = fwnet_set_hdr_dgl(dgl);
}
/* This list keeps track of what parts of the datagram have been filled in */
struct fwnet_fragment_info {
struct list_head fi_link;
u16 offset;
u16 len;
};
struct fwnet_partial_datagram {
struct list_head pd_link;
struct list_head fi_list;
struct sk_buff *skb;
/* FIXME Why not use skb->data? */
char *pbuf;
u16 datagram_label;
u16 ether_type;
u16 datagram_size;
};
static DEFINE_MUTEX(fwnet_device_mutex);
static LIST_HEAD(fwnet_device_list);
struct fwnet_device {
struct list_head dev_link;
spinlock_t lock;
enum {
FWNET_BROADCAST_ERROR,
FWNET_BROADCAST_RUNNING,
FWNET_BROADCAST_STOPPED,
} broadcast_state;
struct fw_iso_context *broadcast_rcv_context;
struct fw_iso_buffer broadcast_rcv_buffer;
void **broadcast_rcv_buffer_ptrs;
unsigned broadcast_rcv_next_ptr;
unsigned num_broadcast_rcv_ptrs;
unsigned rcv_buffer_size;
/*
* This value is the maximum unfragmented datagram size that can be
* sent by the hardware. It already has the GASP overhead and the
* unfragmented datagram header overhead calculated into it.
*/
unsigned broadcast_xmt_max_payload;
u16 broadcast_xmt_datagramlabel;
/*
* The CSR address that remote nodes must send datagrams to for us to
* receive them.
*/
struct fw_address_handler handler;
u64 local_fifo;
/* Number of tx datagrams that have been queued but not yet acked */
int queued_datagrams;
int peer_count;
struct list_head peer_list;
struct fw_card *card;
struct net_device *netdev;
};
struct fwnet_peer {
struct list_head peer_link;
struct fwnet_device *dev;
u64 guid;
/* guarded by dev->lock */
struct list_head pd_list; /* received partial datagrams */
unsigned pdg_size; /* pd_list size */
u16 datagram_label; /* outgoing datagram label */
u16 max_payload; /* includes RFC2374_FRAG_HDR_SIZE overhead */
int node_id;
int generation;
unsigned speed;
};
/* This is our task struct. It's used for the packet complete callback. */
struct fwnet_packet_task {
struct fw_transaction transaction;
struct rfc2734_header hdr;
struct sk_buff *skb;
struct fwnet_device *dev;
int outstanding_pkts;
u64 fifo_addr;
u16 dest_node;
u16 max_payload;
u8 generation;
u8 speed;
u8 enqueued;
};
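/*
 * Lifetime, as implemented in fwnet_send_packet() and the completion
 * callbacks below: outstanding_pkts counts fragments still to be
 * sent, enqueued is set once the TX path has handed the ptask off,
 * and whichever of the TX path and the AT completion runs last
 * (under dev->lock) frees the ptask.
 */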
/*
* saddr == NULL means use device source address.
* daddr == NULL means leave destination address (eg unresolved arp).
*/
static int fwnet_header_create(struct sk_buff *skb, struct net_device *net,
unsigned short type, const void *daddr,
const void *saddr, unsigned len)
{
struct fwnet_header *h;
h = skb_push(skb, sizeof(*h));
put_unaligned_be16(type, &h->h_proto);
if (net->flags & (IFF_LOOPBACK | IFF_NOARP)) {
memset(h->h_dest, 0, net->addr_len);
return net->hard_header_len;
}
if (daddr) {
memcpy(h->h_dest, daddr, net->addr_len);
return net->hard_header_len;
}
return -net->hard_header_len;
}
static int fwnet_header_cache(const struct neighbour *neigh,
struct hh_cache *hh, __be16 type)
{
struct net_device *net;
struct fwnet_header *h;
if (type == cpu_to_be16(ETH_P_802_3))
return -1;
net = neigh->dev;
h = (struct fwnet_header *)((u8 *)hh->hh_data + HH_DATA_OFF(sizeof(*h)));
h->h_proto = type;
memcpy(h->h_dest, neigh->ha, net->addr_len);
/* Pairs with the READ_ONCE() in neigh_resolve_output(),
* neigh_hh_output() and neigh_update_hhs().
*/
smp_store_release(&hh->hh_len, FWNET_HLEN);
return 0;
}
/* Called by Address Resolution module to notify changes in address. */
static void fwnet_header_cache_update(struct hh_cache *hh,
const struct net_device *net, const unsigned char *haddr)
{
memcpy((u8 *)hh->hh_data + HH_DATA_OFF(FWNET_HLEN), haddr, net->addr_len);
}
static int fwnet_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
memcpy(haddr, skb->dev->dev_addr, FWNET_ALEN);
return FWNET_ALEN;
}
static const struct header_ops fwnet_header_ops = {
.create = fwnet_header_create,
.cache = fwnet_header_cache,
.cache_update = fwnet_header_cache_update,
.parse = fwnet_header_parse,
};
/* FIXME: is this correct for all cases? */
static bool fwnet_frag_overlap(struct fwnet_partial_datagram *pd,
unsigned offset, unsigned len)
{
struct fwnet_fragment_info *fi;
unsigned end = offset + len;
list_for_each_entry(fi, &pd->fi_list, fi_link)
if (offset < fi->offset + fi->len && end > fi->offset)
return true;
return false;
}
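/*
 * fwnet_frag_new() records which byte ranges of the datagram have
 * arrived and merges a new range with adjacent entries once they
 * become contiguous; reassembly is finished when a single entry
 * spans the whole datagram (see fwnet_pd_is_complete()).
 */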
/* Assumes that new fragment does not overlap any existing fragments */
static struct fwnet_fragment_info *fwnet_frag_new(
struct fwnet_partial_datagram *pd, unsigned offset, unsigned len)
{
struct fwnet_fragment_info *fi, *fi2, *new;
struct list_head *list;
list = &pd->fi_list;
list_for_each_entry(fi, &pd->fi_list, fi_link) {
if (fi->offset + fi->len == offset) {
/* The new fragment can be tacked on to the end */
/* Did the new fragment plug a hole? */
fi2 = list_entry(fi->fi_link.next,
struct fwnet_fragment_info, fi_link);
if (fi->offset + fi->len == fi2->offset) {
/* glue fragments together */
fi->len += len + fi2->len;
list_del(&fi2->fi_link);
kfree(fi2);
} else {
fi->len += len;
}
return fi;
}
if (offset + len == fi->offset) {
/* The new fragment can be tacked on to the beginning */
/* Did the new fragment plug a hole? */
fi2 = list_entry(fi->fi_link.prev,
struct fwnet_fragment_info, fi_link);
if (fi2->offset + fi2->len == fi->offset) {
/* glue fragments together */
fi2->len += fi->len + len;
list_del(&fi->fi_link);
kfree(fi);
return fi2;
}
fi->offset = offset;
fi->len += len;
return fi;
}
if (offset > fi->offset + fi->len) {
list = &fi->fi_link;
break;
}
if (offset + len < fi->offset) {
list = fi->fi_link.prev;
break;
}
}
new = kmalloc(sizeof(*new), GFP_ATOMIC);
if (!new)
return NULL;
new->offset = offset;
new->len = len;
list_add(&new->fi_link, list);
return new;
}
static struct fwnet_partial_datagram *fwnet_pd_new(struct net_device *net,
struct fwnet_peer *peer, u16 datagram_label, unsigned dg_size,
void *frag_buf, unsigned frag_off, unsigned frag_len)
{
struct fwnet_partial_datagram *new;
struct fwnet_fragment_info *fi;
new = kmalloc(sizeof(*new), GFP_ATOMIC);
if (!new)
goto fail;
INIT_LIST_HEAD(&new->fi_list);
fi = fwnet_frag_new(new, frag_off, frag_len);
if (fi == NULL)
goto fail_w_new;
new->datagram_label = datagram_label;
new->datagram_size = dg_size;
new->skb = dev_alloc_skb(dg_size + LL_RESERVED_SPACE(net));
if (new->skb == NULL)
goto fail_w_fi;
skb_reserve(new->skb, LL_RESERVED_SPACE(net));
new->pbuf = skb_put(new->skb, dg_size);
memcpy(new->pbuf + frag_off, frag_buf, frag_len);
list_add_tail(&new->pd_link, &peer->pd_list);
return new;
fail_w_fi:
kfree(fi);
fail_w_new:
kfree(new);
fail:
return NULL;
}
static struct fwnet_partial_datagram *fwnet_pd_find(struct fwnet_peer *peer,
u16 datagram_label)
{
struct fwnet_partial_datagram *pd;
list_for_each_entry(pd, &peer->pd_list, pd_link)
if (pd->datagram_label == datagram_label)
return pd;
return NULL;
}
static void fwnet_pd_delete(struct fwnet_partial_datagram *old)
{
struct fwnet_fragment_info *fi, *n;
list_for_each_entry_safe(fi, n, &old->fi_list, fi_link)
kfree(fi);
list_del(&old->pd_link);
dev_kfree_skb_any(old->skb);
kfree(old);
}
static bool fwnet_pd_update(struct fwnet_peer *peer,
struct fwnet_partial_datagram *pd, void *frag_buf,
unsigned frag_off, unsigned frag_len)
{
if (fwnet_frag_new(pd, frag_off, frag_len) == NULL)
return false;
memcpy(pd->pbuf + frag_off, frag_buf, frag_len);
/*
* Move list entry to beginning of list so that oldest partial
* datagrams percolate to the end of the list
*/
list_move_tail(&pd->pd_link, &peer->pd_list);
return true;
}
static bool fwnet_pd_is_complete(struct fwnet_partial_datagram *pd)
{
struct fwnet_fragment_info *fi;
fi = list_entry(pd->fi_list.next, struct fwnet_fragment_info, fi_link);
return fi->len == pd->datagram_size;
}
/* caller must hold dev->lock */
static struct fwnet_peer *fwnet_peer_find_by_guid(struct fwnet_device *dev,
u64 guid)
{
struct fwnet_peer *peer;
list_for_each_entry(peer, &dev->peer_list, peer_link)
if (peer->guid == guid)
return peer;
return NULL;
}
/* caller must hold dev->lock */
static struct fwnet_peer *fwnet_peer_find_by_node_id(struct fwnet_device *dev,
int node_id, int generation)
{
struct fwnet_peer *peer;
list_for_each_entry(peer, &dev->peer_list, peer_link)
if (peer->node_id == node_id &&
peer->generation == generation)
return peer;
return NULL;
}
/* See IEEE 1394-2008 table 6-4, table 8-8, table 16-18. */
static unsigned fwnet_max_payload(unsigned max_rec, unsigned speed)
{
max_rec = min(max_rec, speed + 8);
max_rec = clamp(max_rec, 8U, 11U); /* 512...4096 */
return (1 << (max_rec + 1)) - RFC2374_FRAG_HDR_SIZE;
}
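/*
 * For example, a peer advertising max_rec = 8 reached at S100 (speed
 * code 0) gives min(8, 0 + 8) = 8, clamped to 8: a 1 << 9 = 512 byte
 * asynchronous payload, of which 512 - 8 = 504 bytes remain for
 * datagram data after the fragment header.
 */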
static int fwnet_finish_incoming_packet(struct net_device *net,
struct sk_buff *skb, u16 source_node_id,
bool is_broadcast, u16 ether_type)
{
int status;
switch (ether_type) {
case ETH_P_ARP:
case ETH_P_IP:
#if IS_ENABLED(CONFIG_IPV6)
case ETH_P_IPV6:
#endif
break;
default:
goto err;
}
/* Write metadata, and then pass to the receive level */
skb->dev = net;
skb->ip_summed = CHECKSUM_NONE;
/*
* Parse the encapsulation header. This actually does the job of
* converting to an ethernet-like pseudo frame header.
*/
if (dev_hard_header(skb, net, ether_type,
is_broadcast ? net->broadcast : net->dev_addr,
NULL, skb->len) >= 0) {
struct fwnet_header *eth;
u16 *rawp;
__be16 protocol;
skb_reset_mac_header(skb);
skb_pull(skb, sizeof(*eth));
eth = (struct fwnet_header *)skb_mac_header(skb);
if (fwnet_hwaddr_is_multicast(eth->h_dest)) {
if (memcmp(eth->h_dest, net->broadcast,
net->addr_len) == 0)
skb->pkt_type = PACKET_BROADCAST;
#if 0
else
skb->pkt_type = PACKET_MULTICAST;
#endif
} else {
if (memcmp(eth->h_dest, net->dev_addr, net->addr_len))
skb->pkt_type = PACKET_OTHERHOST;
}
if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN) {
protocol = eth->h_proto;
} else {
rawp = (u16 *)skb->data;
if (*rawp == 0xffff)
protocol = htons(ETH_P_802_3);
else
protocol = htons(ETH_P_802_2);
}
skb->protocol = protocol;
}
status = netif_rx(skb);
if (status == NET_RX_DROP) {
net->stats.rx_errors++;
net->stats.rx_dropped++;
} else {
net->stats.rx_packets++;
net->stats.rx_bytes += skb->len;
}
return 0;
err:
net->stats.rx_errors++;
net->stats.rx_dropped++;
dev_kfree_skb_any(skb);
return -ENOENT;
}
static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
int source_node_id, int generation,
bool is_broadcast)
{
struct sk_buff *skb;
struct net_device *net = dev->netdev;
struct rfc2734_header hdr;
unsigned lf;
unsigned long flags;
struct fwnet_peer *peer;
struct fwnet_partial_datagram *pd;
int fg_off;
int dg_size;
u16 datagram_label;
int retval;
u16 ether_type;
if (len <= RFC2374_UNFRAG_HDR_SIZE)
return 0;
hdr.w0 = be32_to_cpu(buf[0]);
lf = fwnet_get_hdr_lf(&hdr);
if (lf == RFC2374_HDR_UNFRAG) {
/*
* An unfragmented datagram has been received by the ieee1394
* bus. Build an skbuff around it so we can pass it to the
* high level network layer.
*/
ether_type = fwnet_get_hdr_ether_type(&hdr);
buf++;
len -= RFC2374_UNFRAG_HDR_SIZE;
skb = dev_alloc_skb(len + LL_RESERVED_SPACE(net));
if (unlikely(!skb)) {
net->stats.rx_dropped++;
return -ENOMEM;
}
skb_reserve(skb, LL_RESERVED_SPACE(net));
skb_put_data(skb, buf, len);
return fwnet_finish_incoming_packet(net, skb, source_node_id,
is_broadcast, ether_type);
}
/* A datagram fragment has been received, now the fun begins. */
if (len <= RFC2374_FRAG_HDR_SIZE)
return 0;
hdr.w1 = ntohl(buf[1]);
buf += 2;
len -= RFC2374_FRAG_HDR_SIZE;
if (lf == RFC2374_HDR_FIRSTFRAG) {
ether_type = fwnet_get_hdr_ether_type(&hdr);
fg_off = 0;
} else {
ether_type = 0;
fg_off = fwnet_get_hdr_fg_off(&hdr);
}
datagram_label = fwnet_get_hdr_dgl(&hdr);
dg_size = fwnet_get_hdr_dg_size(&hdr);
if (fg_off + len > dg_size)
return 0;
spin_lock_irqsave(&dev->lock, flags);
peer = fwnet_peer_find_by_node_id(dev, source_node_id, generation);
if (!peer) {
retval = -ENOENT;
goto fail;
}
pd = fwnet_pd_find(peer, datagram_label);
if (pd == NULL) {
while (peer->pdg_size >= FWNET_MAX_FRAGMENTS) {
/* remove the oldest */
fwnet_pd_delete(list_first_entry(&peer->pd_list,
struct fwnet_partial_datagram, pd_link));
peer->pdg_size--;
}
pd = fwnet_pd_new(net, peer, datagram_label,
dg_size, buf, fg_off, len);
if (pd == NULL) {
retval = -ENOMEM;
goto fail;
}
peer->pdg_size++;
} else {
if (fwnet_frag_overlap(pd, fg_off, len) ||
pd->datagram_size != dg_size) {
/*
* Differing datagram sizes or overlapping fragments,
* discard old datagram and start a new one.
*/
fwnet_pd_delete(pd);
pd = fwnet_pd_new(net, peer, datagram_label,
dg_size, buf, fg_off, len);
if (pd == NULL) {
peer->pdg_size--;
retval = -ENOMEM;
goto fail;
}
} else {
if (!fwnet_pd_update(peer, pd, buf, fg_off, len)) {
/*
* Couldn't save off fragment anyway
* so might as well obliterate the
* datagram now.
*/
fwnet_pd_delete(pd);
peer->pdg_size--;
retval = -ENOMEM;
goto fail;
}
}
} /* new datagram or add to existing one */
if (lf == RFC2374_HDR_FIRSTFRAG)
pd->ether_type = ether_type;
if (fwnet_pd_is_complete(pd)) {
ether_type = pd->ether_type;
peer->pdg_size--;
skb = skb_get(pd->skb);
fwnet_pd_delete(pd);
spin_unlock_irqrestore(&dev->lock, flags);
return fwnet_finish_incoming_packet(net, skb, source_node_id,
false, ether_type);
}
/*
* Datagram is not complete, we're done for the
* moment.
*/
retval = 0;
fail:
spin_unlock_irqrestore(&dev->lock, flags);
return retval;
}
static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r,
int tcode, int destination, int source, int generation,
unsigned long long offset, void *payload, size_t length,
void *callback_data)
{
struct fwnet_device *dev = callback_data;
int rcode;
if (destination == IEEE1394_ALL_NODES) {
kfree(r);
return;
}
if (offset != dev->handler.offset)
rcode = RCODE_ADDRESS_ERROR;
else if (tcode != TCODE_WRITE_BLOCK_REQUEST)
rcode = RCODE_TYPE_ERROR;
else if (fwnet_incoming_packet(dev, payload, length,
source, generation, false) != 0) {
dev_err(&dev->netdev->dev, "incoming packet failure\n");
rcode = RCODE_CONFLICT_ERROR;
} else
rcode = RCODE_COMPLETE;
fw_send_response(card, r, rcode);
}
static int gasp_source_id(__be32 *p)
{
return be32_to_cpu(p[0]) >> 16;
}
static u32 gasp_specifier_id(__be32 *p)
{
return (be32_to_cpu(p[0]) & 0xffff) << 8 |
(be32_to_cpu(p[1]) & 0xff000000) >> 24;
}
static u32 gasp_version(__be32 *p)
{
return be32_to_cpu(p[1]) & 0xffffff;
}
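/*
 * The three helpers above decode the 8-byte GASP header preceding
 * each broadcast datagram: quadlet 0 holds source_id in bits 31-16
 * and the upper 16 bits of the specifier ID in bits 15-0; quadlet 1
 * holds the low 8 specifier ID bits in bits 31-24 and the 24-bit
 * version (RFC2734_SW_VERSION or RFC3146_SW_VERSION) in bits 23-0.
 */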
static void fwnet_receive_broadcast(struct fw_iso_context *context,
u32 cycle, size_t header_length, void *header, void *data)
{
struct fwnet_device *dev;
struct fw_iso_packet packet;
__be16 *hdr_ptr;
__be32 *buf_ptr;
int retval;
u32 length;
unsigned long offset;
unsigned long flags;
dev = data;
hdr_ptr = header;
length = be16_to_cpup(hdr_ptr);
spin_lock_irqsave(&dev->lock, flags);
offset = dev->rcv_buffer_size * dev->broadcast_rcv_next_ptr;
buf_ptr = dev->broadcast_rcv_buffer_ptrs[dev->broadcast_rcv_next_ptr++];
if (dev->broadcast_rcv_next_ptr == dev->num_broadcast_rcv_ptrs)
dev->broadcast_rcv_next_ptr = 0;
spin_unlock_irqrestore(&dev->lock, flags);
if (length > IEEE1394_GASP_HDR_SIZE &&
gasp_specifier_id(buf_ptr) == IANA_SPECIFIER_ID &&
(gasp_version(buf_ptr) == RFC2734_SW_VERSION
#if IS_ENABLED(CONFIG_IPV6)
|| gasp_version(buf_ptr) == RFC3146_SW_VERSION
#endif
))
fwnet_incoming_packet(dev, buf_ptr + 2,
length - IEEE1394_GASP_HDR_SIZE,
gasp_source_id(buf_ptr),
context->card->generation, true);
packet.payload_length = dev->rcv_buffer_size;
packet.interrupt = 1;
packet.skip = 0;
packet.tag = 3;
packet.sy = 0;
packet.header_length = IEEE1394_GASP_HDR_SIZE;
spin_lock_irqsave(&dev->lock, flags);
retval = fw_iso_context_queue(dev->broadcast_rcv_context, &packet,
&dev->broadcast_rcv_buffer, offset);
spin_unlock_irqrestore(&dev->lock, flags);
if (retval >= 0)
fw_iso_context_queue_flush(dev->broadcast_rcv_context);
else
dev_err(&dev->netdev->dev, "requeue failed\n");
}
static struct kmem_cache *fwnet_packet_task_cache;
static void fwnet_free_ptask(struct fwnet_packet_task *ptask)
{
dev_kfree_skb_any(ptask->skb);
kmem_cache_free(fwnet_packet_task_cache, ptask);
}
/* Caller must hold dev->lock. */
static void dec_queued_datagrams(struct fwnet_device *dev)
{
if (--dev->queued_datagrams == FWNET_MIN_QUEUED_DATAGRAMS)
netif_wake_queue(dev->netdev);
}
static int fwnet_send_packet(struct fwnet_packet_task *ptask);
static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask)
{
struct fwnet_device *dev = ptask->dev;
struct sk_buff *skb = ptask->skb;
unsigned long flags;
bool free;
spin_lock_irqsave(&dev->lock, flags);
ptask->outstanding_pkts--;
/* Check whether we or the networking TX soft-IRQ is last user. */
free = (ptask->outstanding_pkts == 0 && ptask->enqueued);
if (free)
dec_queued_datagrams(dev);
if (ptask->outstanding_pkts == 0) {
dev->netdev->stats.tx_packets++;
dev->netdev->stats.tx_bytes += skb->len;
}
spin_unlock_irqrestore(&dev->lock, flags);
if (ptask->outstanding_pkts > 0) {
u16 dg_size;
u16 fg_off;
u16 datagram_label;
u16 lf;
/* Update the ptask to point to the next fragment and send it */
lf = fwnet_get_hdr_lf(&ptask->hdr);
switch (lf) {
case RFC2374_HDR_LASTFRAG:
case RFC2374_HDR_UNFRAG:
default:
dev_err(&dev->netdev->dev,
"outstanding packet %x lf %x, header %x,%x\n",
ptask->outstanding_pkts, lf, ptask->hdr.w0,
ptask->hdr.w1);
BUG();
case RFC2374_HDR_FIRSTFRAG:
/* Set frag type here for future interior fragments */
dg_size = fwnet_get_hdr_dg_size(&ptask->hdr);
fg_off = ptask->max_payload - RFC2374_FRAG_HDR_SIZE;
datagram_label = fwnet_get_hdr_dgl(&ptask->hdr);
break;
case RFC2374_HDR_INTFRAG:
dg_size = fwnet_get_hdr_dg_size(&ptask->hdr);
fg_off = fwnet_get_hdr_fg_off(&ptask->hdr)
+ ptask->max_payload - RFC2374_FRAG_HDR_SIZE;
datagram_label = fwnet_get_hdr_dgl(&ptask->hdr);
break;
}
if (ptask->dest_node == IEEE1394_ALL_NODES) {
skb_pull(skb,
ptask->max_payload + IEEE1394_GASP_HDR_SIZE);
} else {
skb_pull(skb, ptask->max_payload);
}
if (ptask->outstanding_pkts > 1) {
fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_INTFRAG,
dg_size, fg_off, datagram_label);
} else {
fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_LASTFRAG,
dg_size, fg_off, datagram_label);
ptask->max_payload = skb->len + RFC2374_FRAG_HDR_SIZE;
}
fwnet_send_packet(ptask);
}
if (free)
fwnet_free_ptask(ptask);
}
static void fwnet_transmit_packet_failed(struct fwnet_packet_task *ptask)
{
struct fwnet_device *dev = ptask->dev;
unsigned long flags;
bool free;
spin_lock_irqsave(&dev->lock, flags);
/* One fragment failed; don't try to send remaining fragments. */
ptask->outstanding_pkts = 0;
/* Check whether we or the networking TX soft-IRQ is last user. */
free = ptask->enqueued;
if (free)
dec_queued_datagrams(dev);
dev->netdev->stats.tx_dropped++;
dev->netdev->stats.tx_errors++;
spin_unlock_irqrestore(&dev->lock, flags);
if (free)
fwnet_free_ptask(ptask);
}
static void fwnet_write_complete(struct fw_card *card, int rcode,
void *payload, size_t length, void *data)
{
struct fwnet_packet_task *ptask = data;
static unsigned long j;
static int last_rcode, errors_skipped;
if (rcode == RCODE_COMPLETE) {
fwnet_transmit_packet_done(ptask);
} else {
if (printk_timed_ratelimit(&j, 1000) || rcode != last_rcode) {
dev_err(&ptask->dev->netdev->dev,
"fwnet_write_complete failed: %x (skipped %d)\n",
rcode, errors_skipped);
errors_skipped = 0;
last_rcode = rcode;
} else {
errors_skipped++;
}
fwnet_transmit_packet_failed(ptask);
}
}
static int fwnet_send_packet(struct fwnet_packet_task *ptask)
{
struct fwnet_device *dev;
unsigned tx_len;
struct rfc2734_header *bufhdr;
unsigned long flags;
bool free;
dev = ptask->dev;
tx_len = ptask->max_payload;
switch (fwnet_get_hdr_lf(&ptask->hdr)) {
case RFC2374_HDR_UNFRAG:
bufhdr = skb_push(ptask->skb, RFC2374_UNFRAG_HDR_SIZE);
put_unaligned_be32(ptask->hdr.w0, &bufhdr->w0);
break;
case RFC2374_HDR_FIRSTFRAG:
case RFC2374_HDR_INTFRAG:
case RFC2374_HDR_LASTFRAG:
bufhdr = skb_push(ptask->skb, RFC2374_FRAG_HDR_SIZE);
put_unaligned_be32(ptask->hdr.w0, &bufhdr->w0);
put_unaligned_be32(ptask->hdr.w1, &bufhdr->w1);
break;
default:
BUG();
}
if (ptask->dest_node == IEEE1394_ALL_NODES) {
u8 *p;
int generation;
int node_id;
unsigned int sw_version;
/* ptask->generation may not have been set yet */
generation = dev->card->generation;
smp_rmb();
node_id = dev->card->node_id;
switch (ptask->skb->protocol) {
default:
sw_version = RFC2734_SW_VERSION;
break;
#if IS_ENABLED(CONFIG_IPV6)
case htons(ETH_P_IPV6):
sw_version = RFC3146_SW_VERSION;
#endif
}
p = skb_push(ptask->skb, IEEE1394_GASP_HDR_SIZE);
put_unaligned_be32(node_id << 16 | IANA_SPECIFIER_ID >> 8, p);
put_unaligned_be32((IANA_SPECIFIER_ID & 0xff) << 24
| sw_version, &p[4]);
/* We should not transmit if broadcast_channel.valid == 0. */
fw_send_request(dev->card, &ptask->transaction,
TCODE_STREAM_DATA,
fw_stream_packet_destination_id(3,
IEEE1394_BROADCAST_CHANNEL, 0),
generation, SCODE_100, 0ULL, ptask->skb->data,
tx_len + 8, fwnet_write_complete, ptask);
spin_lock_irqsave(&dev->lock, flags);
/* If the AT tasklet already ran, we may be last user. */
free = (ptask->outstanding_pkts == 0 && !ptask->enqueued);
if (!free)
ptask->enqueued = true;
else
dec_queued_datagrams(dev);
spin_unlock_irqrestore(&dev->lock, flags);
goto out;
}
fw_send_request(dev->card, &ptask->transaction,
TCODE_WRITE_BLOCK_REQUEST, ptask->dest_node,
ptask->generation, ptask->speed, ptask->fifo_addr,
ptask->skb->data, tx_len, fwnet_write_complete, ptask);
spin_lock_irqsave(&dev->lock, flags);
/* If the AT tasklet already ran, we may be last user. */
free = (ptask->outstanding_pkts == 0 && !ptask->enqueued);
if (!free)
ptask->enqueued = true;
else
dec_queued_datagrams(dev);
spin_unlock_irqrestore(&dev->lock, flags);
netif_trans_update(dev->netdev);
out:
if (free)
fwnet_free_ptask(ptask);
return 0;
}
static void fwnet_fifo_stop(struct fwnet_device *dev)
{
if (dev->local_fifo == FWNET_NO_FIFO_ADDR)
return;
fw_core_remove_address_handler(&dev->handler);
dev->local_fifo = FWNET_NO_FIFO_ADDR;
}
static int fwnet_fifo_start(struct fwnet_device *dev)
{
int retval;
if (dev->local_fifo != FWNET_NO_FIFO_ADDR)
return 0;
dev->handler.length = 4096;
dev->handler.address_callback = fwnet_receive_packet;
dev->handler.callback_data = dev;
retval = fw_core_add_address_handler(&dev->handler,
&fw_high_memory_region);
if (retval < 0)
return retval;
dev->local_fifo = dev->handler.offset;
return 0;
}
static void __fwnet_broadcast_stop(struct fwnet_device *dev)
{
unsigned u;
if (dev->broadcast_state != FWNET_BROADCAST_ERROR) {
for (u = 0; u < FWNET_ISO_PAGE_COUNT; u++)
kunmap(dev->broadcast_rcv_buffer.pages[u]);
fw_iso_buffer_destroy(&dev->broadcast_rcv_buffer, dev->card);
}
if (dev->broadcast_rcv_context) {
fw_iso_context_destroy(dev->broadcast_rcv_context);
dev->broadcast_rcv_context = NULL;
}
kfree(dev->broadcast_rcv_buffer_ptrs);
dev->broadcast_rcv_buffer_ptrs = NULL;
dev->broadcast_state = FWNET_BROADCAST_ERROR;
}
static void fwnet_broadcast_stop(struct fwnet_device *dev)
{
if (dev->broadcast_state == FWNET_BROADCAST_ERROR)
return;
fw_iso_context_stop(dev->broadcast_rcv_context);
__fwnet_broadcast_stop(dev);
}
static int fwnet_broadcast_start(struct fwnet_device *dev)
{
struct fw_iso_context *context;
int retval;
unsigned num_packets;
unsigned max_receive;
struct fw_iso_packet packet;
unsigned long offset;
void **ptrptr;
unsigned u;
if (dev->broadcast_state != FWNET_BROADCAST_ERROR)
return 0;
max_receive = 1U << (dev->card->max_receive + 1);
num_packets = (FWNET_ISO_PAGE_COUNT * PAGE_SIZE) / max_receive;
ptrptr = kmalloc_array(num_packets, sizeof(void *), GFP_KERNEL);
if (!ptrptr) {
retval = -ENOMEM;
goto failed;
}
dev->broadcast_rcv_buffer_ptrs = ptrptr;
context = fw_iso_context_create(dev->card, FW_ISO_CONTEXT_RECEIVE,
IEEE1394_BROADCAST_CHANNEL,
dev->card->link_speed, 8,
fwnet_receive_broadcast, dev);
if (IS_ERR(context)) {
retval = PTR_ERR(context);
goto failed;
}
retval = fw_iso_buffer_init(&dev->broadcast_rcv_buffer, dev->card,
FWNET_ISO_PAGE_COUNT, DMA_FROM_DEVICE);
if (retval < 0)
goto failed;
dev->broadcast_state = FWNET_BROADCAST_STOPPED;
for (u = 0; u < FWNET_ISO_PAGE_COUNT; u++) {
void *ptr;
unsigned v;
ptr = kmap(dev->broadcast_rcv_buffer.pages[u]);
for (v = 0; v < num_packets / FWNET_ISO_PAGE_COUNT; v++)
*ptrptr++ = (void *) ((char *)ptr + v * max_receive);
}
dev->broadcast_rcv_context = context;
packet.payload_length = max_receive;
packet.interrupt = 1;
packet.skip = 0;
packet.tag = 3;
packet.sy = 0;
packet.header_length = IEEE1394_GASP_HDR_SIZE;
offset = 0;
for (u = 0; u < num_packets; u++) {
retval = fw_iso_context_queue(context, &packet,
&dev->broadcast_rcv_buffer, offset);
if (retval < 0)
goto failed;
offset += max_receive;
}
dev->num_broadcast_rcv_ptrs = num_packets;
dev->rcv_buffer_size = max_receive;
dev->broadcast_rcv_next_ptr = 0U;
retval = fw_iso_context_start(context, -1, 0,
FW_ISO_CONTEXT_MATCH_ALL_TAGS); /* ??? sync */
if (retval < 0)
goto failed;
/* FIXME: adjust it according to the min. speed of all known peers? */
dev->broadcast_xmt_max_payload = IEEE1394_MAX_PAYLOAD_S100
- IEEE1394_GASP_HDR_SIZE - RFC2374_UNFRAG_HDR_SIZE;
dev->broadcast_state = FWNET_BROADCAST_RUNNING;
return 0;
failed:
__fwnet_broadcast_stop(dev);
return retval;
}
static void set_carrier_state(struct fwnet_device *dev)
{
if (dev->peer_count > 1)
netif_carrier_on(dev->netdev);
else
netif_carrier_off(dev->netdev);
}
/* ifup */
static int fwnet_open(struct net_device *net)
{
struct fwnet_device *dev = netdev_priv(net);
int ret;
ret = fwnet_broadcast_start(dev);
if (ret)
return ret;
netif_start_queue(net);
spin_lock_irq(&dev->lock);
set_carrier_state(dev);
spin_unlock_irq(&dev->lock);
return 0;
}
/* ifdown */
static int fwnet_stop(struct net_device *net)
{
struct fwnet_device *dev = netdev_priv(net);
netif_stop_queue(net);
fwnet_broadcast_stop(dev);
return 0;
}
static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net)
{
struct fwnet_header hdr_buf;
struct fwnet_device *dev = netdev_priv(net);
__be16 proto;
u16 dest_node;
unsigned max_payload;
u16 dg_size;
u16 *datagram_label_ptr;
struct fwnet_packet_task *ptask;
struct fwnet_peer *peer;
unsigned long flags;
spin_lock_irqsave(&dev->lock, flags);
/* Can this happen? */
if (netif_queue_stopped(dev->netdev)) {
spin_unlock_irqrestore(&dev->lock, flags);
return NETDEV_TX_BUSY;
}
ptask = kmem_cache_alloc(fwnet_packet_task_cache, GFP_ATOMIC);
if (ptask == NULL)
goto fail;
skb = skb_share_check(skb, GFP_ATOMIC);
if (!skb)
goto fail;
/*
* Make a copy of the driver-specific header.
* We might need to rebuild the header on tx failure.
*/
memcpy(&hdr_buf, skb->data, sizeof(hdr_buf));
proto = hdr_buf.h_proto;
switch (proto) {
case htons(ETH_P_ARP):
case htons(ETH_P_IP):
#if IS_ENABLED(CONFIG_IPV6)
case htons(ETH_P_IPV6):
#endif
break;
default:
goto fail;
}
skb_pull(skb, sizeof(hdr_buf));
dg_size = skb->len;
/*
* Set the transmission type for the packet. ARP packets and IP
* broadcast packets are sent via GASP.
*/
if (fwnet_hwaddr_is_multicast(hdr_buf.h_dest)) {
max_payload = dev->broadcast_xmt_max_payload;
datagram_label_ptr = &dev->broadcast_xmt_datagramlabel;
ptask->fifo_addr = FWNET_NO_FIFO_ADDR;
ptask->generation = 0;
ptask->dest_node = IEEE1394_ALL_NODES;
ptask->speed = SCODE_100;
} else {
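/*
 * Unicast: look up the peer by the EUI-64 in the destination hardware
 * address and transmit to its FIFO address by asynchronous write request.
 */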
union fwnet_hwaddr *ha = (union fwnet_hwaddr *)hdr_buf.h_dest;
__be64 guid = get_unaligned(&ha->uc.uniq_id);
u8 generation;
peer = fwnet_peer_find_by_guid(dev, be64_to_cpu(guid));
if (!peer)
goto fail;
generation = peer->generation;
dest_node = peer->node_id;
max_payload = peer->max_payload;
datagram_label_ptr = &peer->datagram_label;
ptask->fifo_addr = get_unaligned_be48(ha->uc.fifo);
ptask->generation = generation;
ptask->dest_node = dest_node;
ptask->speed = peer->speed;
}
ptask->hdr.w0 = 0;
ptask->hdr.w1 = 0;
ptask->skb = skb;
ptask->dev = dev;
/* Does it all fit in one packet? */
if (dg_size <= max_payload) {
fwnet_make_uf_hdr(&ptask->hdr, ntohs(proto));
ptask->outstanding_pkts = 1;
max_payload = dg_size + RFC2374_UNFRAG_HDR_SIZE;
} else {
u16 datagram_label;
max_payload -= RFC2374_FRAG_OVERHEAD;
datagram_label = (*datagram_label_ptr)++;
fwnet_make_ff_hdr(&ptask->hdr, ntohs(proto), dg_size,
datagram_label);
ptask->outstanding_pkts = DIV_ROUND_UP(dg_size, max_payload);
max_payload += RFC2374_FRAG_HDR_SIZE;
}
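/* Throttle the stack once the per-device datagram queue limit is reached. */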
if (++dev->queued_datagrams == FWNET_MAX_QUEUED_DATAGRAMS)
netif_stop_queue(dev->netdev);
spin_unlock_irqrestore(&dev->lock, flags);
ptask->max_payload = max_payload;
ptask->enqueued = 0;
fwnet_send_packet(ptask);
return NETDEV_TX_OK;
fail:
spin_unlock_irqrestore(&dev->lock, flags);
if (ptask)
kmem_cache_free(fwnet_packet_task_cache, ptask);
if (skb != NULL)
dev_kfree_skb(skb);
net->stats.tx_dropped++;
net->stats.tx_errors++;
/*
* FIXME: According to a patch from 2003-02-26, "returning non-zero
* causes serious problems" here, allegedly. Before that patch,
* -ERRNO was returned which is not appropriate under Linux 2.6.
* Perhaps more needs to be done? Stop the queue in serious
* conditions and restart it elsewhere?
*/
return NETDEV_TX_OK;
}
static const struct ethtool_ops fwnet_ethtool_ops = {
.get_link = ethtool_op_get_link,
};
static const struct net_device_ops fwnet_netdev_ops = {
.ndo_open = fwnet_open,
.ndo_stop = fwnet_stop,
.ndo_start_xmit = fwnet_tx,
};
static void fwnet_init_dev(struct net_device *net)
{
net->header_ops = &fwnet_header_ops;
net->netdev_ops = &fwnet_netdev_ops;
net->watchdog_timeo = 2 * HZ;
net->flags = IFF_BROADCAST | IFF_MULTICAST;
net->features = NETIF_F_HIGHDMA;
net->addr_len = FWNET_ALEN;
net->hard_header_len = FWNET_HLEN;
net->type = ARPHRD_IEEE1394;
net->tx_queue_len = FWNET_TX_QUEUE_LEN;
net->ethtool_ops = &fwnet_ethtool_ops;
}
/* caller must hold fwnet_device_mutex */
static struct fwnet_device *fwnet_dev_find(struct fw_card *card)
{
struct fwnet_device *dev;
list_for_each_entry(dev, &fwnet_device_list, dev_link)
if (dev->card == card)
return dev;
return NULL;
}
static int fwnet_add_peer(struct fwnet_device *dev,
struct fw_unit *unit, struct fw_device *device)
{
struct fwnet_peer *peer;
peer = kmalloc(sizeof(*peer), GFP_KERNEL);
if (!peer)
return -ENOMEM;
dev_set_drvdata(&unit->device, peer);
peer->dev = dev;
peer->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];
INIT_LIST_HEAD(&peer->pd_list);
peer->pdg_size = 0;
peer->datagram_label = 0;
peer->speed = device->max_speed;
peer->max_payload = fwnet_max_payload(device->max_rec, peer->speed);
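/*
 * Ensure the node ID read below is not older than the generation read
 * above; pairs with the update ordering in firewire-core.
 */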
peer->generation = device->generation;
smp_rmb();
peer->node_id = device->node_id;
spin_lock_irq(&dev->lock);
list_add_tail(&peer->peer_link, &dev->peer_list);
dev->peer_count++;
set_carrier_state(dev);
spin_unlock_irq(&dev->lock);
return 0;
}
static int fwnet_probe(struct fw_unit *unit,
const struct ieee1394_device_id *id)
{
struct fw_device *device = fw_parent_device(unit);
struct fw_card *card = device->card;
struct net_device *net;
bool allocated_netdev = false;
struct fwnet_device *dev;
union fwnet_hwaddr ha;
int ret;
mutex_lock(&fwnet_device_mutex);
dev = fwnet_dev_find(card);
if (dev) {
net = dev->netdev;
goto have_dev;
}
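/* No fwnet_device for this card yet: create and register the net device. */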
net = alloc_netdev(sizeof(*dev), "firewire%d", NET_NAME_UNKNOWN,
fwnet_init_dev);
if (net == NULL) {
mutex_unlock(&fwnet_device_mutex);
return -ENOMEM;
}
allocated_netdev = true;
SET_NETDEV_DEV(net, card->device);
dev = netdev_priv(net);
spin_lock_init(&dev->lock);
dev->broadcast_state = FWNET_BROADCAST_ERROR;
dev->broadcast_rcv_context = NULL;
dev->broadcast_xmt_max_payload = 0;
dev->broadcast_xmt_datagramlabel = 0;
dev->local_fifo = FWNET_NO_FIFO_ADDR;
dev->queued_datagrams = 0;
INIT_LIST_HEAD(&dev->peer_list);
dev->card = card;
dev->netdev = net;
ret = fwnet_fifo_start(dev);
if (ret < 0)
goto out;
dev->local_fifo = dev->handler.offset;
/*
* default MTU: RFC 2734 cl. 4, RFC 3146 cl. 4
* maximum MTU: RFC 2734 cl. 4.2, fragment encapsulation header's
* maximum possible datagram_size + 1 = 0xfff + 1
*/
net->mtu = 1500U;
net->min_mtu = ETH_MIN_MTU;
net->max_mtu = 4096U;
/* Set our hardware address while we're at it */
ha.uc.uniq_id = cpu_to_be64(card->guid);
ha.uc.max_rec = dev->card->max_receive;
ha.uc.sspd = dev->card->link_speed;
put_unaligned_be48(dev->local_fifo, ha.uc.fifo);
dev_addr_set(net, ha.u);
memset(net->broadcast, -1, net->addr_len);
ret = register_netdev(net);
if (ret)
goto out;
list_add_tail(&dev->dev_link, &fwnet_device_list);
dev_notice(&net->dev, "IP over IEEE 1394 on card %s\n",
dev_name(card->device));
have_dev:
ret = fwnet_add_peer(dev, unit, device);
if (ret && allocated_netdev) {
unregister_netdev(net);
list_del(&dev->dev_link);
out:
fwnet_fifo_stop(dev);
free_netdev(net);
}
mutex_unlock(&fwnet_device_mutex);
return ret;
}
/*
* FIXME abort partially sent fragmented datagrams,
* discard partially received fragmented datagrams
*/
static void fwnet_update(struct fw_unit *unit)
{
struct fw_device *device = fw_parent_device(unit);
struct fwnet_peer *peer = dev_get_drvdata(&unit->device);
int generation;
generation = device->generation;
spin_lock_irq(&peer->dev->lock);
peer->node_id = device->node_id;
peer->generation = generation;
spin_unlock_irq(&peer->dev->lock);
}
static void fwnet_remove_peer(struct fwnet_peer *peer, struct fwnet_device *dev)
{
struct fwnet_partial_datagram *pd, *pd_next;
spin_lock_irq(&dev->lock);
list_del(&peer->peer_link);
dev->peer_count--;
set_carrier_state(dev);
spin_unlock_irq(&dev->lock);
list_for_each_entry_safe(pd, pd_next, &peer->pd_list, pd_link)
fwnet_pd_delete(pd);
kfree(peer);
}
static void fwnet_remove(struct fw_unit *unit)
{
struct fwnet_peer *peer = dev_get_drvdata(&unit->device);
struct fwnet_device *dev = peer->dev;
struct net_device *net;
int i;
mutex_lock(&fwnet_device_mutex);
net = dev->netdev;
fwnet_remove_peer(peer, dev);
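/*
 * If that was the last peer on this card, tear down the net device and
 * give in-flight datagrams up to five seconds to drain.
 */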
if (list_empty(&dev->peer_list)) {
unregister_netdev(net);
fwnet_fifo_stop(dev);
for (i = 0; dev->queued_datagrams && i < 5; i++)
ssleep(1);
WARN_ON(dev->queued_datagrams);
list_del(&dev->dev_link);
free_netdev(net);
}
mutex_unlock(&fwnet_device_mutex);
}
static const struct ieee1394_device_id fwnet_id_table[] = {
{
.match_flags = IEEE1394_MATCH_SPECIFIER_ID |
IEEE1394_MATCH_VERSION,
.specifier_id = IANA_SPECIFIER_ID,
.version = RFC2734_SW_VERSION,
},
#if IS_ENABLED(CONFIG_IPV6)
{
.match_flags = IEEE1394_MATCH_SPECIFIER_ID |
IEEE1394_MATCH_VERSION,
.specifier_id = IANA_SPECIFIER_ID,
.version = RFC3146_SW_VERSION,
},
#endif
{ }
};
static struct fw_driver fwnet_driver = {
.driver = {
.owner = THIS_MODULE,
.name = KBUILD_MODNAME,
.bus = &fw_bus_type,
},
.probe = fwnet_probe,
.update = fwnet_update,
.remove = fwnet_remove,
.id_table = fwnet_id_table,
};
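/*
 * Unit directory advertising IPv4 over 1394 capability (IANA specifier ID,
 * RFC 2734 software version); added to the local config ROM in fwnet_init().
 */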
static const u32 rfc2374_unit_directory_data[] = {
0x00040000, /* directory_length */
0x1200005e, /* unit_specifier_id: IANA */
0x81000003, /* textual descriptor offset */
0x13000001, /* unit_sw_version: RFC 2734 */
0x81000005, /* textual descriptor offset */
0x00030000, /* descriptor_length */
0x00000000, /* text */
0x00000000, /* minimal ASCII, en */
0x49414e41, /* I A N A */
0x00030000, /* descriptor_length */
0x00000000, /* text */
0x00000000, /* minimal ASCII, en */
0x49507634, /* I P v 4 */
};
static struct fw_descriptor rfc2374_unit_directory = {
.length = ARRAY_SIZE(rfc2374_unit_directory_data),
.key = (CSR_DIRECTORY | CSR_UNIT) << 24,
.data = rfc2374_unit_directory_data
};
#if IS_ENABLED(CONFIG_IPV6)
static const u32 rfc3146_unit_directory_data[] = {
0x00040000, /* directory_length */
0x1200005e, /* unit_specifier_id: IANA */
0x81000003, /* textual descriptor offset */
0x13000002, /* unit_sw_version: RFC 3146 */
0x81000005, /* textual descriptor offset */
0x00030000, /* descriptor_length */
0x00000000, /* text */
0x00000000, /* minimal ASCII, en */
0x49414e41, /* I A N A */
0x00030000, /* descriptor_length */
0x00000000, /* text */
0x00000000, /* minimal ASCII, en */
0x49507636, /* I P v 6 */
};
static struct fw_descriptor rfc3146_unit_directory = {
.length = ARRAY_SIZE(rfc3146_unit_directory_data),
.key = (CSR_DIRECTORY | CSR_UNIT) << 24,
.data = rfc3146_unit_directory_data
};
#endif
static int __init fwnet_init(void)
{
int err;
err = fw_core_add_descriptor(&rfc2374_unit_directory);
if (err)
return err;
#if IS_ENABLED(CONFIG_IPV6)
err = fw_core_add_descriptor(&rfc3146_unit_directory);
if (err)
goto out;
#endif
fwnet_packet_task_cache = kmem_cache_create("packet_task",
sizeof(struct fwnet_packet_task), 0, 0, NULL);
if (!fwnet_packet_task_cache) {
err = -ENOMEM;
goto out2;
}
err = driver_register(&fwnet_driver.driver);
if (!err)
return 0;
kmem_cache_destroy(fwnet_packet_task_cache);
out2:
#if IS_ENABLED(CONFIG_IPV6)
fw_core_remove_descriptor(&rfc3146_unit_directory);
out:
#endif
fw_core_remove_descriptor(&rfc2374_unit_directory);
return err;
}
module_init(fwnet_init);
static void __exit fwnet_cleanup(void)
{
driver_unregister(&fwnet_driver.driver);
kmem_cache_destroy(fwnet_packet_task_cache);
#if IS_ENABLED(CONFIG_IPV6)
fw_core_remove_descriptor(&rfc3146_unit_directory);
#endif
fw_core_remove_descriptor(&rfc2374_unit_directory);
}
module_exit(fwnet_cleanup);
MODULE_AUTHOR("Jay Fenlason <fenlason@redhat.com>");
MODULE_DESCRIPTION("IP over IEEE1394 as per RFC 2734/3146");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(ieee1394, fwnet_id_table);