qeth: new qeth device driver

List of major changes and improvements:
 no manipulation of the global ARP constructor
 clean code split into core, layer 2 and layer 3 functionality
 better exploitation of the ethtool interface
 better representation of the various hardware capabilities
 fix packet socket support (tcpdump), no fake_ll required
 osasnmpd notification via udev events
 coding style and beautification
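
A minimal sketch of what the core/layer2/layer3 split above means in practice (an
editorial illustration only, not code from this commit; the discipline modules
themselves are not shown in this excerpt): a discipline module fills in the
struct qeth_discipline callbacks declared in qeth_core.h, and the core drives the
card through those pointers instead of knowing about layer 2 or layer 3 directly.

	/* hypothetical sketch, assuming a qeth_l2_recover() helper not shown here */
	static int qeth_l2_recover(void *ptr);

	static void qeth_l2_setup_discipline(struct qeth_card *card)
	{
		/* qeth_l2_ccwgroup_driver and struct qeth_discipline are declared in qeth_core.h */
		card->discipline.ccwgdriver = &qeth_l2_ccwgroup_driver;
		card->discipline.recover = qeth_l2_recover;
	}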

Signed-off-by: Frank Blaschka <frank.blaschka@de.ibm.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Author:    Frank Blaschka <frank.blaschka@de.ibm.com>
Date:      2008-02-15 09:19:42 +01:00
Committer: Jeff Garzik
parent 04885948b1
commit 4a71df5004
14 changed files with 13498 additions and 21 deletions


@@ -537,11 +537,9 @@ CONFIG_CTC=m
 # CONFIG_SMSGIUCV is not set
 # CONFIG_CLAW is not set
 CONFIG_QETH=y
-
-#
-# Gigabit Ethernet default settings
-#
-# CONFIG_QETH_IPV6 is not set
+CONFIG_QETH_L2=y
+CONFIG_QETH_L3=y
+CONFIG_QETH_IPV6=y
 CONFIG_CCWGROUP=y
 # CONFIG_PPP is not set
 # CONFIG_SLIP is not set


@@ -67,23 +67,26 @@ config QETH
 	  To compile this driver as a module, choose M.
 	  The module name is qeth.ko.
 
+config QETH_L2
+	tristate "qeth layer 2 device support"
+	depends on QETH
+	help
+	  Select this option to be able to run qeth devices in layer 2 mode.
+	  To compile as a module, choose M. The module name is qeth_l2.ko.
+	  If unsure, choose y.
+
-comment "Gigabit Ethernet default settings"
-	depends on QETH
+config QETH_L3
+	tristate "qeth layer 3 device support"
+	depends on QETH
+	help
+	  Select this option to be able to run qeth devices in layer 3 mode.
+	  To compile as a module choose M. The module name is qeth_l3.ko.
+	  If unsure, choose Y.
 
 config QETH_IPV6
-	bool "IPv6 support for gigabit ethernet"
-	depends on (QETH = IPV6) || (QETH && IPV6 = 'y')
-	help
-	  If CONFIG_QETH is switched on, this option will include IPv6
-	  support in the qeth device driver.
-
-config QETH_VLAN
-	bool "VLAN support for gigabit ethernet"
-	depends on (QETH = VLAN_8021Q) || (QETH && VLAN_8021Q = 'y')
-	help
-	  If CONFIG_QETH is switched on, this option will include IEEE
-	  802.1q VLAN support in the qeth device driver.
+	bool
+	depends on (QETH_L3 = IPV6) || (QETH_L3 && IPV6 = 'y')
+	default y
 
 config CCWGROUP
 	tristate


@@ -8,6 +8,9 @@ obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o
 obj-$(CONFIG_SMSGIUCV) += smsgiucv.o
 obj-$(CONFIG_LCS) += lcs.o cu3088.o
 obj-$(CONFIG_CLAW) += claw.o cu3088.o
-qeth-y := qeth_main.o qeth_mpc.o qeth_sys.o qeth_eddp.o
-qeth-$(CONFIG_PROC_FS) += qeth_proc.o
+qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o qeth_core_offl.o
 obj-$(CONFIG_QETH) += qeth.o
+qeth_l2-y += qeth_l2_main.o
+obj-$(CONFIG_QETH_L2) += qeth_l2.o
+qeth_l3-y += qeth_l3_main.o qeth_l3_sys.o
+obj-$(CONFIG_QETH_L3) += qeth_l3.o


@@ -0,0 +1,916 @@
/*
* drivers/s390/net/qeth_core.h
*
* Copyright IBM Corp. 2007
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
* Frank Pavlic <fpavlic@de.ibm.com>,
* Thomas Spatzier <tspat@de.ibm.com>,
* Frank Blaschka <frank.blaschka@de.ibm.com>
*/
#ifndef __QETH_CORE_H__
#define __QETH_CORE_H__
#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_tr.h>
#include <linux/trdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ctype.h>
#include <linux/in6.h>
#include <linux/bitops.h>
#include <linux/seq_file.h>
#include <linux/ethtool.h>
#include <net/ipv6.h>
#include <net/if_inet6.h>
#include <net/addrconf.h>
#include <asm/debug.h>
#include <asm/qdio.h>
#include <asm/ccwdev.h>
#include <asm/ccwgroup.h>
#include "qeth_core_mpc.h"
/**
* Debug Facility stuff
*/
#define QETH_DBF_SETUP_NAME "qeth_setup"
#define QETH_DBF_SETUP_LEN 8
#define QETH_DBF_SETUP_PAGES 8
#define QETH_DBF_SETUP_NR_AREAS 1
#define QETH_DBF_SETUP_LEVEL 5
#define QETH_DBF_MISC_NAME "qeth_misc"
#define QETH_DBF_MISC_LEN 128
#define QETH_DBF_MISC_PAGES 2
#define QETH_DBF_MISC_NR_AREAS 1
#define QETH_DBF_MISC_LEVEL 2
#define QETH_DBF_DATA_NAME "qeth_data"
#define QETH_DBF_DATA_LEN 96
#define QETH_DBF_DATA_PAGES 8
#define QETH_DBF_DATA_NR_AREAS 1
#define QETH_DBF_DATA_LEVEL 2
#define QETH_DBF_CONTROL_NAME "qeth_control"
#define QETH_DBF_CONTROL_LEN 256
#define QETH_DBF_CONTROL_PAGES 8
#define QETH_DBF_CONTROL_NR_AREAS 1
#define QETH_DBF_CONTROL_LEVEL 5
#define QETH_DBF_TRACE_NAME "qeth_trace"
#define QETH_DBF_TRACE_LEN 8
#define QETH_DBF_TRACE_PAGES 4
#define QETH_DBF_TRACE_NR_AREAS 1
#define QETH_DBF_TRACE_LEVEL 3
#define QETH_DBF_SENSE_NAME "qeth_sense"
#define QETH_DBF_SENSE_LEN 64
#define QETH_DBF_SENSE_PAGES 2
#define QETH_DBF_SENSE_NR_AREAS 1
#define QETH_DBF_SENSE_LEVEL 2
#define QETH_DBF_QERR_NAME "qeth_qerr"
#define QETH_DBF_QERR_LEN 8
#define QETH_DBF_QERR_PAGES 2
#define QETH_DBF_QERR_NR_AREAS 1
#define QETH_DBF_QERR_LEVEL 2
#define QETH_DBF_TEXT(name, level, text) \
do { \
debug_text_event(qeth_dbf_##name, level, text); \
} while (0)
#define QETH_DBF_HEX(name, level, addr, len) \
do { \
debug_event(qeth_dbf_##name, level, (void *)(addr), len); \
} while (0)
/* Allows sorting out low debug levels early to avoid wasted sprintfs */
static inline int qeth_dbf_passes(debug_info_t *dbf_grp, int level)
{
return (level <= dbf_grp->level);
}
/**
* some more debug stuff
*/
#define PRINTK_HEADER "qeth: "
#define SENSE_COMMAND_REJECT_BYTE 0
#define SENSE_COMMAND_REJECT_FLAG 0x80
#define SENSE_RESETTING_EVENT_BYTE 1
#define SENSE_RESETTING_EVENT_FLAG 0x80
/*
* Common IO related definitions
*/
#define CARD_RDEV(card) card->read.ccwdev
#define CARD_WDEV(card) card->write.ccwdev
#define CARD_DDEV(card) card->data.ccwdev
#define CARD_BUS_ID(card) card->gdev->dev.bus_id
#define CARD_RDEV_ID(card) card->read.ccwdev->dev.bus_id
#define CARD_WDEV_ID(card) card->write.ccwdev->dev.bus_id
#define CARD_DDEV_ID(card) card->data.ccwdev->dev.bus_id
#define CHANNEL_ID(channel) channel->ccwdev->dev.bus_id
/**
* card stuff
*/
struct qeth_perf_stats {
unsigned int bufs_rec;
unsigned int bufs_sent;
unsigned int skbs_sent_pack;
unsigned int bufs_sent_pack;
unsigned int sc_dp_p;
unsigned int sc_p_dp;
/* qdio_input_handler: number of times called, time spent in */
__u64 inbound_start_time;
unsigned int inbound_cnt;
unsigned int inbound_time;
/* qeth_send_packet: number of times called, time spent in */
__u64 outbound_start_time;
unsigned int outbound_cnt;
unsigned int outbound_time;
/* qdio_output_handler: number of times called, time spent in */
__u64 outbound_handler_start_time;
unsigned int outbound_handler_cnt;
unsigned int outbound_handler_time;
/* number of calls to and time spent in do_QDIO for inbound queue */
__u64 inbound_do_qdio_start_time;
unsigned int inbound_do_qdio_cnt;
unsigned int inbound_do_qdio_time;
/* number of calls to and time spent in do_QDIO for outbound queues */
__u64 outbound_do_qdio_start_time;
unsigned int outbound_do_qdio_cnt;
unsigned int outbound_do_qdio_time;
/* eddp data */
unsigned int large_send_bytes;
unsigned int large_send_cnt;
unsigned int sg_skbs_sent;
unsigned int sg_frags_sent;
/* initial values when measuring starts */
unsigned long initial_rx_packets;
unsigned long initial_tx_packets;
/* inbound scatter gather data */
unsigned int sg_skbs_rx;
unsigned int sg_frags_rx;
unsigned int sg_alloc_page_rx;
};
/* Routing stuff */
struct qeth_routing_info {
enum qeth_routing_types type;
};
/* IPA stuff */
struct qeth_ipa_info {
__u32 supported_funcs;
__u32 enabled_funcs;
};
static inline int qeth_is_ipa_supported(struct qeth_ipa_info *ipa,
enum qeth_ipa_funcs func)
{
return (ipa->supported_funcs & func);
}
static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
enum qeth_ipa_funcs func)
{
return (ipa->supported_funcs & ipa->enabled_funcs & func);
}
#define qeth_adp_supported(c, f) \
qeth_is_ipa_supported(&c->options.adp, f)
#define qeth_adp_enabled(c, f) \
qeth_is_ipa_enabled(&c->options.adp, f)
#define qeth_is_supported(c, f) \
qeth_is_ipa_supported(&c->options.ipa4, f)
#define qeth_is_enabled(c, f) \
qeth_is_ipa_enabled(&c->options.ipa4, f)
#define qeth_is_supported6(c, f) \
qeth_is_ipa_supported(&c->options.ipa6, f)
#define qeth_is_enabled6(c, f) \
qeth_is_ipa_enabled(&c->options.ipa6, f)
#define qeth_is_ipafunc_supported(c, prot, f) \
((prot == QETH_PROT_IPV6) ? \
qeth_is_supported6(c, f) : qeth_is_supported(c, f))
#define qeth_is_ipafunc_enabled(c, prot, f) \
((prot == QETH_PROT_IPV6) ? \
qeth_is_enabled6(c, f) : qeth_is_enabled(c, f))
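/* qeth_is_supported(card, f) tests the IPv4 assist mask (card->options.ipa4),
 * qeth_is_supported6() the IPv6 mask (ipa6), and qeth_adp_supported() the
 * adapter-parameter mask (adp); the *_ipafunc_* variants pick the v4 or v6
 * mask based on the protocol version argument. */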
#define QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT 0x0101
#define QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT 0x0101
#define QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT 0x4108
#define QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT 0x5108
#define QETH_MODELLIST_ARRAY \
{{0x1731, 0x01, 0x1732, 0x01, QETH_CARD_TYPE_OSAE, 1, \
QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT, \
QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT, \
QETH_MAX_QUEUES, 0}, \
{0x1731, 0x05, 0x1732, 0x05, QETH_CARD_TYPE_IQD, 0, \
QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT, \
QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT, \
QETH_MAX_QUEUES, 0x103}, \
{0x1731, 0x06, 0x1732, 0x06, QETH_CARD_TYPE_OSN, 0, \
QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT, \
QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT, \
QETH_MAX_QUEUES, 0}, \
{0, 0, 0, 0, 0, 0, 0, 0, 0} }
#define QETH_REAL_CARD 1
#define QETH_VLAN_CARD 2
#define QETH_BUFSIZE 4096
/**
* some more defs
*/
#define QETH_TX_TIMEOUT 100 * HZ
#define QETH_RCD_TIMEOUT 60 * HZ
#define QETH_HEADER_SIZE 32
#define QETH_MAX_PORTNO 15
/*IPv6 address autoconfiguration stuff*/
#define UNIQUE_ID_IF_CREATE_ADDR_FAILED 0xfffe
#define UNIQUE_ID_NOT_BY_CARD 0x10000
/*****************************************************************************/
/* QDIO queue and buffer handling */
/*****************************************************************************/
#define QETH_MAX_QUEUES 4
#define QETH_IN_BUF_SIZE_DEFAULT 65536
#define QETH_IN_BUF_COUNT_DEFAULT 16
#define QETH_IN_BUF_COUNT_MIN 8
#define QETH_IN_BUF_COUNT_MAX 128
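/* one buffer element per 4KB page of the configured inbound buffer size */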
#define QETH_MAX_BUFFER_ELEMENTS(card) ((card)->qdio.in_buf_size >> 12)
#define QETH_IN_BUF_REQUEUE_THRESHOLD(card) \
((card)->qdio.in_buf_pool.buf_count / 2)
/* buffers we have to be behind before we get a PCI */
#define QETH_PCI_THRESHOLD_A(card) ((card)->qdio.in_buf_pool.buf_count+1)
/*enqueued free buffers left before we get a PCI*/
#define QETH_PCI_THRESHOLD_B(card) 0
/*not used unless the microcode gets patched*/
#define QETH_PCI_TIMER_VALUE(card) 3
#define QETH_MIN_INPUT_THRESHOLD 1
#define QETH_MAX_INPUT_THRESHOLD 500
#define QETH_MIN_OUTPUT_THRESHOLD 1
#define QETH_MAX_OUTPUT_THRESHOLD 300
/* priority queueing */
#define QETH_PRIOQ_DEFAULT QETH_NO_PRIO_QUEUEING
#define QETH_DEFAULT_QUEUE 2
#define QETH_NO_PRIO_QUEUEING 0
#define QETH_PRIO_Q_ING_PREC 1
#define QETH_PRIO_Q_ING_TOS 2
#define IP_TOS_LOWDELAY 0x10
#define IP_TOS_HIGHTHROUGHPUT 0x08
#define IP_TOS_HIGHRELIABILITY 0x04
#define IP_TOS_NOTIMPORTANT 0x02
/* Packing */
#define QETH_LOW_WATERMARK_PACK 2
#define QETH_HIGH_WATERMARK_PACK 5
#define QETH_WATERMARK_PACK_FUZZ 1
#define QETH_IP_HEADER_SIZE 40
/* large receive scatter gather copy break */
#define QETH_RX_SG_CB (PAGE_SIZE >> 1)
struct qeth_hdr_layer3 {
__u8 id;
__u8 flags;
__u16 inbound_checksum; /*TSO:__u16 seqno */
__u32 token; /*TSO: __u32 reserved */
__u16 length;
__u8 vlan_prio;
__u8 ext_flags;
__u16 vlan_id;
__u16 frame_offset;
__u8 dest_addr[16];
} __attribute__ ((packed));
struct qeth_hdr_layer2 {
__u8 id;
__u8 flags[3];
__u8 port_no;
__u8 hdr_length;
__u16 pkt_length;
__u16 seq_no;
__u16 vlan_id;
__u32 reserved;
__u8 reserved2[16];
} __attribute__ ((packed));
struct qeth_hdr_osn {
__u8 id;
__u8 reserved;
__u16 seq_no;
__u16 reserved2;
__u16 control_flags;
__u16 pdu_length;
__u8 reserved3[18];
__u32 ccid;
} __attribute__ ((packed));
struct qeth_hdr {
union {
struct qeth_hdr_layer2 l2;
struct qeth_hdr_layer3 l3;
struct qeth_hdr_osn osn;
} hdr;
} __attribute__ ((packed));
/*TCP Segmentation Offload header*/
struct qeth_hdr_ext_tso {
__u16 hdr_tot_len;
__u8 imb_hdr_no;
__u8 reserved;
__u8 hdr_type;
__u8 hdr_version;
__u16 hdr_len;
__u32 payload_len;
__u16 mss;
__u16 dg_hdr_len;
__u8 padding[16];
} __attribute__ ((packed));
struct qeth_hdr_tso {
struct qeth_hdr hdr; /*hdr->hdr.l3.xxx*/
struct qeth_hdr_ext_tso ext;
} __attribute__ ((packed));
/* flags for qeth_hdr.flags */
#define QETH_HDR_PASSTHRU 0x10
#define QETH_HDR_IPV6 0x80
#define QETH_HDR_CAST_MASK 0x07
enum qeth_cast_flags {
QETH_CAST_UNICAST = 0x06,
QETH_CAST_MULTICAST = 0x04,
QETH_CAST_BROADCAST = 0x05,
QETH_CAST_ANYCAST = 0x07,
QETH_CAST_NOCAST = 0x00,
};
enum qeth_layer2_frame_flags {
QETH_LAYER2_FLAG_MULTICAST = 0x01,
QETH_LAYER2_FLAG_BROADCAST = 0x02,
QETH_LAYER2_FLAG_UNICAST = 0x04,
QETH_LAYER2_FLAG_VLAN = 0x10,
};
enum qeth_header_ids {
QETH_HEADER_TYPE_LAYER3 = 0x01,
QETH_HEADER_TYPE_LAYER2 = 0x02,
QETH_HEADER_TYPE_TSO = 0x03,
QETH_HEADER_TYPE_OSN = 0x04,
};
/* flags for qeth_hdr.ext_flags */
#define QETH_HDR_EXT_VLAN_FRAME 0x01
#define QETH_HDR_EXT_TOKEN_ID 0x02
#define QETH_HDR_EXT_INCLUDE_VLAN_TAG 0x04
#define QETH_HDR_EXT_SRC_MAC_ADDR 0x08
#define QETH_HDR_EXT_CSUM_HDR_REQ 0x10
#define QETH_HDR_EXT_CSUM_TRANSP_REQ 0x20
#define QETH_HDR_EXT_UDP_TSO 0x40 /*bit off for TCP*/
static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
{
return (sbale->flags & SBAL_FLAGS_LAST_ENTRY);
}
enum qeth_qdio_buffer_states {
/*
* inbound: read out by driver; owned by hardware in order to be filled
* outbound: owned by driver in order to be filled
*/
QETH_QDIO_BUF_EMPTY,
/*
* inbound: filled by hardware; owned by driver in order to be read out
* outbound: filled by driver; owned by hardware in order to be sent
*/
QETH_QDIO_BUF_PRIMED,
};
enum qeth_qdio_info_states {
QETH_QDIO_UNINITIALIZED,
QETH_QDIO_ALLOCATED,
QETH_QDIO_ESTABLISHED,
QETH_QDIO_CLEANING
};
struct qeth_buffer_pool_entry {
struct list_head list;
struct list_head init_list;
void *elements[QDIO_MAX_ELEMENTS_PER_BUFFER];
};
struct qeth_qdio_buffer_pool {
struct list_head entry_list;
int buf_count;
};
struct qeth_qdio_buffer {
struct qdio_buffer *buffer;
/* the buffer pool entry currently associated to this buffer */
struct qeth_buffer_pool_entry *pool_entry;
};
struct qeth_qdio_q {
struct qdio_buffer qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
struct qeth_qdio_buffer bufs[QDIO_MAX_BUFFERS_PER_Q];
int next_buf_to_init;
} __attribute__ ((aligned(256)));
/* possible types of qeth large_send support */
enum qeth_large_send_types {
QETH_LARGE_SEND_NO,
QETH_LARGE_SEND_EDDP,
QETH_LARGE_SEND_TSO,
};
struct qeth_qdio_out_buffer {
struct qdio_buffer *buffer;
atomic_t state;
int next_element_to_fill;
struct sk_buff_head skb_list;
struct list_head ctx_list;
};
struct qeth_card;
enum qeth_out_q_states {
QETH_OUT_Q_UNLOCKED,
QETH_OUT_Q_LOCKED,
QETH_OUT_Q_LOCKED_FLUSH,
};
struct qeth_qdio_out_q {
struct qdio_buffer qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
struct qeth_qdio_out_buffer bufs[QDIO_MAX_BUFFERS_PER_Q];
int queue_no;
struct qeth_card *card;
atomic_t state;
int do_pack;
/*
* index of buffer to be filled by driver; state EMPTY or PACKING
*/
int next_buf_to_fill;
/*
* number of buffers that are currently filled (PRIMED)
* -> these buffers are hardware-owned
*/
atomic_t used_buffers;
/* indicates whether PCI flag must be set (or if one is outstanding) */
atomic_t set_pci_flags_count;
} __attribute__ ((aligned(256)));
struct qeth_qdio_info {
atomic_t state;
/* input */
struct qeth_qdio_q *in_q;
struct qeth_qdio_buffer_pool in_buf_pool;
struct qeth_qdio_buffer_pool init_pool;
int in_buf_size;
/* output */
int no_out_queues;
struct qeth_qdio_out_q **out_qs;
/* priority queueing */
int do_prio_queueing;
int default_out_queue;
};
enum qeth_send_errors {
QETH_SEND_ERROR_NONE,
QETH_SEND_ERROR_LINK_FAILURE,
QETH_SEND_ERROR_RETRY,
QETH_SEND_ERROR_KICK_IT,
};
#define QETH_ETH_MAC_V4 0x0100 /* like v4 */
#define QETH_ETH_MAC_V6 0x3333 /* like v6 */
/* tr mc mac is longer, but that will be enough to detect mc frames */
#define QETH_TR_MAC_NC 0xc000 /* non-canonical */
#define QETH_TR_MAC_C 0x0300 /* canonical */
#define DEFAULT_ADD_HHLEN 0
#define MAX_ADD_HHLEN 1024
/**
* buffer stuff for read channel
*/
#define QETH_CMD_BUFFER_NO 8
/**
* channel state machine
*/
enum qeth_channel_states {
CH_STATE_UP,
CH_STATE_DOWN,
CH_STATE_ACTIVATING,
CH_STATE_HALTED,
CH_STATE_STOPPED,
CH_STATE_RCD,
CH_STATE_RCD_DONE,
};
/**
* card state machine
*/
enum qeth_card_states {
CARD_STATE_DOWN,
CARD_STATE_HARDSETUP,
CARD_STATE_SOFTSETUP,
CARD_STATE_UP,
CARD_STATE_RECOVER,
};
/**
* Protocol versions
*/
enum qeth_prot_versions {
QETH_PROT_IPV4 = 0x0004,
QETH_PROT_IPV6 = 0x0006,
};
enum qeth_ip_types {
QETH_IP_TYPE_NORMAL,
QETH_IP_TYPE_VIPA,
QETH_IP_TYPE_RXIP,
QETH_IP_TYPE_DEL_ALL_MC,
};
enum qeth_cmd_buffer_state {
BUF_STATE_FREE,
BUF_STATE_LOCKED,
BUF_STATE_PROCESSED,
};
struct qeth_ipato {
int enabled;
int invert4;
int invert6;
struct list_head entries;
};
struct qeth_channel;
struct qeth_cmd_buffer {
enum qeth_cmd_buffer_state state;
struct qeth_channel *channel;
unsigned char *data;
int rc;
void (*callback) (struct qeth_channel *, struct qeth_cmd_buffer *);
};
/**
* definition of a qeth channel, used for read and write
*/
struct qeth_channel {
enum qeth_channel_states state;
struct ccw1 ccw;
spinlock_t iob_lock;
wait_queue_head_t wait_q;
struct tasklet_struct irq_tasklet;
struct ccw_device *ccwdev;
/*command buffer for control data*/
struct qeth_cmd_buffer iob[QETH_CMD_BUFFER_NO];
atomic_t irq_pending;
int io_buf_no;
int buf_no;
};
/**
* OSA card related definitions
*/
struct qeth_token {
__u32 issuer_rm_w;
__u32 issuer_rm_r;
__u32 cm_filter_w;
__u32 cm_filter_r;
__u32 cm_connection_w;
__u32 cm_connection_r;
__u32 ulp_filter_w;
__u32 ulp_filter_r;
__u32 ulp_connection_w;
__u32 ulp_connection_r;
};
struct qeth_seqno {
__u32 trans_hdr;
__u32 pdu_hdr;
__u32 pdu_hdr_ack;
__u16 ipa;
__u32 pkt_seqno;
};
struct qeth_reply {
struct list_head list;
wait_queue_head_t wait_q;
int (*callback)(struct qeth_card *, struct qeth_reply *,
unsigned long);
u32 seqno;
unsigned long offset;
atomic_t received;
int rc;
void *param;
struct qeth_card *card;
atomic_t refcnt;
};
struct qeth_card_blkt {
int time_total;
int inter_packet;
int inter_packet_jumbo;
};
#define QETH_BROADCAST_WITH_ECHO 0x01
#define QETH_BROADCAST_WITHOUT_ECHO 0x02
#define QETH_LAYER2_MAC_READ 0x01
#define QETH_LAYER2_MAC_REGISTERED 0x02
struct qeth_card_info {
unsigned short unit_addr2;
unsigned short cula;
unsigned short chpid;
__u16 func_level;
char mcl_level[QETH_MCL_LENGTH + 1];
int guestlan;
int mac_bits;
int portname_required;
int portno;
char portname[9];
enum qeth_card_types type;
enum qeth_link_types link_type;
int is_multicast_different;
int initial_mtu;
int max_mtu;
int broadcast_capable;
int unique_id;
struct qeth_card_blkt blkt;
__u32 csum_mask;
enum qeth_ipa_promisc_modes promisc_mode;
};
struct qeth_card_options {
struct qeth_routing_info route4;
struct qeth_ipa_info ipa4;
struct qeth_ipa_info adp; /*Adapter parameters*/
struct qeth_routing_info route6;
struct qeth_ipa_info ipa6;
enum qeth_checksum_types checksum_type;
int broadcast_mode;
int macaddr_mode;
int fake_broadcast;
int add_hhlen;
int fake_ll;
int layer2;
enum qeth_large_send_types large_send;
int performance_stats;
int rx_sg_cb;
};
/*
* thread bits for qeth_card thread masks
*/
enum qeth_threads {
QETH_RECOVER_THREAD = 1,
};
struct qeth_osn_info {
int (*assist_cb)(struct net_device *dev, void *data);
int (*data_cb)(struct sk_buff *skb);
};
enum qeth_discipline_id {
QETH_DISCIPLINE_LAYER3 = 0,
QETH_DISCIPLINE_LAYER2 = 1,
};
struct qeth_discipline {
qdio_handler_t *input_handler;
qdio_handler_t *output_handler;
int (*recover)(void *ptr);
struct ccwgroup_driver *ccwgdriver;
};
struct qeth_vlan_vid {
struct list_head list;
unsigned short vid;
};
struct qeth_mc_mac {
struct list_head list;
__u8 mc_addr[MAX_ADDR_LEN];
unsigned char mc_addrlen;
};
struct qeth_card {
struct list_head list;
enum qeth_card_states state;
int lan_online;
spinlock_t lock;
struct ccwgroup_device *gdev;
struct qeth_channel read;
struct qeth_channel write;
struct qeth_channel data;
struct net_device *dev;
struct net_device_stats stats;
struct qeth_card_info info;
struct qeth_token token;
struct qeth_seqno seqno;
struct qeth_card_options options;
wait_queue_head_t wait_q;
spinlock_t vlanlock;
spinlock_t mclock;
struct vlan_group *vlangrp;
struct list_head vid_list;
struct list_head mc_list;
struct work_struct kernel_thread_starter;
spinlock_t thread_mask_lock;
unsigned long thread_start_mask;
unsigned long thread_allowed_mask;
unsigned long thread_running_mask;
spinlock_t ip_lock;
struct list_head ip_list;
struct list_head *ip_tbd_list;
struct qeth_ipato ipato;
struct list_head cmd_waiter_list;
/* QDIO buffer handling */
struct qeth_qdio_info qdio;
struct qeth_perf_stats perf_stats;
int use_hard_stop;
struct qeth_osn_info osn_info;
struct qeth_discipline discipline;
atomic_t force_alloc_skb;
};
struct qeth_card_list_struct {
struct list_head list;
rwlock_t rwlock;
};
/*some helper functions*/
#define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")
static inline struct qeth_card *CARD_FROM_CDEV(struct ccw_device *cdev)
{
struct qeth_card *card = dev_get_drvdata(&((struct ccwgroup_device *)
dev_get_drvdata(&cdev->dev))->dev);
return card;
}
static inline int qeth_get_micros(void)
{
return (int) (get_clock() >> 12);
}
static inline void *qeth_push_skb(struct qeth_card *card, struct sk_buff *skb,
int size)
{
void *hdr;
hdr = (void *) skb_push(skb, size);
/*
* sanity check, the Linux memory allocation scheme should
* never present us cases like this one (the qdio header size plus
* the first 40 bytes of the packet cross a 4k boundary)
*/
if ((((unsigned long) hdr) & (~(PAGE_SIZE - 1))) !=
(((unsigned long) hdr + size +
QETH_IP_HEADER_SIZE) & (~(PAGE_SIZE - 1)))) {
PRINT_ERR("Misaligned packet on interface %s. Discarded.",
QETH_CARD_IFNAME(card));
return NULL;
}
return hdr;
}
static inline int qeth_get_ip_version(struct sk_buff *skb)
{
switch (skb->protocol) {
case ETH_P_IPV6:
return 6;
case ETH_P_IP:
return 4;
default:
return 0;
}
}
struct qeth_eddp_context;
extern struct ccwgroup_driver qeth_l2_ccwgroup_driver;
extern struct ccwgroup_driver qeth_l3_ccwgroup_driver;
const char *qeth_get_cardname_short(struct qeth_card *);
int qeth_realloc_buffer_pool(struct qeth_card *, int);
int qeth_core_load_discipline(struct qeth_card *, enum qeth_discipline_id);
void qeth_core_free_discipline(struct qeth_card *);
int qeth_core_create_device_attributes(struct device *);
void qeth_core_remove_device_attributes(struct device *);
int qeth_core_create_osn_attributes(struct device *);
void qeth_core_remove_osn_attributes(struct device *);
/* exports for qeth discipline device drivers */
extern struct qeth_card_list_struct qeth_core_card_list;
extern debug_info_t *qeth_dbf_setup;
extern debug_info_t *qeth_dbf_data;
extern debug_info_t *qeth_dbf_misc;
extern debug_info_t *qeth_dbf_control;
extern debug_info_t *qeth_dbf_trace;
extern debug_info_t *qeth_dbf_sense;
extern debug_info_t *qeth_dbf_qerr;
void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int);
int qeth_threads_running(struct qeth_card *, unsigned long);
int qeth_wait_for_threads(struct qeth_card *, unsigned long);
int qeth_do_run_thread(struct qeth_card *, unsigned long);
void qeth_clear_thread_start_bit(struct qeth_card *, unsigned long);
void qeth_clear_thread_running_bit(struct qeth_card *, unsigned long);
int qeth_core_hardsetup_card(struct qeth_card *);
void qeth_print_status_message(struct qeth_card *);
int qeth_init_qdio_queues(struct qeth_card *);
int qeth_send_startlan(struct qeth_card *);
int qeth_send_stoplan(struct qeth_card *);
int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
int (*reply_cb)
(struct qeth_card *, struct qeth_reply *, unsigned long),
void *);
struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *,
enum qeth_ipa_cmds, enum qeth_prot_versions);
int qeth_query_setadapterparms(struct qeth_card *);
int qeth_check_qdio_errors(struct qdio_buffer *, unsigned int,
unsigned int, const char *);
void qeth_put_buffer_pool_entry(struct qeth_card *,
struct qeth_buffer_pool_entry *);
void qeth_queue_input_buffer(struct qeth_card *, int);
struct sk_buff *qeth_core_get_next_skb(struct qeth_card *,
struct qdio_buffer *, struct qdio_buffer_element **, int *,
struct qeth_hdr **);
void qeth_schedule_recovery(struct qeth_card *);
void qeth_qdio_output_handler(struct ccw_device *, unsigned int,
unsigned int, unsigned int,
unsigned int, int, int,
unsigned long);
void qeth_clear_ipacmd_list(struct qeth_card *);
int qeth_qdio_clear_card(struct qeth_card *, int);
void qeth_clear_working_pool_list(struct qeth_card *);
void qeth_clear_cmd_buffers(struct qeth_channel *);
void qeth_clear_qdio_buffers(struct qeth_card *);
void qeth_setadp_promisc_mode(struct qeth_card *);
struct net_device_stats *qeth_get_stats(struct net_device *);
int qeth_change_mtu(struct net_device *, int);
int qeth_setadpparms_change_macaddr(struct qeth_card *);
void qeth_tx_timeout(struct net_device *);
void qeth_prepare_control_data(struct qeth_card *, int,
struct qeth_cmd_buffer *);
void qeth_release_buffer(struct qeth_channel *, struct qeth_cmd_buffer *);
void qeth_prepare_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, char);
struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *);
int qeth_mdio_read(struct net_device *, int, int);
int qeth_snmp_command(struct qeth_card *, char __user *);
int qeth_set_large_send(struct qeth_card *, enum qeth_large_send_types);
struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *, __u32, __u32);
int qeth_default_setadapterparms_cb(struct qeth_card *, struct qeth_reply *,
unsigned long);
int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *,
int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long),
void *reply_param);
int qeth_get_cast_type(struct qeth_card *, struct sk_buff *);
int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int);
struct sk_buff *qeth_prepare_skb(struct qeth_card *, struct sk_buff *,
struct qeth_hdr **);
int qeth_get_elements_no(struct qeth_card *, void *, struct sk_buff *, int);
int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *,
struct sk_buff *, struct qeth_hdr *, int,
struct qeth_eddp_context *);
int qeth_do_send_packet(struct qeth_card *, struct qeth_qdio_out_q *,
struct sk_buff *, struct qeth_hdr *,
int, struct qeth_eddp_context *);
int qeth_core_get_stats_count(struct net_device *);
void qeth_core_get_ethtool_stats(struct net_device *,
struct ethtool_stats *, u64 *);
void qeth_core_get_strings(struct net_device *, u32, u8 *);
void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);
/* exports for OSN */
int qeth_osn_assist(struct net_device *, void *, int);
int qeth_osn_register(unsigned char *read_dev_no, struct net_device **,
int (*assist_cb)(struct net_device *, void *),
int (*data_cb)(struct sk_buff *));
void qeth_osn_deregister(struct net_device *);
#endif /* __QETH_CORE_H__ */

File diff suppressed because it is too large


@@ -0,0 +1,266 @@
/*
* drivers/s390/net/qeth_core_mpc.c
*
* Copyright IBM Corp. 2007
* Author(s): Frank Pavlic <fpavlic@de.ibm.com>,
* Thomas Spatzier <tspat@de.ibm.com>,
* Frank Blaschka <frank.blaschka@de.ibm.com>
*/
#include <linux/module.h>
#include <asm/cio.h>
#include "qeth_core_mpc.h"
unsigned char IDX_ACTIVATE_READ[] = {
0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
0x19, 0x01, 0x01, 0x80, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc8, 0xc1,
0xd3, 0xd3, 0xd6, 0xd3, 0xc5, 0x40, 0x00, 0x00,
0x00, 0x00
};
unsigned char IDX_ACTIVATE_WRITE[] = {
0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
0x15, 0x01, 0x01, 0x80, 0x00, 0x00, 0x00, 0x00,
0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xc8, 0xc1,
0xd3, 0xd3, 0xd6, 0xd3, 0xc5, 0x40, 0x00, 0x00,
0x00, 0x00
};
unsigned char CM_ENABLE[] = {
0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x63,
0x10, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00,
0x81, 0x7e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x00, 0x23,
0x00, 0x00, 0x23, 0x05, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x40,
0x00, 0x0c, 0x41, 0x02, 0x00, 0x17, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x0b, 0x04, 0x01,
0x7e, 0x04, 0x05, 0x00, 0x01, 0x01, 0x0f,
0x00,
0x0c, 0x04, 0x02, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff
};
unsigned char CM_SETUP[] = {
0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x64,
0x10, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00,
0x81, 0x7e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x00, 0x24,
0x00, 0x00, 0x24, 0x05, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x40,
0x00, 0x0c, 0x41, 0x04, 0x00, 0x18, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x09, 0x04, 0x04,
0x05, 0x00, 0x01, 0x01, 0x11,
0x00, 0x09, 0x04,
0x05, 0x05, 0x00, 0x00, 0x00, 0x00,
0x00, 0x06,
0x04, 0x06, 0xc8, 0x00
};
unsigned char ULP_ENABLE[] = {
0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x6b,
0x10, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00,
0x41, 0x7e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x00, 0x2b,
0x00, 0x00, 0x2b, 0x05, 0x20, 0x01, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x00, 0x00, 0x2b, 0x00, 0x00, 0x00, 0x40,
0x00, 0x0c, 0x41, 0x02, 0x00, 0x1f, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x0b, 0x04, 0x01,
0x03, 0x04, 0x05, 0x00, 0x01, 0x01, 0x12,
0x00,
0x14, 0x04, 0x0a, 0x00, 0x20, 0x00, 0x00, 0xff,
0xff, 0x00, 0x08, 0xc8, 0xe8, 0xc4, 0xf1, 0xc7,
0xf1, 0x00, 0x00
};
unsigned char ULP_SETUP[] = {
0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x6c,
0x10, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00,
0x41, 0x7e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02,
0x00, 0x00, 0x00, 0x01, 0x00, 0x24, 0x00, 0x2c,
0x00, 0x00, 0x2c, 0x05, 0x20, 0x01, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x00, 0x00, 0x2c, 0x00, 0x00, 0x00, 0x40,
0x00, 0x0c, 0x41, 0x04, 0x00, 0x20, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x09, 0x04, 0x04,
0x05, 0x00, 0x01, 0x01, 0x14,
0x00, 0x09, 0x04,
0x05, 0x05, 0x30, 0x01, 0x00, 0x00,
0x00, 0x06,
0x04, 0x06, 0x40, 0x00,
0x00, 0x08, 0x04, 0x0b,
0x00, 0x00, 0x00, 0x00
};
unsigned char DM_ACT[] = {
0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x55,
0x10, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00,
0x41, 0x7e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x03,
0x00, 0x00, 0x00, 0x02, 0x00, 0x24, 0x00, 0x15,
0x00, 0x00, 0x2c, 0x05, 0x20, 0x01, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, 0x40,
0x00, 0x0c, 0x43, 0x60, 0x00, 0x09, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x09, 0x04, 0x04,
0x05, 0x40, 0x01, 0x01, 0x00
};
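/* Template for the 0x40-byte IPA PDU header. The length fields below are
 * derived from sizeof(struct qeth_ipa_cmd) at compile time; the 0x77 filler
 * bytes are placeholders that get overwritten (sequence number at offset 4,
 * destination address token at offset 0x2c) before the buffer is sent. */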
unsigned char IPA_PDU_HEADER[] = {
0x00, 0xe0, 0x00, 0x00, 0x77, 0x77, 0x77, 0x77,
0x00, 0x00, 0x00, 0x14, 0x00, 0x00,
(IPA_PDU_HEADER_SIZE+sizeof(struct qeth_ipa_cmd)) / 256,
(IPA_PDU_HEADER_SIZE+sizeof(struct qeth_ipa_cmd)) % 256,
0x10, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
0xc1, 0x03, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x24,
sizeof(struct qeth_ipa_cmd) / 256,
sizeof(struct qeth_ipa_cmd) % 256,
0x00,
sizeof(struct qeth_ipa_cmd) / 256,
sizeof(struct qeth_ipa_cmd) % 256,
0x05,
0x77, 0x77, 0x77, 0x77,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x00,
sizeof(struct qeth_ipa_cmd) / 256,
sizeof(struct qeth_ipa_cmd) % 256,
0x00, 0x00, 0x00, 0x40,
};
EXPORT_SYMBOL_GPL(IPA_PDU_HEADER);
unsigned char WRITE_CCW[] = {
0x01, CCW_FLAG_SLI, 0, 0,
0, 0, 0, 0
};
unsigned char READ_CCW[] = {
0x02, CCW_FLAG_SLI, 0, 0,
0, 0, 0, 0
};
struct ipa_rc_msg {
enum qeth_ipa_return_codes rc;
char *msg;
};
static struct ipa_rc_msg qeth_ipa_rc_msg[] = {
{IPA_RC_SUCCESS, "success"},
{IPA_RC_NOTSUPP, "Command not supported"},
{IPA_RC_IP_TABLE_FULL, "Add Addr IP Table Full - ipv6"},
{IPA_RC_UNKNOWN_ERROR, "IPA command failed - reason unknown"},
{IPA_RC_UNSUPPORTED_COMMAND, "Command not supported"},
{IPA_RC_DUP_IPV6_REMOTE, "ipv6 address already registered remote"},
{IPA_RC_DUP_IPV6_HOME, "ipv6 address already registered"},
{IPA_RC_UNREGISTERED_ADDR, "Address not registered"},
{IPA_RC_NO_ID_AVAILABLE, "No identifiers available"},
{IPA_RC_ID_NOT_FOUND, "Identifier not found"},
{IPA_RC_INVALID_IP_VERSION, "IP version incorrect"},
{IPA_RC_LAN_FRAME_MISMATCH, "LAN and frame mismatch"},
{IPA_RC_L2_UNSUPPORTED_CMD, "Unsupported layer 2 command"},
{IPA_RC_L2_DUP_MAC, "Duplicate MAC address"},
{IPA_RC_L2_ADDR_TABLE_FULL, "Layer2 address table full"},
{IPA_RC_L2_DUP_LAYER3_MAC, "Duplicate with layer 3 MAC"},
{IPA_RC_L2_GMAC_NOT_FOUND, "GMAC not found"},
{IPA_RC_L2_MAC_NOT_FOUND, "L2 mac address not found"},
{IPA_RC_L2_INVALID_VLAN_ID, "L2 invalid vlan id"},
{IPA_RC_L2_DUP_VLAN_ID, "L2 duplicate vlan id"},
{IPA_RC_L2_VLAN_ID_NOT_FOUND, "L2 vlan id not found"},
{IPA_RC_DATA_MISMATCH, "Data field mismatch (v4/v6 mixed)"},
{IPA_RC_INVALID_MTU_SIZE, "Invalid MTU size"},
{IPA_RC_INVALID_LANTYPE, "Invalid LAN type"},
{IPA_RC_INVALID_LANNUM, "Invalid LAN num"},
{IPA_RC_DUPLICATE_IP_ADDRESS, "Address already registered"},
{IPA_RC_IP_ADDR_TABLE_FULL, "IP address table full"},
{IPA_RC_LAN_PORT_STATE_ERROR, "LAN port state error"},
{IPA_RC_SETIP_NO_STARTLAN, "Setip no startlan received"},
{IPA_RC_SETIP_ALREADY_RECEIVED, "Setip already received"},
{IPA_RC_IP_ADDR_ALREADY_USED, "IP address already in use on LAN"},
{IPA_RC_MULTICAST_FULL, "No task available, multicast full"},
{IPA_RC_SETIP_INVALID_VERSION, "SETIP invalid IP version"},
{IPA_RC_UNSUPPORTED_SUBCMD, "Unsupported assist subcommand"},
{IPA_RC_ARP_ASSIST_NO_ENABLE, "Only partial success, no enable"},
{IPA_RC_PRIMARY_ALREADY_DEFINED, "Primary already defined"},
{IPA_RC_SECOND_ALREADY_DEFINED, "Secondary already defined"},
{IPA_RC_INVALID_SETRTG_INDICATOR, "Invalid SETRTG indicator"},
{IPA_RC_MC_ADDR_ALREADY_DEFINED, "Multicast address already defined"},
{IPA_RC_LAN_OFFLINE, "STRTLAN_LAN_DISABLED - LAN offline"},
{IPA_RC_INVALID_IP_VERSION2, "Invalid IP version"},
{IPA_RC_FFFF, "Unknown Error"}
};
char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc)
{
int x = 0;
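	/* sentinel: copy the rc we are looking for into the last table entry so
	 * the scan below always terminates; unknown codes thus map to the final
	 * "Unknown Error" text */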
qeth_ipa_rc_msg[sizeof(qeth_ipa_rc_msg) /
sizeof(struct ipa_rc_msg) - 1].rc = rc;
while (qeth_ipa_rc_msg[x].rc != rc)
x++;
return qeth_ipa_rc_msg[x].msg;
}
struct ipa_cmd_names {
enum qeth_ipa_cmds cmd;
char *name;
};
static struct ipa_cmd_names qeth_ipa_cmd_names[] = {
{IPA_CMD_STARTLAN, "startlan"},
{IPA_CMD_STOPLAN, "stoplan"},
{IPA_CMD_SETVMAC, "setvmac"},
{IPA_CMD_DELVMAC, "delvmac"},
{IPA_CMD_SETGMAC, "setgmac"},
{IPA_CMD_DELGMAC, "delgmac"},
{IPA_CMD_SETVLAN, "setvlan"},
{IPA_CMD_DELVLAN, "delvlan"},
{IPA_CMD_SETCCID, "setccid"},
{IPA_CMD_DELCCID, "delccid"},
{IPA_CMD_MODCCID, "modccid"},
{IPA_CMD_SETIP, "setip"},
{IPA_CMD_QIPASSIST, "qipassist"},
{IPA_CMD_SETASSPARMS, "setassparms"},
{IPA_CMD_SETIPM, "setipm"},
{IPA_CMD_DELIPM, "delipm"},
{IPA_CMD_SETRTG, "setrtg"},
{IPA_CMD_DELIP, "delip"},
{IPA_CMD_SETADAPTERPARMS, "setadapterparms"},
{IPA_CMD_SET_DIAG_ASS, "set_diag_ass"},
{IPA_CMD_CREATE_ADDR, "create_addr"},
{IPA_CMD_DESTROY_ADDR, "destroy_addr"},
{IPA_CMD_REGISTER_LOCAL_ADDR, "register_local_addr"},
{IPA_CMD_UNREGISTER_LOCAL_ADDR, "unregister_local_addr"},
{IPA_CMD_UNKNOWN, "unknown"},
};
char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd)
{
int x = 0;
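	/* same sentinel trick as in qeth_get_ipa_msg: unknown commands end up
	 * at the final "unknown" entry */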
qeth_ipa_cmd_names[
sizeof(qeth_ipa_cmd_names) /
sizeof(struct ipa_cmd_names)-1].cmd = cmd;
while (qeth_ipa_cmd_names[x].cmd != cmd)
x++;
return qeth_ipa_cmd_names[x].name;
}


@@ -0,0 +1,566 @@
/*
* drivers/s390/net/qeth_core_mpc.h
*
* Copyright IBM Corp. 2007
* Author(s): Frank Pavlic <fpavlic@de.ibm.com>,
* Thomas Spatzier <tspat@de.ibm.com>,
* Frank Blaschka <frank.blaschka@de.ibm.com>
*/
#ifndef __QETH_CORE_MPC_H__
#define __QETH_CORE_MPC_H__
#include <asm/qeth.h>
#define IPA_PDU_HEADER_SIZE 0x40
#define QETH_IPA_PDU_LEN_TOTAL(buffer) (buffer + 0x0e)
#define QETH_IPA_PDU_LEN_PDU1(buffer) (buffer + 0x26)
#define QETH_IPA_PDU_LEN_PDU2(buffer) (buffer + 0x29)
#define QETH_IPA_PDU_LEN_PDU3(buffer) (buffer + 0x3a)
extern unsigned char IPA_PDU_HEADER[];
#define QETH_IPA_CMD_DEST_ADDR(buffer) (buffer + 0x2c)
#define IPA_CMD_LENGTH (IPA_PDU_HEADER_SIZE + sizeof(struct qeth_ipa_cmd))
#define QETH_SEQ_NO_LENGTH 4
#define QETH_MPC_TOKEN_LENGTH 4
#define QETH_MCL_LENGTH 4
#define OSA_ADDR_LEN 6
#define QETH_TIMEOUT (10 * HZ)
#define QETH_IPA_TIMEOUT (45 * HZ)
#define QETH_IDX_COMMAND_SEQNO 0xffff0000
#define SR_INFO_LEN 16
#define QETH_CLEAR_CHANNEL_PARM -10
#define QETH_HALT_CHANNEL_PARM -11
#define QETH_RCD_PARM -12
/*****************************************************************************/
/* IP Assist related definitions */
/*****************************************************************************/
#define IPA_CMD_INITIATOR_HOST 0x00
#define IPA_CMD_INITIATOR_OSA 0x01
#define IPA_CMD_INITIATOR_HOST_REPLY 0x80
#define IPA_CMD_INITIATOR_OSA_REPLY 0x81
#define IPA_CMD_PRIM_VERSION_NO 0x01
enum qeth_card_types {
QETH_CARD_TYPE_UNKNOWN = 0,
QETH_CARD_TYPE_OSAE = 10,
QETH_CARD_TYPE_IQD = 1234,
QETH_CARD_TYPE_OSN = 11,
};
#define QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE 0x18
/* only the first two bytes are looked at in qeth_get_cardname_short */
enum qeth_link_types {
QETH_LINK_TYPE_FAST_ETH = 0x01,
QETH_LINK_TYPE_HSTR = 0x02,
QETH_LINK_TYPE_GBIT_ETH = 0x03,
QETH_LINK_TYPE_OSN = 0x04,
QETH_LINK_TYPE_10GBIT_ETH = 0x10,
QETH_LINK_TYPE_LANE_ETH100 = 0x81,
QETH_LINK_TYPE_LANE_TR = 0x82,
QETH_LINK_TYPE_LANE_ETH1000 = 0x83,
QETH_LINK_TYPE_LANE = 0x88,
QETH_LINK_TYPE_ATM_NATIVE = 0x90,
};
enum qeth_tr_macaddr_modes {
QETH_TR_MACADDR_NONCANONICAL = 0,
QETH_TR_MACADDR_CANONICAL = 1,
};
enum qeth_tr_broadcast_modes {
QETH_TR_BROADCAST_ALLRINGS = 0,
QETH_TR_BROADCAST_LOCAL = 1,
};
/* these values match CHECKSUM_* in include/linux/skbuff.h */
enum qeth_checksum_types {
SW_CHECKSUMMING = 0, /* TODO: set to bit flag used in IPA Command */
HW_CHECKSUMMING = 1,
NO_CHECKSUMMING = 2,
};
#define QETH_CHECKSUM_DEFAULT SW_CHECKSUMMING
/*
* Routing stuff
*/
#define RESET_ROUTING_FLAG 0x10 /* indicate that routing type shall be set */
enum qeth_routing_types {
/* TODO: set to bit flag used in IPA Command */
NO_ROUTER = 0,
PRIMARY_ROUTER = 1,
SECONDARY_ROUTER = 2,
MULTICAST_ROUTER = 3,
PRIMARY_CONNECTOR = 4,
SECONDARY_CONNECTOR = 5,
};
/* IPA Commands */
enum qeth_ipa_cmds {
IPA_CMD_STARTLAN = 0x01,
IPA_CMD_STOPLAN = 0x02,
IPA_CMD_SETVMAC = 0x21,
IPA_CMD_DELVMAC = 0x22,
IPA_CMD_SETGMAC = 0x23,
IPA_CMD_DELGMAC = 0x24,
IPA_CMD_SETVLAN = 0x25,
IPA_CMD_DELVLAN = 0x26,
IPA_CMD_SETCCID = 0x41,
IPA_CMD_DELCCID = 0x42,
IPA_CMD_MODCCID = 0x43,
IPA_CMD_SETIP = 0xb1,
IPA_CMD_QIPASSIST = 0xb2,
IPA_CMD_SETASSPARMS = 0xb3,
IPA_CMD_SETIPM = 0xb4,
IPA_CMD_DELIPM = 0xb5,
IPA_CMD_SETRTG = 0xb6,
IPA_CMD_DELIP = 0xb7,
IPA_CMD_SETADAPTERPARMS = 0xb8,
IPA_CMD_SET_DIAG_ASS = 0xb9,
IPA_CMD_CREATE_ADDR = 0xc3,
IPA_CMD_DESTROY_ADDR = 0xc4,
IPA_CMD_REGISTER_LOCAL_ADDR = 0xd1,
IPA_CMD_UNREGISTER_LOCAL_ADDR = 0xd2,
IPA_CMD_UNKNOWN = 0x00
};
enum qeth_ip_ass_cmds {
IPA_CMD_ASS_START = 0x0001,
IPA_CMD_ASS_STOP = 0x0002,
IPA_CMD_ASS_CONFIGURE = 0x0003,
IPA_CMD_ASS_ENABLE = 0x0004,
};
enum qeth_arp_process_subcmds {
IPA_CMD_ASS_ARP_SET_NO_ENTRIES = 0x0003,
IPA_CMD_ASS_ARP_QUERY_CACHE = 0x0004,
IPA_CMD_ASS_ARP_ADD_ENTRY = 0x0005,
IPA_CMD_ASS_ARP_REMOVE_ENTRY = 0x0006,
IPA_CMD_ASS_ARP_FLUSH_CACHE = 0x0007,
IPA_CMD_ASS_ARP_QUERY_INFO = 0x0104,
IPA_CMD_ASS_ARP_QUERY_STATS = 0x0204,
};
/* Return Codes for IPA Commands
* according to OSA card Specs */
enum qeth_ipa_return_codes {
IPA_RC_SUCCESS = 0x0000,
IPA_RC_NOTSUPP = 0x0001,
IPA_RC_IP_TABLE_FULL = 0x0002,
IPA_RC_UNKNOWN_ERROR = 0x0003,
IPA_RC_UNSUPPORTED_COMMAND = 0x0004,
IPA_RC_DUP_IPV6_REMOTE = 0x0008,
IPA_RC_DUP_IPV6_HOME = 0x0010,
IPA_RC_UNREGISTERED_ADDR = 0x0011,
IPA_RC_NO_ID_AVAILABLE = 0x0012,
IPA_RC_ID_NOT_FOUND = 0x0013,
IPA_RC_INVALID_IP_VERSION = 0x0020,
IPA_RC_LAN_FRAME_MISMATCH = 0x0040,
IPA_RC_L2_UNSUPPORTED_CMD = 0x2003,
IPA_RC_L2_DUP_MAC = 0x2005,
IPA_RC_L2_ADDR_TABLE_FULL = 0x2006,
IPA_RC_L2_DUP_LAYER3_MAC = 0x200a,
IPA_RC_L2_GMAC_NOT_FOUND = 0x200b,
IPA_RC_L2_MAC_NOT_FOUND = 0x2010,
IPA_RC_L2_INVALID_VLAN_ID = 0x2015,
IPA_RC_L2_DUP_VLAN_ID = 0x2016,
IPA_RC_L2_VLAN_ID_NOT_FOUND = 0x2017,
IPA_RC_DATA_MISMATCH = 0xe001,
IPA_RC_INVALID_MTU_SIZE = 0xe002,
IPA_RC_INVALID_LANTYPE = 0xe003,
IPA_RC_INVALID_LANNUM = 0xe004,
IPA_RC_DUPLICATE_IP_ADDRESS = 0xe005,
IPA_RC_IP_ADDR_TABLE_FULL = 0xe006,
IPA_RC_LAN_PORT_STATE_ERROR = 0xe007,
IPA_RC_SETIP_NO_STARTLAN = 0xe008,
IPA_RC_SETIP_ALREADY_RECEIVED = 0xe009,
IPA_RC_IP_ADDR_ALREADY_USED = 0xe00a,
IPA_RC_MULTICAST_FULL = 0xe00b,
IPA_RC_SETIP_INVALID_VERSION = 0xe00d,
IPA_RC_UNSUPPORTED_SUBCMD = 0xe00e,
IPA_RC_ARP_ASSIST_NO_ENABLE = 0xe00f,
IPA_RC_PRIMARY_ALREADY_DEFINED = 0xe010,
IPA_RC_SECOND_ALREADY_DEFINED = 0xe011,
IPA_RC_INVALID_SETRTG_INDICATOR = 0xe012,
IPA_RC_MC_ADDR_ALREADY_DEFINED = 0xe013,
IPA_RC_LAN_OFFLINE = 0xe080,
IPA_RC_INVALID_IP_VERSION2 = 0xf001,
IPA_RC_FFFF = 0xffff
};
/* IPA function flags; each flag marks availability of respective function */
enum qeth_ipa_funcs {
IPA_ARP_PROCESSING = 0x00000001L,
IPA_INBOUND_CHECKSUM = 0x00000002L,
IPA_OUTBOUND_CHECKSUM = 0x00000004L,
IPA_IP_FRAGMENTATION = 0x00000008L,
IPA_FILTERING = 0x00000010L,
IPA_IPV6 = 0x00000020L,
IPA_MULTICASTING = 0x00000040L,
IPA_IP_REASSEMBLY = 0x00000080L,
IPA_QUERY_ARP_COUNTERS = 0x00000100L,
IPA_QUERY_ARP_ADDR_INFO = 0x00000200L,
IPA_SETADAPTERPARMS = 0x00000400L,
IPA_VLAN_PRIO = 0x00000800L,
IPA_PASSTHRU = 0x00001000L,
IPA_FLUSH_ARP_SUPPORT = 0x00002000L,
IPA_FULL_VLAN = 0x00004000L,
IPA_INBOUND_PASSTHRU = 0x00008000L,
IPA_SOURCE_MAC = 0x00010000L,
IPA_OSA_MC_ROUTER = 0x00020000L,
IPA_QUERY_ARP_ASSIST = 0x00040000L,
IPA_INBOUND_TSO = 0x00080000L,
IPA_OUTBOUND_TSO = 0x00100000L,
};
/* SETIP/DELIP IPA Command: ***************************************************/
enum qeth_ipa_setdelip_flags {
QETH_IPA_SETDELIP_DEFAULT = 0x00L, /* default */
QETH_IPA_SETIP_VIPA_FLAG = 0x01L, /* no grat. ARP */
QETH_IPA_SETIP_TAKEOVER_FLAG = 0x02L, /* nofail on grat. ARP */
QETH_IPA_DELIP_ADDR_2_B_TAKEN_OVER = 0x20L,
QETH_IPA_DELIP_VIPA_FLAG = 0x40L,
QETH_IPA_DELIP_ADDR_NEEDS_SETIP = 0x80L,
};
/* SETADAPTER IPA Command: ****************************************************/
enum qeth_ipa_setadp_cmd {
IPA_SETADP_QUERY_COMMANDS_SUPPORTED = 0x0001,
IPA_SETADP_ALTER_MAC_ADDRESS = 0x0002,
IPA_SETADP_ADD_DELETE_GROUP_ADDRESS = 0x0004,
IPA_SETADP_ADD_DELETE_FUNCTIONAL_ADDR = 0x0008,
IPA_SETADP_SET_ADDRESSING_MODE = 0x0010,
IPA_SETADP_SET_CONFIG_PARMS = 0x0020,
IPA_SETADP_SET_CONFIG_PARMS_EXTENDED = 0x0040,
IPA_SETADP_SET_BROADCAST_MODE = 0x0080,
IPA_SETADP_SEND_OSA_MESSAGE = 0x0100,
IPA_SETADP_SET_SNMP_CONTROL = 0x0200,
IPA_SETADP_QUERY_CARD_INFO = 0x0400,
IPA_SETADP_SET_PROMISC_MODE = 0x0800,
};
enum qeth_ipa_mac_ops {
CHANGE_ADDR_READ_MAC = 0,
CHANGE_ADDR_REPLACE_MAC = 1,
CHANGE_ADDR_ADD_MAC = 2,
CHANGE_ADDR_DEL_MAC = 4,
CHANGE_ADDR_RESET_MAC = 8,
};
enum qeth_ipa_addr_ops {
CHANGE_ADDR_READ_ADDR = 0,
CHANGE_ADDR_ADD_ADDR = 1,
CHANGE_ADDR_DEL_ADDR = 2,
CHANGE_ADDR_FLUSH_ADDR_TABLE = 4,
};
enum qeth_ipa_promisc_modes {
SET_PROMISC_MODE_OFF = 0,
SET_PROMISC_MODE_ON = 1,
};
/* (SET)DELIP(M) IPA stuff ***************************************************/
struct qeth_ipacmd_setdelip4 {
__u8 ip_addr[4];
__u8 mask[4];
__u32 flags;
} __attribute__ ((packed));
struct qeth_ipacmd_setdelip6 {
__u8 ip_addr[16];
__u8 mask[16];
__u32 flags;
} __attribute__ ((packed));
struct qeth_ipacmd_setdelipm {
__u8 mac[6];
__u8 padding[2];
__u8 ip6[12];
__u8 ip4[4];
} __attribute__ ((packed));
struct qeth_ipacmd_layer2setdelmac {
__u32 mac_length;
__u8 mac[6];
} __attribute__ ((packed));
struct qeth_ipacmd_layer2setdelvlan {
__u16 vlan_id;
} __attribute__ ((packed));
struct qeth_ipacmd_setassparms_hdr {
__u32 assist_no;
__u16 length;
__u16 command_code;
__u16 return_code;
__u8 number_of_replies;
__u8 seq_no;
} __attribute__((packed));
struct qeth_arp_query_data {
__u16 request_bits;
__u16 reply_bits;
__u32 no_entries;
char data;
} __attribute__((packed));
/* used as parameter for arp_query reply */
struct qeth_arp_query_info {
__u32 udata_len;
__u16 mask_bits;
__u32 udata_offset;
__u32 no_entries;
char *udata;
};
/* SETASSPARMS IPA Command: */
struct qeth_ipacmd_setassparms {
struct qeth_ipacmd_setassparms_hdr hdr;
union {
__u32 flags_32bit;
struct qeth_arp_cache_entry add_arp_entry;
struct qeth_arp_query_data query_arp;
__u8 ip[16];
} data;
} __attribute__ ((packed));
/* SETRTG IPA Command: ****************************************************/
struct qeth_set_routing {
__u8 type;
};
/* SETADAPTERPARMS IPA Command: *******************************************/
struct qeth_query_cmds_supp {
__u32 no_lantypes_supp;
__u8 lan_type;
__u8 reserved1[3];
__u32 supported_cmds;
__u8 reserved2[8];
} __attribute__ ((packed));
struct qeth_change_addr {
__u32 cmd;
__u32 addr_size;
__u32 no_macs;
__u8 addr[OSA_ADDR_LEN];
} __attribute__ ((packed));
struct qeth_snmp_cmd {
__u8 token[16];
__u32 request;
__u32 interface;
__u32 returncode;
__u32 firmwarelevel;
__u32 seqno;
__u8 data;
} __attribute__ ((packed));
struct qeth_snmp_ureq_hdr {
__u32 data_len;
__u32 req_len;
__u32 reserved1;
__u32 reserved2;
} __attribute__ ((packed));
struct qeth_snmp_ureq {
struct qeth_snmp_ureq_hdr hdr;
struct qeth_snmp_cmd cmd;
} __attribute__((packed));
struct qeth_ipacmd_setadpparms_hdr {
__u32 supp_hw_cmds;
__u32 reserved1;
__u16 cmdlength;
__u16 reserved2;
__u32 command_code;
__u16 return_code;
__u8 used_total;
__u8 seq_no;
__u32 reserved3;
} __attribute__ ((packed));
struct qeth_ipacmd_setadpparms {
struct qeth_ipacmd_setadpparms_hdr hdr;
union {
struct qeth_query_cmds_supp query_cmds_supp;
struct qeth_change_addr change_addr;
struct qeth_snmp_cmd snmp;
__u32 mode;
} data;
} __attribute__ ((packed));
/* CREATE_ADDR IPA Command: ***********************************************/
struct qeth_create_destroy_address {
__u8 unique_id[8];
} __attribute__ ((packed));
/* Header for each IPA command */
struct qeth_ipacmd_hdr {
__u8 command;
__u8 initiator;
__u16 seqno;
__u16 return_code;
__u8 adapter_type;
__u8 rel_adapter_no;
__u8 prim_version_no;
__u8 param_count;
__u16 prot_version;
__u32 ipa_supported;
__u32 ipa_enabled;
} __attribute__ ((packed));
/* The IPA command itself */
struct qeth_ipa_cmd {
struct qeth_ipacmd_hdr hdr;
union {
struct qeth_ipacmd_setdelip4 setdelip4;
struct qeth_ipacmd_setdelip6 setdelip6;
struct qeth_ipacmd_setdelipm setdelipm;
struct qeth_ipacmd_setassparms setassparms;
struct qeth_ipacmd_layer2setdelmac setdelmac;
struct qeth_ipacmd_layer2setdelvlan setdelvlan;
struct qeth_create_destroy_address create_destroy_addr;
struct qeth_ipacmd_setadpparms setadapterparms;
struct qeth_set_routing setrtg;
} data;
} __attribute__ ((packed));
/*
* special command for ARP processing.
* this is not included in the setassparms command above, because it would
* cause problems with the size of struct qeth_ipacmd_setassparms otherwise
*/
enum qeth_ipa_arp_return_codes {
QETH_IPA_ARP_RC_SUCCESS = 0x0000,
QETH_IPA_ARP_RC_FAILED = 0x0001,
QETH_IPA_ARP_RC_NOTSUPP = 0x0002,
QETH_IPA_ARP_RC_OUT_OF_RANGE = 0x0003,
QETH_IPA_ARP_RC_Q_NOTSUPP = 0x0004,
QETH_IPA_ARP_RC_Q_NO_DATA = 0x0008,
};
extern char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc);
extern char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd);
#define QETH_SETASS_BASE_LEN (sizeof(struct qeth_ipacmd_hdr) + \
sizeof(struct qeth_ipacmd_setassparms_hdr))
#define QETH_IPA_ARP_DATA_POS(buffer) (buffer + IPA_PDU_HEADER_SIZE + \
QETH_SETASS_BASE_LEN)
#define QETH_SETADP_BASE_LEN (sizeof(struct qeth_ipacmd_hdr) + \
sizeof(struct qeth_ipacmd_setadpparms_hdr))
#define QETH_SNMP_SETADP_CMDLENGTH 16
#define QETH_ARP_DATA_SIZE 3968
#define QETH_ARP_CMD_LEN (QETH_ARP_DATA_SIZE + 8)
/* Helper functions */
#define IS_IPA_REPLY(cmd) ((cmd->hdr.initiator == IPA_CMD_INITIATOR_HOST) || \
(cmd->hdr.initiator == IPA_CMD_INITIATOR_OSA_REPLY))
/*****************************************************************************/
/* END OF IP Assist related definitions */
/*****************************************************************************/
extern unsigned char WRITE_CCW[];
extern unsigned char READ_CCW[];
extern unsigned char CM_ENABLE[];
#define CM_ENABLE_SIZE 0x63
#define QETH_CM_ENABLE_ISSUER_RM_TOKEN(buffer) (buffer + 0x2c)
#define QETH_CM_ENABLE_FILTER_TOKEN(buffer) (buffer + 0x53)
#define QETH_CM_ENABLE_USER_DATA(buffer) (buffer + 0x5b)
#define QETH_CM_ENABLE_RESP_FILTER_TOKEN(buffer) \
(PDU_ENCAPSULATION(buffer) + 0x13)
extern unsigned char CM_SETUP[];
#define CM_SETUP_SIZE 0x64
#define QETH_CM_SETUP_DEST_ADDR(buffer) (buffer + 0x2c)
#define QETH_CM_SETUP_CONNECTION_TOKEN(buffer) (buffer + 0x51)
#define QETH_CM_SETUP_FILTER_TOKEN(buffer) (buffer + 0x5a)
#define QETH_CM_SETUP_RESP_DEST_ADDR(buffer) \
(PDU_ENCAPSULATION(buffer) + 0x1a)
extern unsigned char ULP_ENABLE[];
#define ULP_ENABLE_SIZE 0x6b
#define QETH_ULP_ENABLE_LINKNUM(buffer) (buffer + 0x61)
#define QETH_ULP_ENABLE_DEST_ADDR(buffer) (buffer + 0x2c)
#define QETH_ULP_ENABLE_FILTER_TOKEN(buffer) (buffer + 0x53)
#define QETH_ULP_ENABLE_PORTNAME_AND_LL(buffer) (buffer + 0x62)
#define QETH_ULP_ENABLE_RESP_FILTER_TOKEN(buffer) \
(PDU_ENCAPSULATION(buffer) + 0x13)
#define QETH_ULP_ENABLE_RESP_MAX_MTU(buffer) \
(PDU_ENCAPSULATION(buffer) + 0x1f)
#define QETH_ULP_ENABLE_RESP_DIFINFO_LEN(buffer) \
(PDU_ENCAPSULATION(buffer) + 0x17)
#define QETH_ULP_ENABLE_RESP_LINK_TYPE(buffer) \
(PDU_ENCAPSULATION(buffer) + 0x2b)
/* Layer 2 definitions */
#define QETH_PROT_LAYER2 0x08
#define QETH_PROT_TCPIP 0x03
#define QETH_PROT_OSN2 0x0a
#define QETH_ULP_ENABLE_PROT_TYPE(buffer) (buffer + 0x50)
#define QETH_IPA_CMD_PROT_TYPE(buffer) (buffer + 0x19)
extern unsigned char ULP_SETUP[];
#define ULP_SETUP_SIZE 0x6c
#define QETH_ULP_SETUP_DEST_ADDR(buffer) (buffer + 0x2c)
#define QETH_ULP_SETUP_CONNECTION_TOKEN(buffer) (buffer + 0x51)
#define QETH_ULP_SETUP_FILTER_TOKEN(buffer) (buffer + 0x5a)
#define QETH_ULP_SETUP_CUA(buffer) (buffer + 0x68)
#define QETH_ULP_SETUP_REAL_DEVADDR(buffer) (buffer + 0x6a)
#define QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(buffer) \
(PDU_ENCAPSULATION(buffer) + 0x1a)
extern unsigned char DM_ACT[];
#define DM_ACT_SIZE 0x55
#define QETH_DM_ACT_DEST_ADDR(buffer) (buffer + 0x2c)
#define QETH_DM_ACT_CONNECTION_TOKEN(buffer) (buffer + 0x51)
#define QETH_TRANSPORT_HEADER_SEQ_NO(buffer) (buffer + 4)
#define QETH_PDU_HEADER_SEQ_NO(buffer) (buffer + 0x1c)
#define QETH_PDU_HEADER_ACK_SEQ_NO(buffer) (buffer + 0x20)
extern unsigned char IDX_ACTIVATE_READ[];
extern unsigned char IDX_ACTIVATE_WRITE[];
#define IDX_ACTIVATE_SIZE 0x22
#define QETH_IDX_ACT_PNO(buffer) (buffer+0x0b)
#define QETH_IDX_ACT_ISSUER_RM_TOKEN(buffer) (buffer + 0x0c)
#define QETH_IDX_NO_PORTNAME_REQUIRED(buffer) ((buffer)[0x0b] & 0x80)
#define QETH_IDX_ACT_FUNC_LEVEL(buffer) (buffer + 0x10)
#define QETH_IDX_ACT_DATASET_NAME(buffer) (buffer + 0x16)
#define QETH_IDX_ACT_QDIO_DEV_CUA(buffer) (buffer + 0x1e)
#define QETH_IDX_ACT_QDIO_DEV_REALADDR(buffer) (buffer + 0x20)
#define QETH_IS_IDX_ACT_POS_REPLY(buffer) (((buffer)[0x08] & 3) == 2)
#define QETH_IDX_REPLY_LEVEL(buffer) (buffer + 0x12)
#define QETH_IDX_ACT_CAUSE_CODE(buffer) (buffer)[0x09]
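/* PDU_ENCAPSULATION chains the length/offset bytes starting at offset 0x0b to
 * compute where the encapsulated PDU begins inside the buffer; IS_IPA then
 * checks for the 0xc1 marker that identifies an IPA frame. */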
#define PDU_ENCAPSULATION(buffer) \
(buffer + *(buffer + (*(buffer + 0x0b)) + \
*(buffer + *(buffer + 0x0b) + 0x11) + 0x07))
#define IS_IPA(buffer) \
((buffer) && \
(*(buffer + ((*(buffer + 0x0b)) + 4)) == 0xc1))
#define ADDR_FRAME_TYPE_DIX 1
#define ADDR_FRAME_TYPE_802_3 2
#define ADDR_FRAME_TYPE_TR_WITHOUT_SR 0x10
#define ADDR_FRAME_TYPE_TR_WITH_SR 0x20
#endif


@@ -0,0 +1,701 @@
/*
* drivers/s390/net/qeth_core_offl.c
*
* Copyright IBM Corp. 2007
* Author(s): Thomas Spatzier <tspat@de.ibm.com>,
* Frank Blaschka <frank.blaschka@de.ibm.com>
*/
#include <linux/errno.h>
#include <linux/ip.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/kernel.h>
#include <linux/tcp.h>
#include <net/tcp.h>
#include <linux/skbuff.h>
#include <net/ip.h>
#include <net/ip6_checksum.h>
#include "qeth_core.h"
#include "qeth_core_mpc.h"
#include "qeth_core_offl.h"
int qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *queue,
struct qeth_eddp_context *ctx)
{
int index = queue->next_buf_to_fill;
int elements_needed = ctx->num_elements;
int elements_in_buffer;
int skbs_in_buffer;
int buffers_needed = 0;
QETH_DBF_TEXT(trace, 5, "eddpcbfc");
while (elements_needed > 0) {
buffers_needed++;
if (atomic_read(&queue->bufs[index].state) !=
QETH_QDIO_BUF_EMPTY)
return -EBUSY;
elements_in_buffer = QETH_MAX_BUFFER_ELEMENTS(queue->card) -
queue->bufs[index].next_element_to_fill;
skbs_in_buffer = elements_in_buffer / ctx->elements_per_skb;
elements_needed -= skbs_in_buffer * ctx->elements_per_skb;
index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
}
return buffers_needed;
}
static void qeth_eddp_free_context(struct qeth_eddp_context *ctx)
{
int i;
QETH_DBF_TEXT(trace, 5, "eddpfctx");
for (i = 0; i < ctx->num_pages; ++i)
free_page((unsigned long)ctx->pages[i]);
kfree(ctx->pages);
kfree(ctx->elements);
kfree(ctx);
}
static void qeth_eddp_get_context(struct qeth_eddp_context *ctx)
{
atomic_inc(&ctx->refcnt);
}
void qeth_eddp_put_context(struct qeth_eddp_context *ctx)
{
if (atomic_dec_return(&ctx->refcnt) == 0)
qeth_eddp_free_context(ctx);
}
EXPORT_SYMBOL_GPL(qeth_eddp_put_context);
void qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf)
{
struct qeth_eddp_context_reference *ref;
QETH_DBF_TEXT(trace, 6, "eddprctx");
while (!list_empty(&buf->ctx_list)) {
ref = list_entry(buf->ctx_list.next,
struct qeth_eddp_context_reference, list);
qeth_eddp_put_context(ref->ctx);
list_del(&ref->list);
kfree(ref);
}
}
static int qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf,
struct qeth_eddp_context *ctx)
{
struct qeth_eddp_context_reference *ref;
QETH_DBF_TEXT(trace, 6, "eddprfcx");
ref = kmalloc(sizeof(struct qeth_eddp_context_reference), GFP_ATOMIC);
if (ref == NULL)
return -ENOMEM;
qeth_eddp_get_context(ctx);
ref->ctx = ctx;
list_add_tail(&ref->list, &buf->ctx_list);
return 0;
}
int qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
struct qeth_eddp_context *ctx, int index)
{
struct qeth_qdio_out_buffer *buf = NULL;
struct qdio_buffer *buffer;
int elements = ctx->num_elements;
int element = 0;
int flush_cnt = 0;
int must_refcnt = 1;
int i;
QETH_DBF_TEXT(trace, 5, "eddpfibu");
while (elements > 0) {
buf = &queue->bufs[index];
if (atomic_read(&buf->state) != QETH_QDIO_BUF_EMPTY) {
/* normally this should not happen since we checked for
* available elements in qeth_eddp_check_buffers_for_context
*/
if (element == 0)
return -EBUSY;
else {
PRINT_WARN("could only partially fill eddp "
"buffer!\n");
goto out;
}
}
/* check if the whole next skb fits into current buffer */
if ((QETH_MAX_BUFFER_ELEMENTS(queue->card) -
buf->next_element_to_fill)
< ctx->elements_per_skb){
/* no -> go to next buffer */
atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
flush_cnt++;
/* new buffer, so we have to add ctx to the buffer's ctx_list
* and increment ctx's refcnt */
must_refcnt = 1;
continue;
}
if (must_refcnt) {
must_refcnt = 0;
if (qeth_eddp_buf_ref_context(buf, ctx)) {
PRINT_WARN("no memory to create eddp context "
"reference\n");
goto out_check;
}
}
buffer = buf->buffer;
/* fill one skb into buffer */
for (i = 0; i < ctx->elements_per_skb; ++i) {
if (ctx->elements[element].length != 0) {
buffer->element[buf->next_element_to_fill].
addr = ctx->elements[element].addr;
buffer->element[buf->next_element_to_fill].
length = ctx->elements[element].length;
buffer->element[buf->next_element_to_fill].
flags = ctx->elements[element].flags;
buf->next_element_to_fill++;
}
element++;
elements--;
}
}
out_check:
if (!queue->do_pack) {
QETH_DBF_TEXT(trace, 6, "fillbfnp");
/* set state to PRIMED -> will be flushed */
if (buf->next_element_to_fill > 0) {
atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
flush_cnt++;
}
} else {
if (queue->card->options.performance_stats)
queue->card->perf_stats.skbs_sent_pack++;
QETH_DBF_TEXT(trace, 6, "fillbfpa");
if (buf->next_element_to_fill >=
QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
/*
* packed buffer is full -> set state PRIMED
* -> will be flushed
*/
atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
flush_cnt++;
}
}
out:
return flush_cnt;
}
static void qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
struct qeth_eddp_data *eddp, int data_len)
{
u8 *page;
int page_remainder;
int page_offset;
int pkt_len;
struct qeth_eddp_element *element;
QETH_DBF_TEXT(trace, 5, "eddpcrsh");
page = ctx->pages[ctx->offset >> PAGE_SHIFT];
page_offset = ctx->offset % PAGE_SIZE;
element = &ctx->elements[ctx->num_elements];
pkt_len = eddp->nhl + eddp->thl + data_len;
/* FIXME: layer2 and VLAN !!! */
if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
pkt_len += ETH_HLEN;
if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
pkt_len += VLAN_HLEN;
/* does complete packet fit in current page ? */
page_remainder = PAGE_SIZE - page_offset;
if (page_remainder < (sizeof(struct qeth_hdr) + pkt_len)) {
/* no -> go to start of next page */
ctx->offset += page_remainder;
page = ctx->pages[ctx->offset >> PAGE_SHIFT];
page_offset = 0;
}
memcpy(page + page_offset, &eddp->qh, sizeof(struct qeth_hdr));
element->addr = page + page_offset;
element->length = sizeof(struct qeth_hdr);
ctx->offset += sizeof(struct qeth_hdr);
page_offset += sizeof(struct qeth_hdr);
	/* add mac header (layer 2 only) */
if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
memcpy(page + page_offset, &eddp->mac, ETH_HLEN);
element->length += ETH_HLEN;
ctx->offset += ETH_HLEN;
page_offset += ETH_HLEN;
}
/* add VLAN tag */
if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
memcpy(page + page_offset, &eddp->vlan, VLAN_HLEN);
element->length += VLAN_HLEN;
ctx->offset += VLAN_HLEN;
page_offset += VLAN_HLEN;
}
/* add network header */
memcpy(page + page_offset, (u8 *)&eddp->nh, eddp->nhl);
element->length += eddp->nhl;
eddp->nh_in_ctx = page + page_offset;
ctx->offset += eddp->nhl;
page_offset += eddp->nhl;
/* add transport header */
memcpy(page + page_offset, (u8 *)&eddp->th, eddp->thl);
element->length += eddp->thl;
eddp->th_in_ctx = page + page_offset;
ctx->offset += eddp->thl;
}
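/* copy 'len' bytes of TCP payload from the skb (linear part and page
 * fragments) to 'dst', updating the running checksum in 'hcsum' */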
static void qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp,
int len, __wsum *hcsum)
{
struct skb_frag_struct *frag;
int left_in_frag;
int copy_len;
u8 *src;
QETH_DBF_TEXT(trace, 5, "eddpcdtc");
if (skb_shinfo(eddp->skb)->nr_frags == 0) {
skb_copy_from_linear_data_offset(eddp->skb, eddp->skb_offset,
dst, len);
*hcsum = csum_partial(eddp->skb->data + eddp->skb_offset, len,
*hcsum);
eddp->skb_offset += len;
} else {
while (len > 0) {
if (eddp->frag < 0) {
/* we're in skb->data */
left_in_frag = (eddp->skb->len -
eddp->skb->data_len)
- eddp->skb_offset;
src = eddp->skb->data + eddp->skb_offset;
} else {
frag = &skb_shinfo(eddp->skb)->frags[
eddp->frag];
left_in_frag = frag->size - eddp->frag_offset;
src = (u8 *)((page_to_pfn(frag->page) <<
PAGE_SHIFT) + frag->page_offset +
eddp->frag_offset);
}
if (left_in_frag <= 0) {
eddp->frag++;
eddp->frag_offset = 0;
continue;
}
copy_len = min(left_in_frag, len);
memcpy(dst, src, copy_len);
*hcsum = csum_partial(src, copy_len, *hcsum);
dst += copy_len;
eddp->frag_offset += copy_len;
eddp->skb_offset += copy_len;
len -= copy_len;
}
}
}
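/* copy one segment's payload into the context pages, creating
 * FIRST/MIDDLE/LAST_FRAG buffer elements at page boundaries, and fold
 * the accumulated checksum into the TCP header stored in the context */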
static void qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx,
struct qeth_eddp_data *eddp, int data_len, __wsum hcsum)
{
u8 *page;
int page_remainder;
int page_offset;
struct qeth_eddp_element *element;
int first_lap = 1;
QETH_DBF_TEXT(trace, 5, "eddpcsdt");
page = ctx->pages[ctx->offset >> PAGE_SHIFT];
page_offset = ctx->offset % PAGE_SIZE;
element = &ctx->elements[ctx->num_elements];
while (data_len) {
page_remainder = PAGE_SIZE - page_offset;
if (page_remainder < data_len) {
qeth_eddp_copy_data_tcp(page + page_offset, eddp,
page_remainder, &hcsum);
element->length += page_remainder;
if (first_lap)
element->flags = SBAL_FLAGS_FIRST_FRAG;
else
element->flags = SBAL_FLAGS_MIDDLE_FRAG;
ctx->num_elements++;
element++;
data_len -= page_remainder;
ctx->offset += page_remainder;
page = ctx->pages[ctx->offset >> PAGE_SHIFT];
page_offset = 0;
element->addr = page + page_offset;
} else {
qeth_eddp_copy_data_tcp(page + page_offset, eddp,
data_len, &hcsum);
element->length += data_len;
if (!first_lap)
element->flags = SBAL_FLAGS_LAST_FRAG;
ctx->num_elements++;
ctx->offset += data_len;
data_len = 0;
}
first_lap = 0;
}
((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum);
}
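/* compute the IPv4 pseudo header checksum and add the TCP header to
 * it; the payload is added later while it is copied into the context */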
static __wsum qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp,
int data_len)
{
__wsum phcsum; /* pseudo header checksum */
QETH_DBF_TEXT(trace, 5, "eddpckt4");
eddp->th.tcp.h.check = 0;
/* compute pseudo header checksum */
phcsum = csum_tcpudp_nofold(eddp->nh.ip4.h.saddr, eddp->nh.ip4.h.daddr,
eddp->thl + data_len, IPPROTO_TCP, 0);
/* compute checksum of tcp header */
return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum);
}
static __wsum qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp,
int data_len)
{
__be32 proto;
__wsum phcsum; /* pseudo header checksum */
QETH_DBF_TEXT(trace, 5, "eddpckt6");
eddp->th.tcp.h.check = 0;
/* compute pseudo header checksum */
phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.saddr,
sizeof(struct in6_addr), 0);
phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.daddr,
sizeof(struct in6_addr), phcsum);
proto = htonl(IPPROTO_TCP);
phcsum = csum_partial((u8 *)&proto, sizeof(u32), phcsum);
return phcsum;
}
static struct qeth_eddp_data *qeth_eddp_create_eddp_data(struct qeth_hdr *qh,
u8 *nh, u8 nhl, u8 *th, u8 thl)
{
struct qeth_eddp_data *eddp;
QETH_DBF_TEXT(trace, 5, "eddpcrda");
eddp = kzalloc(sizeof(struct qeth_eddp_data), GFP_ATOMIC);
if (eddp) {
eddp->nhl = nhl;
eddp->thl = thl;
memcpy(&eddp->qh, qh, sizeof(struct qeth_hdr));
memcpy(&eddp->nh, nh, nhl);
memcpy(&eddp->th, th, thl);
eddp->frag = -1; /* initially we're in skb->data */
}
return eddp;
}
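/* walk the large skb in gso_size sized chunks: for every segment adjust
 * the qeth, IP and TCP headers (lengths, IP id, TCP sequence number,
 * FIN/PSH only on the last segment), compute the checksum and fill the
 * segment's headers and payload into the context */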
static void __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
struct qeth_eddp_data *eddp)
{
struct tcphdr *tcph;
int data_len;
__wsum hcsum;
QETH_DBF_TEXT(trace, 5, "eddpftcp");
eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl;
if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
eddp->skb_offset += sizeof(struct ethhdr);
if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
eddp->skb_offset += VLAN_HLEN;
}
tcph = tcp_hdr(eddp->skb);
while (eddp->skb_offset < eddp->skb->len) {
data_len = min((int)skb_shinfo(eddp->skb)->gso_size,
(int)(eddp->skb->len - eddp->skb_offset));
/* prepare qdio hdr */
if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
eddp->qh.hdr.l2.pkt_length = data_len + ETH_HLEN +
eddp->nhl + eddp->thl;
if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
eddp->qh.hdr.l2.pkt_length += VLAN_HLEN;
} else
eddp->qh.hdr.l3.length = data_len + eddp->nhl +
eddp->thl;
/* prepare ip hdr */
if (eddp->skb->protocol == htons(ETH_P_IP)) {
eddp->nh.ip4.h.tot_len = htons(data_len + eddp->nhl +
eddp->thl);
eddp->nh.ip4.h.check = 0;
eddp->nh.ip4.h.check =
ip_fast_csum((u8 *)&eddp->nh.ip4.h,
eddp->nh.ip4.h.ihl);
} else
eddp->nh.ip6.h.payload_len = htons(data_len +
eddp->thl);
/* prepare tcp hdr */
if (data_len == (eddp->skb->len - eddp->skb_offset)) {
/* last segment -> set FIN and PSH flags */
eddp->th.tcp.h.fin = tcph->fin;
eddp->th.tcp.h.psh = tcph->psh;
}
if (eddp->skb->protocol == htons(ETH_P_IP))
hcsum = qeth_eddp_check_tcp4_hdr(eddp, data_len);
else
hcsum = qeth_eddp_check_tcp6_hdr(eddp, data_len);
/* fill the next segment into the context */
qeth_eddp_create_segment_hdrs(ctx, eddp, data_len);
qeth_eddp_create_segment_data_tcp(ctx, eddp, data_len, hcsum);
if (eddp->skb_offset >= eddp->skb->len)
break;
/* prepare headers for next round */
if (eddp->skb->protocol == htons(ETH_P_IP))
eddp->nh.ip4.h.id = htons(ntohs(eddp->nh.ip4.h.id) + 1);
eddp->th.tcp.h.seq = htonl(ntohl(eddp->th.tcp.h.seq) +
data_len);
}
}
static int qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
struct sk_buff *skb, struct qeth_hdr *qhdr)
{
struct qeth_eddp_data *eddp = NULL;
QETH_DBF_TEXT(trace, 5, "eddpficx");
/* create our segmentation headers and copy original headers */
if (skb->protocol == htons(ETH_P_IP))
eddp = qeth_eddp_create_eddp_data(qhdr,
skb_network_header(skb),
ip_hdrlen(skb),
skb_transport_header(skb),
tcp_hdrlen(skb));
else
eddp = qeth_eddp_create_eddp_data(qhdr,
skb_network_header(skb),
sizeof(struct ipv6hdr),
skb_transport_header(skb),
tcp_hdrlen(skb));
if (eddp == NULL) {
QETH_DBF_TEXT(trace, 2, "eddpfcnm");
return -ENOMEM;
}
if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
skb_set_mac_header(skb, sizeof(struct qeth_hdr));
memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN);
if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
eddp->vlan[0] = skb->protocol;
eddp->vlan[1] = htons(vlan_tx_tag_get(skb));
}
}
/* the next flags will only be set on the last segment */
eddp->th.tcp.h.fin = 0;
eddp->th.tcp.h.psh = 0;
eddp->skb = skb;
/* begin segmentation and fill context */
__qeth_eddp_fill_context_tcp(ctx, eddp);
kfree(eddp);
return 0;
}
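/* work out how many context pages and buffer elements are needed:
 * either several segments fit into one page (one element per segment)
 * or one segment spans several pages/elements */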
static void qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx,
struct sk_buff *skb, int hdr_len)
{
int skbs_per_page;
QETH_DBF_TEXT(trace, 5, "eddpcanp");
/* can we put multiple skbs in one page? */
skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->gso_size + hdr_len);
if (skbs_per_page > 1) {
ctx->num_pages = (skb_shinfo(skb)->gso_segs + 1) /
skbs_per_page + 1;
ctx->elements_per_skb = 1;
} else {
/* no -> how many elements per skb? */
ctx->elements_per_skb = (skb_shinfo(skb)->gso_size + hdr_len +
PAGE_SIZE) >> PAGE_SHIFT;
ctx->num_pages = ctx->elements_per_skb *
(skb_shinfo(skb)->gso_segs + 1);
}
ctx->num_elements = ctx->elements_per_skb *
(skb_shinfo(skb)->gso_segs + 1);
}
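/* allocate an eddp context including its zeroed pages and the element
 * array, sized according to qeth_eddp_calc_num_pages() */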
static struct qeth_eddp_context *qeth_eddp_create_context_generic(
struct qeth_card *card, struct sk_buff *skb, int hdr_len)
{
struct qeth_eddp_context *ctx = NULL;
u8 *addr;
int i;
QETH_DBF_TEXT(trace, 5, "creddpcg");
/* create the context and allocate pages */
ctx = kzalloc(sizeof(struct qeth_eddp_context), GFP_ATOMIC);
if (ctx == NULL) {
QETH_DBF_TEXT(trace, 2, "ceddpcn1");
return NULL;
}
ctx->type = QETH_LARGE_SEND_EDDP;
qeth_eddp_calc_num_pages(ctx, skb, hdr_len);
if (ctx->elements_per_skb > QETH_MAX_BUFFER_ELEMENTS(card)) {
QETH_DBF_TEXT(trace, 2, "ceddpcis");
kfree(ctx);
return NULL;
}
ctx->pages = kcalloc(ctx->num_pages, sizeof(u8 *), GFP_ATOMIC);
if (ctx->pages == NULL) {
QETH_DBF_TEXT(trace, 2, "ceddpcn2");
kfree(ctx);
return NULL;
}
for (i = 0; i < ctx->num_pages; ++i) {
addr = (u8 *)get_zeroed_page(GFP_ATOMIC);
if (addr == NULL) {
QETH_DBF_TEXT(trace, 2, "ceddpcn3");
ctx->num_pages = i;
qeth_eddp_free_context(ctx);
return NULL;
}
ctx->pages[i] = addr;
}
ctx->elements = kcalloc(ctx->num_elements,
sizeof(struct qeth_eddp_element), GFP_ATOMIC);
if (ctx->elements == NULL) {
QETH_DBF_TEXT(trace, 2, "ceddpcn4");
qeth_eddp_free_context(ctx);
return NULL;
}
	/* reset num_elements; it is incremented again while the segment
	 * data is copied into the context to reflect the number of
	 * elements actually used */
ctx->num_elements = 0;
return ctx;
}
static struct qeth_eddp_context *qeth_eddp_create_context_tcp(
struct qeth_card *card, struct sk_buff *skb,
struct qeth_hdr *qhdr)
{
struct qeth_eddp_context *ctx = NULL;
QETH_DBF_TEXT(trace, 5, "creddpct");
if (skb->protocol == htons(ETH_P_IP))
ctx = qeth_eddp_create_context_generic(card, skb,
(sizeof(struct qeth_hdr) +
ip_hdrlen(skb) +
tcp_hdrlen(skb)));
else if (skb->protocol == htons(ETH_P_IPV6))
ctx = qeth_eddp_create_context_generic(card, skb,
sizeof(struct qeth_hdr) + sizeof(struct ipv6hdr) +
tcp_hdrlen(skb));
else
QETH_DBF_TEXT(trace, 2, "cetcpinv");
if (ctx == NULL) {
QETH_DBF_TEXT(trace, 2, "creddpnl");
return NULL;
}
if (qeth_eddp_fill_context_tcp(ctx, skb, qhdr)) {
QETH_DBF_TEXT(trace, 2, "ceddptfe");
qeth_eddp_free_context(ctx);
return NULL;
}
atomic_set(&ctx->refcnt, 1);
return ctx;
}
struct qeth_eddp_context *qeth_eddp_create_context(struct qeth_card *card,
struct sk_buff *skb, struct qeth_hdr *qhdr,
unsigned char sk_protocol)
{
QETH_DBF_TEXT(trace, 5, "creddpc");
switch (sk_protocol) {
case IPPROTO_TCP:
return qeth_eddp_create_context_tcp(card, skb, qhdr);
default:
QETH_DBF_TEXT(trace, 2, "eddpinvp");
}
return NULL;
}
EXPORT_SYMBOL_GPL(qeth_eddp_create_context);
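/* prepare the TSO extension header from the skb's GSO parameters and
 * preset the IP and TCP header fields the way the OSA adapter expects
 * them for hardware segmentation */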
void qeth_tso_fill_header(struct qeth_card *card, struct qeth_hdr *qhdr,
struct sk_buff *skb)
{
struct qeth_hdr_tso *hdr = (struct qeth_hdr_tso *)qhdr;
struct tcphdr *tcph = tcp_hdr(skb);
struct iphdr *iph = ip_hdr(skb);
struct ipv6hdr *ip6h = ipv6_hdr(skb);
QETH_DBF_TEXT(trace, 5, "tsofhdr");
	/* fix header to TSO values ... */
	hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
	/* set values which are fixed for the first approach ... */
hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
hdr->ext.imb_hdr_no = 1;
hdr->ext.hdr_type = 1;
hdr->ext.hdr_version = 1;
hdr->ext.hdr_len = 28;
	/* insert non-fixed values */
hdr->ext.mss = skb_shinfo(skb)->gso_size;
hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4);
hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
sizeof(struct qeth_hdr_tso));
tcph->check = 0;
	if (skb->protocol == htons(ETH_P_IPV6)) {
ip6h->payload_len = 0;
tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
0, IPPROTO_TCP, 0);
} else {
		/* OSA wants us to set these values ... */
tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
0, IPPROTO_TCP, 0);
iph->tot_len = 0;
iph->check = 0;
}
}
EXPORT_SYMBOL_GPL(qeth_tso_fill_header);
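/* fill in the TCP or UDP checksum of an IPv4 or IPv6 packet in
 * software */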
void qeth_tx_csum(struct sk_buff *skb)
{
int tlen;
if (skb->protocol == htons(ETH_P_IP)) {
tlen = ntohs(ip_hdr(skb)->tot_len) - (ip_hdr(skb)->ihl << 2);
switch (ip_hdr(skb)->protocol) {
case IPPROTO_TCP:
tcp_hdr(skb)->check = 0;
tcp_hdr(skb)->check = csum_tcpudp_magic(
ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
tlen, ip_hdr(skb)->protocol,
skb_checksum(skb, skb_transport_offset(skb),
tlen, 0));
break;
case IPPROTO_UDP:
udp_hdr(skb)->check = 0;
udp_hdr(skb)->check = csum_tcpudp_magic(
ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
tlen, ip_hdr(skb)->protocol,
skb_checksum(skb, skb_transport_offset(skb),
tlen, 0));
break;
}
} else if (skb->protocol == htons(ETH_P_IPV6)) {
switch (ipv6_hdr(skb)->nexthdr) {
case IPPROTO_TCP:
tcp_hdr(skb)->check = 0;
tcp_hdr(skb)->check = csum_ipv6_magic(
&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
ipv6_hdr(skb)->payload_len,
ipv6_hdr(skb)->nexthdr,
skb_checksum(skb, skb_transport_offset(skb),
ipv6_hdr(skb)->payload_len, 0));
break;
case IPPROTO_UDP:
udp_hdr(skb)->check = 0;
udp_hdr(skb)->check = csum_ipv6_magic(
&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
ipv6_hdr(skb)->payload_len,
ipv6_hdr(skb)->nexthdr,
skb_checksum(skb, skb_transport_offset(skb),
ipv6_hdr(skb)->payload_len, 0));
break;
}
}
}
EXPORT_SYMBOL_GPL(qeth_tx_csum);

View File

@ -0,0 +1,76 @@
/*
* drivers/s390/net/qeth_core_offl.h
*
* Copyright IBM Corp. 2007
* Author(s): Thomas Spatzier <tspat@de.ibm.com>,
* Frank Blaschka <frank.blaschka@de.ibm.com>
*/
#ifndef __QETH_CORE_OFFL_H__
#define __QETH_CORE_OFFL_H__
struct qeth_eddp_element {
u32 flags;
u32 length;
void *addr;
};
struct qeth_eddp_context {
atomic_t refcnt;
enum qeth_large_send_types type;
int num_pages; /* # of allocated pages */
u8 **pages; /* pointers to pages */
int offset; /* offset in ctx during creation */
int num_elements; /* # of required 'SBALEs' */
struct qeth_eddp_element *elements; /* array of 'SBALEs' */
	int elements_per_skb;		    /* # of 'SBALEs' per skb */
};
struct qeth_eddp_context_reference {
struct list_head list;
struct qeth_eddp_context *ctx;
};
struct qeth_eddp_data {
struct qeth_hdr qh;
struct ethhdr mac;
__be16 vlan[2];
union {
struct {
struct iphdr h;
u8 options[40];
} ip4;
struct {
struct ipv6hdr h;
} ip6;
} nh;
u8 nhl;
void *nh_in_ctx; /* address of nh within the ctx */
union {
struct {
struct tcphdr h;
u8 options[40];
} tcp;
} th;
u8 thl;
void *th_in_ctx; /* address of th within the ctx */
struct sk_buff *skb;
int skb_offset;
int frag;
int frag_offset;
} __attribute__ ((packed));
extern struct qeth_eddp_context *qeth_eddp_create_context(struct qeth_card *,
struct sk_buff *, struct qeth_hdr *, unsigned char);
extern void qeth_eddp_put_context(struct qeth_eddp_context *);
extern int qeth_eddp_fill_buffer(struct qeth_qdio_out_q *,
struct qeth_eddp_context *, int);
extern void qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *);
extern int qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *,
struct qeth_eddp_context *);
void qeth_tso_fill_header(struct qeth_card *, struct qeth_hdr *,
struct sk_buff *);
void qeth_tx_csum(struct sk_buff *skb);
#endif /* __QETH_CORE_OFFL_H__ */

View File

@ -0,0 +1,651 @@
/*
* drivers/s390/net/qeth_core_sys.c
*
* Copyright IBM Corp. 2007
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
* Frank Pavlic <fpavlic@de.ibm.com>,
* Thomas Spatzier <tspat@de.ibm.com>,
* Frank Blaschka <frank.blaschka@de.ibm.com>
*/
#include <linux/list.h>
#include <linux/rwsem.h>
#include <asm/ebcdic.h>
#include "qeth_core.h"
static ssize_t qeth_dev_state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
if (!card)
return -EINVAL;
switch (card->state) {
case CARD_STATE_DOWN:
return sprintf(buf, "DOWN\n");
case CARD_STATE_HARDSETUP:
return sprintf(buf, "HARDSETUP\n");
case CARD_STATE_SOFTSETUP:
return sprintf(buf, "SOFTSETUP\n");
case CARD_STATE_UP:
if (card->lan_online)
return sprintf(buf, "UP (LAN ONLINE)\n");
else
return sprintf(buf, "UP (LAN OFFLINE)\n");
case CARD_STATE_RECOVER:
return sprintf(buf, "RECOVER\n");
default:
return sprintf(buf, "UNKNOWN\n");
}
}
static DEVICE_ATTR(state, 0444, qeth_dev_state_show, NULL);
static ssize_t qeth_dev_chpid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
if (!card)
return -EINVAL;
return sprintf(buf, "%02X\n", card->info.chpid);
}
static DEVICE_ATTR(chpid, 0444, qeth_dev_chpid_show, NULL);
static ssize_t qeth_dev_if_name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
if (!card)
return -EINVAL;
return sprintf(buf, "%s\n", QETH_CARD_IFNAME(card));
}
static DEVICE_ATTR(if_name, 0444, qeth_dev_if_name_show, NULL);
static ssize_t qeth_dev_card_type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
if (!card)
return -EINVAL;
return sprintf(buf, "%s\n", qeth_get_cardname_short(card));
}
static DEVICE_ATTR(card_type, 0444, qeth_dev_card_type_show, NULL);
static inline const char *qeth_get_bufsize_str(struct qeth_card *card)
{
if (card->qdio.in_buf_size == 16384)
return "16k";
else if (card->qdio.in_buf_size == 24576)
return "24k";
else if (card->qdio.in_buf_size == 32768)
return "32k";
else if (card->qdio.in_buf_size == 40960)
return "40k";
else
return "64k";
}
static ssize_t qeth_dev_inbuf_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
if (!card)
return -EINVAL;
return sprintf(buf, "%s\n", qeth_get_bufsize_str(card));
}
static DEVICE_ATTR(inbuf_size, 0444, qeth_dev_inbuf_size_show, NULL);
static ssize_t qeth_dev_portno_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
if (!card)
return -EINVAL;
return sprintf(buf, "%i\n", card->info.portno);
}
static ssize_t qeth_dev_portno_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
unsigned int portno;
if (!card)
return -EINVAL;
if ((card->state != CARD_STATE_DOWN) &&
(card->state != CARD_STATE_RECOVER))
return -EPERM;
portno = simple_strtoul(buf, &tmp, 16);
if (portno > QETH_MAX_PORTNO) {
PRINT_WARN("portno 0x%X is out of range\n", portno);
return -EINVAL;
}
card->info.portno = portno;
return count;
}
static DEVICE_ATTR(portno, 0644, qeth_dev_portno_show, qeth_dev_portno_store);
static ssize_t qeth_dev_portname_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
char portname[9] = {0, };
if (!card)
return -EINVAL;
if (card->info.portname_required) {
memcpy(portname, card->info.portname + 1, 8);
EBCASC(portname, 8);
return sprintf(buf, "%s\n", portname);
} else
return sprintf(buf, "no portname required\n");
}
static ssize_t qeth_dev_portname_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
int i;
if (!card)
return -EINVAL;
if ((card->state != CARD_STATE_DOWN) &&
(card->state != CARD_STATE_RECOVER))
return -EPERM;
tmp = strsep((char **) &buf, "\n");
if ((strlen(tmp) > 8) || (strlen(tmp) == 0))
return -EINVAL;
card->info.portname[0] = strlen(tmp);
/* for beauty reasons */
for (i = 1; i < 9; i++)
card->info.portname[i] = ' ';
strcpy(card->info.portname + 1, tmp);
ASCEBC(card->info.portname + 1, 8);
return count;
}
static DEVICE_ATTR(portname, 0644, qeth_dev_portname_show,
qeth_dev_portname_store);
static ssize_t qeth_dev_prioqing_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
if (!card)
return -EINVAL;
switch (card->qdio.do_prio_queueing) {
case QETH_PRIO_Q_ING_PREC:
return sprintf(buf, "%s\n", "by precedence");
case QETH_PRIO_Q_ING_TOS:
return sprintf(buf, "%s\n", "by type of service");
default:
return sprintf(buf, "always queue %i\n",
card->qdio.default_out_queue);
}
}
static ssize_t qeth_dev_prioqing_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
if (!card)
return -EINVAL;
if ((card->state != CARD_STATE_DOWN) &&
(card->state != CARD_STATE_RECOVER))
return -EPERM;
	/* devices that provide only one outbound queue (e.g. 1920
	 * devices) cannot do priority queueing at all
	 */
if (card->qdio.no_out_queues == 1) {
PRINT_WARN("Priority queueing disabled due "
"to hardware limitations!\n");
card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
return -EPERM;
}
tmp = strsep((char **) &buf, "\n");
if (!strcmp(tmp, "prio_queueing_prec"))
card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_PREC;
else if (!strcmp(tmp, "prio_queueing_tos"))
card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_TOS;
else if (!strcmp(tmp, "no_prio_queueing:0")) {
card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
card->qdio.default_out_queue = 0;
} else if (!strcmp(tmp, "no_prio_queueing:1")) {
card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
card->qdio.default_out_queue = 1;
} else if (!strcmp(tmp, "no_prio_queueing:2")) {
card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
card->qdio.default_out_queue = 2;
} else if (!strcmp(tmp, "no_prio_queueing:3")) {
card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
card->qdio.default_out_queue = 3;
} else if (!strcmp(tmp, "no_prio_queueing")) {
card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
} else {
PRINT_WARN("Unknown queueing type '%s'\n", tmp);
return -EINVAL;
}
return count;
}
static DEVICE_ATTR(priority_queueing, 0644, qeth_dev_prioqing_show,
qeth_dev_prioqing_store);
static ssize_t qeth_dev_bufcnt_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
if (!card)
return -EINVAL;
return sprintf(buf, "%i\n", card->qdio.in_buf_pool.buf_count);
}
static ssize_t qeth_dev_bufcnt_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
int cnt, old_cnt;
int rc;
if (!card)
return -EINVAL;
if ((card->state != CARD_STATE_DOWN) &&
(card->state != CARD_STATE_RECOVER))
return -EPERM;
old_cnt = card->qdio.in_buf_pool.buf_count;
cnt = simple_strtoul(buf, &tmp, 10);
cnt = (cnt < QETH_IN_BUF_COUNT_MIN) ? QETH_IN_BUF_COUNT_MIN :
((cnt > QETH_IN_BUF_COUNT_MAX) ? QETH_IN_BUF_COUNT_MAX : cnt);
if (old_cnt != cnt) {
rc = qeth_realloc_buffer_pool(card, cnt);
if (rc)
PRINT_WARN("Error (%d) while setting "
"buffer count.\n", rc);
}
return count;
}
static DEVICE_ATTR(buffer_count, 0644, qeth_dev_bufcnt_show,
qeth_dev_bufcnt_store);
static ssize_t qeth_dev_recover_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
int i;
if (!card)
return -EINVAL;
if (card->state != CARD_STATE_UP)
return -EPERM;
i = simple_strtoul(buf, &tmp, 16);
if (i == 1)
qeth_schedule_recovery(card);
return count;
}
static DEVICE_ATTR(recover, 0200, NULL, qeth_dev_recover_store);
static ssize_t qeth_dev_performance_stats_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
if (!card)
return -EINVAL;
return sprintf(buf, "%i\n", card->options.performance_stats ? 1:0);
}
static ssize_t qeth_dev_performance_stats_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
int i;
if (!card)
return -EINVAL;
i = simple_strtoul(buf, &tmp, 16);
if ((i == 0) || (i == 1)) {
if (i == card->options.performance_stats)
return count;
card->options.performance_stats = i;
if (i == 0)
memset(&card->perf_stats, 0,
sizeof(struct qeth_perf_stats));
card->perf_stats.initial_rx_packets = card->stats.rx_packets;
card->perf_stats.initial_tx_packets = card->stats.tx_packets;
} else {
PRINT_WARN("performance_stats: write 0 or 1 to this file!\n");
return -EINVAL;
}
return count;
}
static DEVICE_ATTR(performance_stats, 0644, qeth_dev_performance_stats_show,
qeth_dev_performance_stats_store);
static ssize_t qeth_dev_layer2_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
if (!card)
return -EINVAL;
return sprintf(buf, "%i\n", card->options.layer2 ? 1:0);
}
static ssize_t qeth_dev_layer2_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
int i, rc;
enum qeth_discipline_id newdis;
if (!card)
return -EINVAL;
if (((card->state != CARD_STATE_DOWN) &&
(card->state != CARD_STATE_RECOVER)))
return -EPERM;
i = simple_strtoul(buf, &tmp, 16);
switch (i) {
case 0:
newdis = QETH_DISCIPLINE_LAYER3;
break;
case 1:
newdis = QETH_DISCIPLINE_LAYER2;
break;
default:
PRINT_WARN("layer2: write 0 or 1 to this file!\n");
return -EINVAL;
}
if (card->options.layer2 == newdis) {
return count;
} else {
if (card->discipline.ccwgdriver) {
card->discipline.ccwgdriver->remove(card->gdev);
qeth_core_free_discipline(card);
}
}
rc = qeth_core_load_discipline(card, newdis);
if (rc)
return rc;
rc = card->discipline.ccwgdriver->probe(card->gdev);
if (rc)
return rc;
return count;
}
static DEVICE_ATTR(layer2, 0644, qeth_dev_layer2_show,
qeth_dev_layer2_store);
static ssize_t qeth_dev_large_send_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
if (!card)
return -EINVAL;
switch (card->options.large_send) {
case QETH_LARGE_SEND_NO:
return sprintf(buf, "%s\n", "no");
case QETH_LARGE_SEND_EDDP:
return sprintf(buf, "%s\n", "EDDP");
case QETH_LARGE_SEND_TSO:
return sprintf(buf, "%s\n", "TSO");
default:
return sprintf(buf, "%s\n", "N/A");
}
}
static ssize_t qeth_dev_large_send_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
enum qeth_large_send_types type;
int rc = 0;
char *tmp;
if (!card)
return -EINVAL;
tmp = strsep((char **) &buf, "\n");
if (!strcmp(tmp, "no")) {
type = QETH_LARGE_SEND_NO;
} else if (!strcmp(tmp, "EDDP")) {
type = QETH_LARGE_SEND_EDDP;
} else if (!strcmp(tmp, "TSO")) {
type = QETH_LARGE_SEND_TSO;
} else {
PRINT_WARN("large_send: invalid mode %s!\n", tmp);
return -EINVAL;
}
if (card->options.large_send == type)
return count;
rc = qeth_set_large_send(card, type);
if (rc)
return rc;
return count;
}
static DEVICE_ATTR(large_send, 0644, qeth_dev_large_send_show,
qeth_dev_large_send_store);
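/* helpers shared by the blkt/total, blkt/inter and blkt/inter_jumbo
 * attributes below */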
static ssize_t qeth_dev_blkt_show(char *buf, struct qeth_card *card, int value)
{
if (!card)
return -EINVAL;
return sprintf(buf, "%i\n", value);
}
static ssize_t qeth_dev_blkt_store(struct qeth_card *card,
const char *buf, size_t count, int *value, int max_value)
{
char *tmp;
int i;
if (!card)
return -EINVAL;
if ((card->state != CARD_STATE_DOWN) &&
(card->state != CARD_STATE_RECOVER))
return -EPERM;
i = simple_strtoul(buf, &tmp, 10);
if (i <= max_value) {
*value = i;
} else {
PRINT_WARN("blkt total time: write values between"
" 0 and %d to this file!\n", max_value);
return -EINVAL;
}
return count;
}
static ssize_t qeth_dev_blkt_total_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
return qeth_dev_blkt_show(buf, card, card->info.blkt.time_total);
}
static ssize_t qeth_dev_blkt_total_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
return qeth_dev_blkt_store(card, buf, count,
&card->info.blkt.time_total, 1000);
}
static DEVICE_ATTR(total, 0644, qeth_dev_blkt_total_show,
qeth_dev_blkt_total_store);
static ssize_t qeth_dev_blkt_inter_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
return qeth_dev_blkt_show(buf, card, card->info.blkt.inter_packet);
}
static ssize_t qeth_dev_blkt_inter_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
return qeth_dev_blkt_store(card, buf, count,
&card->info.blkt.inter_packet, 100);
}
static DEVICE_ATTR(inter, 0644, qeth_dev_blkt_inter_show,
qeth_dev_blkt_inter_store);
static ssize_t qeth_dev_blkt_inter_jumbo_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
return qeth_dev_blkt_show(buf, card,
card->info.blkt.inter_packet_jumbo);
}
static ssize_t qeth_dev_blkt_inter_jumbo_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
return qeth_dev_blkt_store(card, buf, count,
&card->info.blkt.inter_packet_jumbo, 100);
}
static DEVICE_ATTR(inter_jumbo, 0644, qeth_dev_blkt_inter_jumbo_show,
qeth_dev_blkt_inter_jumbo_store);
static struct attribute *qeth_blkt_device_attrs[] = {
&dev_attr_total.attr,
&dev_attr_inter.attr,
&dev_attr_inter_jumbo.attr,
NULL,
};
static struct attribute_group qeth_device_blkt_group = {
.name = "blkt",
.attrs = qeth_blkt_device_attrs,
};
static struct attribute *qeth_device_attrs[] = {
&dev_attr_state.attr,
&dev_attr_chpid.attr,
&dev_attr_if_name.attr,
&dev_attr_card_type.attr,
&dev_attr_inbuf_size.attr,
&dev_attr_portno.attr,
&dev_attr_portname.attr,
&dev_attr_priority_queueing.attr,
&dev_attr_buffer_count.attr,
&dev_attr_recover.attr,
&dev_attr_performance_stats.attr,
&dev_attr_layer2.attr,
&dev_attr_large_send.attr,
NULL,
};
static struct attribute_group qeth_device_attr_group = {
.attrs = qeth_device_attrs,
};
static struct attribute *qeth_osn_device_attrs[] = {
&dev_attr_state.attr,
&dev_attr_chpid.attr,
&dev_attr_if_name.attr,
&dev_attr_card_type.attr,
&dev_attr_buffer_count.attr,
&dev_attr_recover.attr,
NULL,
};
static struct attribute_group qeth_osn_device_attr_group = {
.attrs = qeth_osn_device_attrs,
};
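/* create the common sysfs attributes plus the "blkt" subgroup; the
 * common group is removed again if creating the subgroup fails */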
int qeth_core_create_device_attributes(struct device *dev)
{
int ret;
ret = sysfs_create_group(&dev->kobj, &qeth_device_attr_group);
if (ret)
return ret;
ret = sysfs_create_group(&dev->kobj, &qeth_device_blkt_group);
if (ret)
sysfs_remove_group(&dev->kobj, &qeth_device_attr_group);
return 0;
}
void qeth_core_remove_device_attributes(struct device *dev)
{
sysfs_remove_group(&dev->kobj, &qeth_device_attr_group);
sysfs_remove_group(&dev->kobj, &qeth_device_blkt_group);
}
int qeth_core_create_osn_attributes(struct device *dev)
{
return sysfs_create_group(&dev->kobj, &qeth_osn_device_attr_group);
}
void qeth_core_remove_osn_attributes(struct device *dev)
{
sysfs_remove_group(&dev->kobj, &qeth_osn_device_attr_group);
}

File diff suppressed because it is too large

View File

@ -0,0 +1,76 @@
/*
* drivers/s390/net/qeth_l3.h
*
* Copyright IBM Corp. 2007
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
* Frank Pavlic <fpavlic@de.ibm.com>,
* Thomas Spatzier <tspat@de.ibm.com>,
* Frank Blaschka <frank.blaschka@de.ibm.com>
*/
#ifndef __QETH_L3_H__
#define __QETH_L3_H__
#include "qeth_core.h"
#define QETH_DBF_TEXT_(name, level, text...) \
do { \
if (qeth_dbf_passes(qeth_dbf_##name, level)) { \
char *dbf_txt_buf = get_cpu_var(qeth_l3_dbf_txt_buf); \
sprintf(dbf_txt_buf, text); \
debug_text_event(qeth_dbf_##name, level, dbf_txt_buf); \
put_cpu_var(qeth_l3_dbf_txt_buf); \
} \
} while (0)
DECLARE_PER_CPU(char[256], qeth_l3_dbf_txt_buf);
struct qeth_ipaddr {
struct list_head entry;
enum qeth_ip_types type;
enum qeth_ipa_setdelip_flags set_flags;
enum qeth_ipa_setdelip_flags del_flags;
int is_multicast;
int users;
enum qeth_prot_versions proto;
unsigned char mac[OSA_ADDR_LEN];
union {
struct {
unsigned int addr;
unsigned int mask;
} a4;
struct {
struct in6_addr addr;
unsigned int pfxlen;
} a6;
} u;
};
struct qeth_ipato_entry {
struct list_head entry;
enum qeth_prot_versions proto;
char addr[16];
int mask_bits;
};
void qeth_l3_ipaddr4_to_string(const __u8 *, char *);
int qeth_l3_string_to_ipaddr4(const char *, __u8 *);
void qeth_l3_ipaddr6_to_string(const __u8 *, char *);
int qeth_l3_string_to_ipaddr6(const char *, __u8 *);
void qeth_l3_ipaddr_to_string(enum qeth_prot_versions, const __u8 *, char *);
int qeth_l3_string_to_ipaddr(const char *, enum qeth_prot_versions, __u8 *);
int qeth_l3_create_device_attributes(struct device *);
void qeth_l3_remove_device_attributes(struct device *);
int qeth_l3_setrouting_v4(struct qeth_card *);
int qeth_l3_setrouting_v6(struct qeth_card *);
int qeth_l3_add_ipato_entry(struct qeth_card *, struct qeth_ipato_entry *);
void qeth_l3_del_ipato_entry(struct qeth_card *, enum qeth_prot_versions,
u8 *, int);
int qeth_l3_add_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
void qeth_l3_del_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *);
void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions,
const u8 *);
#endif /* __QETH_L3_H__ */

File diff suppressed because it is too large

File diff suppressed because it is too large