OpenCloudOS-Kernel/drivers/net/ibmveth.h

/**************************************************************************/
/* */
/* IBM eServer i/pSeries Virtual Ethernet Device Driver */
/* Copyright (C) 2003 IBM Corp. */
/* Dave Larson (larson1@us.ibm.com) */
/* Santiago Leon (santil@us.ibm.com) */
/* */
/* This program is free software; you can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
/* the Free Software Foundation; either version 2 of the License, or */
/* (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU General Public License for more details. */
/* */
/* You should have received a copy of the GNU General Public License */
/* along with this program; if not, write to the Free Software */
/* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 */
/* USA */
/* */
/**************************************************************************/
#ifndef _IBMVETH_H
#define _IBMVETH_H
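
/* maximum number of buffer descriptors a single H_SEND_LOGICAL_LAN
 * call can carry (see h_send_logical_lan() below) */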
#define IbmVethMaxSendFrags 6

/* constants for H_MULTICAST_CTRL */
#define IbmVethMcastReceptionModifyBit     0x80000UL
#define IbmVethMcastReceptionEnableBit     0x20000UL
#define IbmVethMcastFilterModifyBit        0x40000UL
#define IbmVethMcastFilterEnableBit        0x10000UL

#define IbmVethMcastEnableRecv       (IbmVethMcastReceptionModifyBit | IbmVethMcastReceptionEnableBit)
#define IbmVethMcastDisableRecv      (IbmVethMcastReceptionModifyBit)
#define IbmVethMcastEnableFiltering  (IbmVethMcastFilterModifyBit | IbmVethMcastFilterEnableBit)
#define IbmVethMcastDisableFiltering (IbmVethMcastFilterModifyBit)
#define IbmVethMcastAddFilter        0x1UL
#define IbmVethMcastRemoveFilter     0x2UL
#define IbmVethMcastClearFilterTable 0x3UL

/* hcall macros */
#define h_register_logical_lan(ua, buflst, rxq, fltlst, mac) \
	plpar_hcall_norets(H_REGISTER_LOGICAL_LAN, ua, buflst, rxq, fltlst, mac)

#define h_free_logical_lan(ua) \
	plpar_hcall_norets(H_FREE_LOGICAL_LAN, ua)

#define h_add_logical_lan_buffer(ua, buf) \
	plpar_hcall_norets(H_ADD_LOGICAL_LAN_BUFFER, ua, buf)
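
/* Hand a frame described by up to six buffer descriptors to the
 * hypervisor; the transmit correlator comes back in retbuf[0]. */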
static inline long h_send_logical_lan(unsigned long unit_address,
		unsigned long desc1, unsigned long desc2, unsigned long desc3,
		unsigned long desc4, unsigned long desc5, unsigned long desc6,
		unsigned long correlator_in, unsigned long *correlator_out)
{
	long rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address, desc1,
			desc2, desc3, desc4, desc5, desc6, correlator_in);

	*correlator_out = retbuf[0];

	return rc;
}
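
/* Query or modify the ILLAN attributes for this unit address:
 * bits in reset_mask are cleared, bits in set_mask are set, and the
 * resulting attribute word is returned via ret_attributes. */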
static inline long h_illan_attributes(unsigned long unit_address,
		unsigned long reset_mask, unsigned long set_mask,
		unsigned long *ret_attributes)
{
	long rc;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

	rc = plpar_hcall(H_ILLAN_ATTRIBUTES, retbuf, unit_address,
			reset_mask, set_mask);

	*ret_attributes = retbuf[0];

	return rc;
}

#define h_multicast_ctrl(ua, cmd, mac) \
	plpar_hcall_norets(H_MULTICAST_CTRL, ua, cmd, mac)

#define h_change_logical_lan_mac(ua, mac) \
	plpar_hcall_norets(H_CHANGE_LOGICAL_LAN_MAC, ua, mac)
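
/* receive buffer pools: five pools of increasing buffer size; only
 * the two smallest pools are marked active by default */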
#define IbmVethNumBufferPools 5
#define IBMVETH_BUFF_OH 22 /* Overhead: 14 ethernet header + 8 opaque handle */
#define IBMVETH_MAX_MTU 68
#define IBMVETH_MAX_POOL_COUNT 4096
#define IBMVETH_MAX_BUF_SIZE (1024 * 128)

static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
static int pool_count[] = { 256, 768, 256, 256, 256 };
static int pool_active[] = { 1, 1, 0, 0, 0 };

#define IBM_VETH_INVALID_MAP ((u16)0xffff)
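
/* bookkeeping for one pool of receive buffers, all of the same size */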
struct ibmveth_buff_pool {
	u32 size;
	u32 index;
	u32 buff_size;
	u32 threshold;
	atomic_t available;
	u32 consumer_index;
	u32 producer_index;
	u16 *free_map;
	dma_addr_t *dma_addr;
	struct sk_buff **skbuff;
	int active;
	struct kobject kobj;
};
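
/* receive queue shared with the hypervisor; registered through
 * h_register_logical_lan() */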
struct ibmveth_rx_q {
	u64 index;
	u64 num_slots;
	u64 toggle;
	dma_addr_t queue_dma;
	u32 queue_len;
	struct ibmveth_rx_q_entry *queue_addr;
};
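
/* per-device private data */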
struct ibmveth_adapter {
	struct vio_dev *vdev;
	struct net_device *netdev;
[NET]: Make NAPI polling independent of struct net_device objects. Several devices have multiple independant RX queues per net device, and some have a single interrupt doorbell for several queues. In either case, it's easier to support layouts like that if the structure representing the poll is independant from the net device itself. The signature of the ->poll() call back goes from: int foo_poll(struct net_device *dev, int *budget) to int foo_poll(struct napi_struct *napi, int budget) The caller is returned the number of RX packets processed (or the number of "NAPI credits" consumed if you want to get abstract). The callee no longer messes around bumping dev->quota, *budget, etc. because that is all handled in the caller upon return. The napi_struct is to be embedded in the device driver private data structures. Furthermore, it is the driver's responsibility to disable all NAPI instances in it's ->stop() device close handler. Since the napi_struct is privatized into the driver's private data structures, only the driver knows how to get at all of the napi_struct instances it may have per-device. With lots of help and suggestions from Rusty Russell, Roland Dreier, Michael Chan, Jeff Garzik, and Jamal Hadi Salim. Bug fixes from Thomas Graf, Roland Dreier, Peter Zijlstra, Joseph Fannin, Scott Wood, Hans J. Koch, and Michael Chan. [ Ported to current tree and all drivers converted. Integrated Stephen's follow-on kerneldoc additions, and restored poll_list handling to the old style to fix mutual exclusion issues. -DaveM ] Signed-off-by: Stephen Hemminger <shemminger@linux-foundation.org> Signed-off-by: David S. Miller <davem@davemloft.net>
2007-10-04 07:41:36 +08:00
	struct napi_struct napi;
	struct net_device_stats stats;
	unsigned int mcastFilterSize;
	unsigned long mac_addr;
	void *buffer_list_addr;
	void *filter_list_addr;
	dma_addr_t buffer_list_dma;
	dma_addr_t filter_list_dma;
	struct ibmveth_buff_pool rx_buff_pool[IbmVethNumBufferPools];
	struct ibmveth_rx_q rx_queue;
	int pool_config;

	/* adapter specific stats */
	u64 replenish_task_cycles;
	u64 replenish_no_mem;
	u64 replenish_add_buff_failure;
	u64 replenish_add_buff_success;
	u64 rx_invalid_buffer;
	u64 rx_no_buffer;
	u64 tx_multidesc_send;
	u64 tx_linearized;
	u64 tx_linearize_failed;
	u64 tx_map_failed;
	u64 tx_send_failed;
	spinlock_t stats_lock;
};
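
/* buffer descriptor handed to the logical LAN hcalls; the union lets
 * the bit fields be passed as a single 64-bit register argument */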
struct ibmveth_buf_desc_fields {
	u32 valid : 1;
	u32 toggle : 1;
	u32 reserved : 4;
	u32 no_csum : 1;
	u32 csum_good : 1;
	u32 length : 24;
	u32 address;
};

union ibmveth_buf_desc {
	u64 desc;
	struct ibmveth_buf_desc_fields fields;
};
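
/* layout of the attribute word manipulated by h_illan_attributes() */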
struct ibmveth_illan_attributes_fields {
	u32 reserved;
	u32 reserved2 : 18;
	u32 csum_offload_padded_pkt_support : 1;
	u32 reserved3 : 1;
	u32 trunk_priority : 4;
	u32 reserved4 : 5;
	u32 tcp_csum_offload_ipv6 : 1;
	u32 tcp_csum_offload_ipv4 : 1;
	u32 active_trunk : 1;
};

union ibmveth_illan_attributes {
	u64 desc;
	struct ibmveth_illan_attributes_fields fields;
};
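
/* one entry in the receive queue, filled in by the hypervisor when a
 * frame has been placed into a posted receive buffer */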
struct ibmveth_rx_q_entry {
	u16 toggle : 1;
	u16 valid : 1;
	u16 reserved : 4;
	u16 no_csum : 1;
	u16 csum_good : 1;
	u16 reserved2 : 8;

	u16 offset;
	u32 length;
	u64 correlator;
};

#endif /* _IBMVETH_H */