Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (95 commits)
  b44: GFP_DMA skb should not escape from driver
  korina: do not use IRQF_SHARED with IRQF_DISABLED
  korina: do not stop queue here
  korina: fix handling tx_chain_tail
  korina: do tx at the right position
  korina: do schedule napi after testing for it
  korina: rework korina_rx() for use with napi
  korina: disable napi on close and restart
  korina: reset resource buffer size to 1536
  korina: fix usage of driver_data
  bnx2x: First slow path interrupt race
  bnx2x: MTU Filter
  bnx2x: Indirection table initialization index
  bnx2x: Missing brackets
  bnx2x: Fixing the doorbell size
  bnx2x: Endianness issues
  bnx2x: VLAN tagged packets without VLAN offload
  bnx2x: Protecting the link change indication
  bnx2x: Flow control updated before reporting the link
  bnx2x: Missing mask when calculating flow control
  ...
commit 3feeba1e53
@@ -292,7 +292,9 @@ isdn_net_unbind_channel(isdn_net_local * lp)
 	lp->dialstate = 0;
 	dev->rx_netdev[isdn_dc2minor(lp->isdn_device, lp->isdn_channel)] = NULL;
 	dev->st_netdev[isdn_dc2minor(lp->isdn_device, lp->isdn_channel)] = NULL;
-	isdn_free_channel(lp->isdn_device, lp->isdn_channel, ISDN_USAGE_NET);
+	if (lp->isdn_device != -1 && lp->isdn_channel != -1)
+		isdn_free_channel(lp->isdn_device, lp->isdn_channel,
+				  ISDN_USAGE_NET);
 	lp->flags &= ~ISDN_NET_CONNECTED;
 	lp->isdn_device = -1;
 	lp->isdn_channel = -1;
@@ -2513,7 +2515,6 @@ static const struct net_device_ops isdn_netdev_ops = {
 	.ndo_stop	      = isdn_net_close,
 	.ndo_do_ioctl	      = isdn_net_ioctl,

-	.ndo_validate_addr    = NULL,
 	.ndo_start_xmit	      = isdn_net_start_xmit,
 	.ndo_get_stats	      = isdn_net_get_stats,
 	.ndo_tx_timeout	      = isdn_net_tx_timeout,
@@ -2528,12 +2529,8 @@ static void _isdn_setup(struct net_device *dev)

 	ether_setup(dev);

+	dev->flags = IFF_NOARP | IFF_POINTOPOINT;
 	/* Setup the generic properties */
 	dev->mtu = 1500;
-	dev->flags = IFF_NOARP|IFF_POINTOPOINT;
-	dev->type = ARPHRD_ETHER;
-	dev->addr_len = ETH_ALEN;
-	dev->header_ops = NULL;
-	dev->netdev_ops = &isdn_netdev_ops;

@@ -646,7 +646,7 @@ static const struct net_device_ops etherh_netdev_ops = {
 	.ndo_get_stats		= ei_get_stats,
 	.ndo_set_multicast_list	= ei_set_multicast_list,
 	.ndo_validate_addr	= eth_validate_addr,
-	.ndo_set_mac_addr	= eth_set_mac_addr,
+	.ndo_set_mac_address	= eth_set_mac_addr,
 	.ndo_change_mtu		= eth_change_mtu,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= ei_poll,
@@ -37,7 +37,10 @@ static int phy_debug = 0;
 #define __ei_open       ax_ei_open
 #define __ei_close      ax_ei_close
 #define __ei_poll       ax_ei_poll
+#define __ei_start_xmit ax_ei_start_xmit
 #define __ei_tx_timeout ax_ei_tx_timeout
+#define __ei_get_stats  ax_ei_get_stats
+#define __ei_set_multicast_list ax_ei_set_multicast_list
 #define __ei_interrupt  ax_ei_interrupt
 #define ____alloc_ei_netdev ax__alloc_ei_netdev
 #define __NS8390_init   ax_NS8390_init
@@ -623,6 +626,23 @@ static void ax_eeprom_register_write(struct eeprom_93cx6 *eeprom)
 }
 #endif

+static const struct net_device_ops ax_netdev_ops = {
+	.ndo_open		= ax_open,
+	.ndo_stop		= ax_close,
+	.ndo_do_ioctl		= ax_ioctl,
+
+	.ndo_start_xmit		= ax_ei_start_xmit,
+	.ndo_tx_timeout		= ax_ei_tx_timeout,
+	.ndo_get_stats		= ax_ei_get_stats,
+	.ndo_set_multicast_list = ax_ei_set_multicast_list,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address	= eth_mac_addr,
+	.ndo_change_mtu		= eth_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= ax_ei_poll,
+#endif
+};
+
 /* setup code */

 static void ax_initial_setup(struct net_device *dev, struct ei_device *ei_local)
@@ -738,9 +758,7 @@ static int ax_init_dev(struct net_device *dev, int first_init)
 	ei_status.get_8390_hdr = &ax_get_8390_hdr;
 	ei_status.priv = 0;

-	dev->open = ax_open;
-	dev->stop = ax_close;
-	dev->do_ioctl = ax_ioctl;
+	dev->netdev_ops = &ax_netdev_ops;
 	dev->ethtool_ops = &ax_ethtool_ops;

 	ax->msg_enable = NETIF_MSG_LINK;
@@ -753,9 +771,6 @@ static int ax_init_dev(struct net_device *dev, int first_init)
 	ax->mii.mdio_write = ax_phy_write;
 	ax->mii.dev = dev;

-#ifdef CONFIG_NET_POLL_CONTROLLER
-	dev->poll_controller = ax_ei_poll;
-#endif
 	ax_NS8390_init(dev, 0);

 	if (first_init)
@@ -679,6 +679,7 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 			dev_kfree_skb_any(skb);
 			return -ENOMEM;
 		}
+		bp->force_copybreak = 1;
 	}

 	rh = (struct rx_header *) skb->data;
@@ -800,7 +801,7 @@ static int b44_rx(struct b44 *bp, int budget)
 		/* Omit CRC. */
 		len -= 4;

-		if (len > RX_COPY_THRESHOLD) {
+		if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) {
 			int skb_size;
 			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
 			if (skb_size < 0)
@@ -2152,6 +2153,7 @@ static int __devinit b44_init_one(struct ssb_device *sdev,
 	bp = netdev_priv(dev);
 	bp->sdev = sdev;
 	bp->dev = dev;
+	bp->force_copybreak = 0;

 	bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);

@@ -395,7 +395,7 @@ struct b44 {
 	u32			rx_pending;
 	u32			tx_pending;
 	u8			phy_addr;
-
+	u8			force_copybreak;
 	struct mii_if_info	mii_if;
 };

@@ -20,6 +20,11 @@
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */

+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#define BCM_VLAN			1
+#endif
+
+
 /* error/debug prints */

 #define DRV_MODULE_NAME		"bnx2x"
@@ -78,11 +83,6 @@
 #endif


-#ifdef NETIF_F_HW_VLAN_TX
-#define BCM_VLAN 1
-#endif
-
-
 #define U64_LO(x)			(u32)(((u64)(x)) & 0xffffffff)
 #define U64_HI(x)			(u32)(((u64)(x)) >> 32)
 #define HILO_U64(hi, lo)		((((u64)(hi)) << 32) + (lo))
@@ -150,6 +150,9 @@ struct sw_rx_page {

 #define PAGES_PER_SGE_SHIFT		0
 #define PAGES_PER_SGE			(1 << PAGES_PER_SGE_SHIFT)
+#define SGE_PAGE_SIZE			PAGE_SIZE
+#define SGE_PAGE_SHIFT			PAGE_SHIFT
+#define SGE_PAGE_ALIGN(addr)		PAGE_ALIGN(addr)

 #define BCM_RX_ETH_PAYLOAD_ALIGN	64

@@ -736,7 +739,7 @@ struct bnx2x {
 	struct bnx2x_fastpath	fp[MAX_CONTEXT];
 	void __iomem		*regview;
 	void __iomem		*doorbells;
-#define BNX2X_DB_SIZE		(16*2048)
+#define BNX2X_DB_SIZE		(16*BCM_PAGE_SIZE)

 	struct net_device	*dev;
 	struct pci_dev		*pdev;
@@ -801,6 +804,8 @@ struct bnx2x {
 #define TPA_ENABLE_FLAG			0x80
 #define NO_MCP_FLAG			0x100
 #define BP_NOMCP(bp)			(bp->flags & NO_MCP_FLAG)
+#define HW_VLAN_TX_FLAG			0x400
+#define HW_VLAN_RX_FLAG			0x800

 	int			func;
 #define BP_PORT(bp)			(bp->func % PORT_MAX)
@@ -811,7 +816,7 @@ struct bnx2x {
 	int			pm_cap;
 	int			pcie_cap;

-	struct work_struct	sp_task;
+	struct delayed_work	sp_task;
 	struct work_struct	reset_task;

 	struct timer_list	timer;
@@ -38,9 +38,7 @@
 #include <linux/time.h>
 #include <linux/ethtool.h>
 #include <linux/mii.h>
-#ifdef NETIF_F_HW_VLAN_TX
-#include <linux/if_vlan.h>
-#endif
+#include <linux/if_vlan.h>
 #include <net/ip.h>
 #include <net/tcp.h>
 #include <net/checksum.h>
@@ -95,6 +93,7 @@ MODULE_PARM_DESC(debug, "default debug msglevel");
 module_param(use_multi, int, 0);
 MODULE_PARM_DESC(use_multi, "use per-CPU queues");
 #endif
+static struct workqueue_struct *bnx2x_wq;

 enum bnx2x_board_type {
 	BCM57710 = 0,
@@ -671,7 +670,8 @@ static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
 	synchronize_irq(bp->pdev->irq);

 	/* make sure sp_task is not running */
-	cancel_work_sync(&bp->sp_task);
+	cancel_delayed_work(&bp->sp_task);
+	flush_workqueue(bnx2x_wq);
 }

 /* fast path */
@@ -972,7 +972,7 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
 		return;

 	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
-		       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
+		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
 	__free_pages(page, PAGES_PER_SGE_SHIFT);

 	sw_buf->page = NULL;
@@ -1000,7 +1000,7 @@ static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
 	if (unlikely(page == NULL))
 		return -ENOMEM;

-	mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
+	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
 			       PCI_DMA_FROMDEVICE);
 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 		__free_pages(page, PAGES_PER_SGE_SHIFT);
@@ -1096,9 +1096,9 @@ static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
 				  struct eth_fast_path_rx_cqe *fp_cqe)
 {
 	struct bnx2x *bp = fp->bp;
-	u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
+	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
 				     le16_to_cpu(fp_cqe->len_on_bd)) >>
-		      BCM_PAGE_SHIFT;
+		      SGE_PAGE_SHIFT;
 	u16 last_max, last_elem, first_elem;
 	u16 delta = 0;
 	u16 i;
@@ -1203,22 +1203,22 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 			       u16 cqe_idx)
 {
 	struct sw_rx_page *rx_pg, old_rx_pg;
-	struct page *sge;
 	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
 	u32 i, frag_len, frag_size, pages;
 	int err;
 	int j;

 	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
-	pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT;
+	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

 	/* This is needed in order to enable forwarding support */
 	if (frag_size)
-		skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE,
+		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
 					       max(frag_size, (u32)len_on_bd));

 #ifdef BNX2X_STOP_ON_ERROR
-	if (pages > 8*PAGES_PER_SGE) {
+	if (pages >
+	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
 		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
 			  pages, cqe_idx);
 		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
@@ -1234,9 +1234,8 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,

 		/* FW gives the indices of the SGE as if the ring is an array
 		   (meaning that "next" element will consume 2 indices) */
-		frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE));
+		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
 		rx_pg = &fp->rx_page_ring[sge_idx];
-		sge = rx_pg->page;
 		old_rx_pg = *rx_pg;

 		/* If we fail to allocate a substitute page, we simply stop
@@ -1249,7 +1248,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,

 		/* Unmap the page as we r going to pass it to the stack */
 		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
-			      BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
+			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

 		/* Add one frag and update the appropriate fields in the skb */
 		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
@@ -1282,6 +1281,13 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	if (likely(new_skb)) {
 		/* fix ip xsum and give it to the stack */
 		/* (no need to map the new skb) */
+#ifdef BCM_VLAN
+		int is_vlan_cqe =
+			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
+			 PARSING_FLAGS_VLAN);
+		int is_not_hwaccel_vlan_cqe =
+			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
+#endif

 		prefetch(skb);
 		prefetch(((char *)(skb)) + 128);
@@ -1306,6 +1312,12 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 			struct iphdr *iph;

 			iph = (struct iphdr *)skb->data;
+#ifdef BCM_VLAN
+			/* If there is no Rx VLAN offloading -
+			   take VLAN tag into an account */
+			if (unlikely(is_not_hwaccel_vlan_cqe))
+				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
+#endif
 			iph->check = 0;
 			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
 		}
@@ -1313,9 +1325,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 		if (!bnx2x_fill_frag_skb(bp, fp, skb,
 					 &cqe->fast_path_cqe, cqe_idx)) {
 #ifdef BCM_VLAN
-			if ((bp->vlgrp != NULL) &&
-			    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
-			     PARSING_FLAGS_VLAN))
+			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
+			    (!is_not_hwaccel_vlan_cqe))
 				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
 						le16_to_cpu(cqe->fast_path_cqe.
 							    vlan_tag));
@@ -1355,11 +1366,23 @@ static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
 	rx_prods.cqe_prod = rx_comp_prod;
 	rx_prods.sge_prod = rx_sge_prod;

+	/*
+	 * Make sure that the BD and SGE data is updated before updating the
+	 * producers since FW might read the BD/SGE right after the producer
+	 * is updated.
+	 * This is only applicable for weak-ordered memory model archs such
+	 * as IA-64. The following barrier is also mandatory since FW will
+	 * assumes BDs must have buffers.
+	 */
+	wmb();
+
 	for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
 		REG_WR(bp, BAR_TSTRORM_INTMEM +
 		       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
 		       ((u32 *)&rx_prods)[i]);

+	mmiowb(); /* keep prod updates ordered */
+
 	DP(NETIF_MSG_RX_STATUS,
 	   "Wrote: bd_prod %u  cqe_prod %u  sge_prod %u\n",
 	   bd_prod, rx_comp_prod, rx_sge_prod);
@@ -1415,7 +1438,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 			DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
 			   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
 			   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
-			   cqe->fast_path_cqe.rss_hash_result,
+			   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
 			   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
 			   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

@@ -1547,7 +1570,7 @@ reuse_rx:
 		}

 #ifdef BCM_VLAN
-		if ((bp->vlgrp != NULL) &&
+		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
 		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
 		     PARSING_FLAGS_VLAN))
 			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
@@ -1580,7 +1603,6 @@ next_cqe:
 	/* Update producers */
 	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
 			     fp->rx_sge_prod);
-	mmiowb(); /* keep prod updates ordered */

 	fp->rx_pkt += rx_pkt;
 	fp->rx_calls++;
@@ -1660,7 +1682,7 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)


 	if (unlikely(status & 0x1)) {
-		schedule_work(&bp->sp_task);
+		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

 		status &= ~0x1;
 		if (!status)
@@ -1887,7 +1909,8 @@ static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)

 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
 {
-	switch (bp->link_vars.ieee_fc) {
+	switch (bp->link_vars.ieee_fc &
+		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
 	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
 		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
 					  ADVERTISED_Pause);
@@ -1957,10 +1980,11 @@ static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
 	rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
 	bnx2x_release_phy_lock(bp);

+	bnx2x_calc_fc_adv(bp);
+
 	if (bp->link_vars.link_up)
 		bnx2x_link_report(bp);

-	bnx2x_calc_fc_adv(bp);
-
 	return rc;
 }

@@ -2220,9 +2244,7 @@ static void bnx2x_link_attn(struct bnx2x *bp)
 	/* Make sure that we are synced with the current statistics */
 	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

-	bnx2x_acquire_phy_lock(bp);
 	bnx2x_link_update(&bp->link_params, &bp->link_vars);
-	bnx2x_release_phy_lock(bp);

 	if (bp->link_vars.link_up) {

@@ -2471,6 +2493,8 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
 	if (asserted & ATTN_HARD_WIRED_MASK) {
 		if (asserted & ATTN_NIG_FOR_FUNC) {

+			bnx2x_acquire_phy_lock(bp);
+
 			/* save nig interrupt mask */
 			bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
 			REG_WR(bp, nig_int_mask_addr, 0);
@@ -2526,8 +2550,10 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
 	REG_WR(bp, hc_addr, asserted);

 	/* now set back the mask */
-	if (asserted & ATTN_NIG_FOR_FUNC)
+	if (asserted & ATTN_NIG_FOR_FUNC) {
 		REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
+		bnx2x_release_phy_lock(bp);
+	}
 }

 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
@@ -2795,8 +2821,10 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
 static void bnx2x_attn_int(struct bnx2x *bp)
 {
 	/* read local copy of bits */
-	u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
-	u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
+	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
+								attn_bits);
+	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
+								attn_bits_ack);
 	u32 attn_state = bp->attn_state;

 	/* look for changed bits */
@@ -2820,7 +2848,7 @@ static void bnx2x_attn_int(struct bnx2x *bp)

 static void bnx2x_sp_task(struct work_struct *work)
 {
-	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
+	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
 	u16 status;


@@ -2844,7 +2872,7 @@ static void bnx2x_sp_task(struct work_struct *work)
 	if (status & 0x2)
 		bp->stats_pending = 0;

-	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
+	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
 		     IGU_INT_NOP, 1);
 	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
 		     IGU_INT_NOP, 1);
@@ -2875,7 +2903,7 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
 	return IRQ_HANDLED;
 #endif

-	schedule_work(&bp->sp_task);
+	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

 	return IRQ_HANDLED;
 }
@@ -2892,7 +2920,7 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
 	do { \
 		s_lo += a_lo; \
-		s_hi += a_hi + (s_lo < a_lo) ? 1 : 0; \
+		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
 	} while (0)

 /* difference = minuend - subtrahend */
@@ -4496,7 +4524,7 @@ static void bnx2x_init_context(struct bnx2x *bp)

 static void bnx2x_init_ind_table(struct bnx2x *bp)
 {
-	int port = BP_PORT(bp);
+	int func = BP_FUNC(bp);
 	int i;

 	if (!is_multi(bp))
@@ -4505,10 +4533,8 @@ static void bnx2x_init_ind_table(struct bnx2x *bp)
 	DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
 	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
 		REG_WR8(bp, BAR_TSTRORM_INTMEM +
-			TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
-			i % bp->num_queues);
-
-	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
+			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
+			BP_CL_ID(bp) + (i % bp->num_queues));
 }

 static void bnx2x_set_client_config(struct bnx2x *bp)
@@ -4517,12 +4543,12 @@ static void bnx2x_set_client_config(struct bnx2x *bp)
 	int port = BP_PORT(bp);
 	int i;

-	tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
+	tstorm_client.mtu = bp->dev->mtu;
 	tstorm_client.statistics_counter_id = BP_CL_ID(bp);
 	tstorm_client.config_flags =
 				TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
 #ifdef BCM_VLAN
-	if (bp->rx_mode && bp->vlgrp) {
+	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
 		tstorm_client.config_flags |=
 				TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
 		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
@@ -4531,7 +4557,7 @@ static void bnx2x_set_client_config(struct bnx2x *bp)

 	if (bp->flags & TPA_ENABLE_FLAG) {
 		tstorm_client.max_sges_for_packet =
-			BCM_PAGE_ALIGN(tstorm_client.mtu) >> BCM_PAGE_SHIFT;
+			SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
 		tstorm_client.max_sges_for_packet =
 			((tstorm_client.max_sges_for_packet +
 			  PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
@@ -4714,10 +4740,11 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
 			   bp->e1hov);
 	}

-	/* Init CQ ring mapping and aggregation size */
-	max_agg_size = min((u32)(bp->rx_buf_size +
-				 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
-			   (u32)0xffff);
+	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
+	max_agg_size =
+		min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
+			  SGE_PAGE_SIZE * PAGES_PER_SGE),
+		    (u32)0xffff);
 	for_each_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];

@@ -4785,6 +4812,15 @@ static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
 	bnx2x_init_context(bp);
 	bnx2x_init_internal(bp, load_code);
 	bnx2x_init_ind_table(bp);
+	bnx2x_stats_init(bp);
+
+	/* At this point, we are ready for interrupts */
+	atomic_set(&bp->intr_sem, 0);
+
+	/* flush all before enabling interrupts */
+	mb();
+	mmiowb();
+
 	bnx2x_int_enable(bp);
 }

@@ -5134,7 +5170,6 @@ static int bnx2x_init_common(struct bnx2x *bp)
 	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
 	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
 	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
-	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);

 	/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
 	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
|
|||
}
|
||||
|
||||
bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
|
||||
REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
|
||||
/* set NIC mode */
|
||||
REG_WR(bp, PRS_REG_NIC_MODE, 1);
|
||||
if (CHIP_IS_E1H(bp))
|
||||
|
@@ -6393,17 +6429,8 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 		}
 	}

-	bnx2x_stats_init(bp);
-
 	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

-	/* Enable Rx interrupt handling before sending the ramrod
-	   as it's completed on Rx FP queue */
-	bnx2x_napi_enable(bp);
-
-	/* Enable interrupt handling */
-	atomic_set(&bp->intr_sem, 0);
-
 	rc = bnx2x_setup_leading(bp);
 	if (rc) {
 		BNX2X_ERR("Setup leading failed!\n");
@@ -7501,7 +7528,7 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)

 	mutex_init(&bp->port.phy_mutex);

-	INIT_WORK(&bp->sp_task, bnx2x_sp_task);
+	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
 	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

 	rc = bnx2x_get_hwinfo(bp);
@@ -8727,6 +8754,8 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
 	tx_bd->general_data = ((UNICAST_ADDRESS <<
 				ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);

+	wmb();
+
 	fp->hw_tx_prods->bds_prod =
 		cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
 	mb(); /* FW restriction: must not reorder writing nbd and packets */
@@ -8778,7 +8807,6 @@ test_loopback_rx_exit:
 	/* Update producers */
 	bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
 			     fp->rx_sge_prod);
-	mmiowb(); /* keep prod updates ordered */

 test_loopback_exit:
 	bp->link_params.loopback_mode = LOOPBACK_NONE;
@@ -9549,11 +9577,14 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
 	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);

-	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
+#ifdef BCM_VLAN
+	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
+	    (bp->flags & HW_VLAN_TX_FLAG)) {
 		tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
 		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
 		vlan_off += 4;
 	} else
+#endif
 		tx_bd->vlan = cpu_to_le16(pkt_prod);

 	if (xmit_type) {
@@ -9705,6 +9736,15 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)

 	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);

+	/*
+	 * Make sure that the BD data is updated before updating the producer
+	 * since FW might read the BD right after the producer is updated.
+	 * This is only applicable for weak-ordered memory model archs such
+	 * as IA-64. The following barrier is also mandatory since FW will
+	 * assumes packets must have BDs.
+	 */
+	wmb();
+
 	fp->hw_tx_prods->bds_prod =
 		cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
 	mb(); /* FW restriction: must not reorder writing nbd and packets */
@@ -9718,6 +9758,9 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	dev->trans_start = jiffies;

 	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
+		/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
+		   if we put Tx into XOFF state. */
+		smp_mb();
 		netif_stop_queue(dev);
 		bp->eth_stats.driver_xoff++;
 		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
@@ -9987,6 +10030,16 @@ static void bnx2x_vlan_rx_register(struct net_device *dev,
 	struct bnx2x *bp = netdev_priv(dev);

 	bp->vlgrp = vlgrp;
+
+	/* Set flags according to the required capabilities */
+	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
+
+	if (dev->features & NETIF_F_HW_VLAN_TX)
+		bp->flags |= HW_VLAN_TX_FLAG;
+
+	if (dev->features & NETIF_F_HW_VLAN_RX)
+		bp->flags |= HW_VLAN_RX_FLAG;
+
 	if (netif_running(dev))
 		bnx2x_set_client_config(bp);
 }
@@ -10143,6 +10196,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
 		dev->features |= NETIF_F_HIGHDMA;
 #ifdef BCM_VLAN
 	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
+	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
 #endif
 	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
 	dev->features |= NETIF_F_TSO6;
@@ -10519,12 +10573,20 @@ static struct pci_driver bnx2x_pci_driver = {

 static int __init bnx2x_init(void)
 {
+	bnx2x_wq = create_singlethread_workqueue("bnx2x");
+	if (bnx2x_wq == NULL) {
+		printk(KERN_ERR PFX "Cannot create workqueue\n");
+		return -ENOMEM;
+	}
+
 	return pci_register_driver(&bnx2x_pci_driver);
 }

 static void __exit bnx2x_cleanup(void)
 {
 	pci_unregister_driver(&bnx2x_pci_driver);
+
+	destroy_workqueue(bnx2x_wq);
 }

 module_init(bnx2x_init);
@@ -795,6 +795,7 @@ static int fs_enet_open(struct net_device *dev)

 	err = fs_init_phy(dev);
 	if (err) {
+		free_irq(fep->interrupt, dev);
 		if (fep->fpi->use_napi)
 			napi_disable(&fep->napi);
 		return err;
@@ -1622,10 +1622,18 @@ static int gfar_clean_tx_ring(struct net_device *dev)
 static void gfar_schedule_cleanup(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->txlock, flags);
+	spin_lock(&priv->rxlock);
+
 	if (netif_rx_schedule_prep(&priv->napi)) {
 		gfar_write(&priv->regs->imask, IMASK_RTX_DISABLED);
 		__netif_rx_schedule(&priv->napi);
 	}
+
+	spin_unlock(&priv->rxlock);
+	spin_unlock_irqrestore(&priv->txlock, flags);
 }

 /* Interrupt Handler for Transmit complete */
@@ -613,7 +613,9 @@ static int __devinit mal_probe(struct of_device *ofdev,
 	INIT_LIST_HEAD(&mal->list);
 	spin_lock_init(&mal->lock);

-	netif_napi_add(NULL, &mal->napi, mal_poll,
+	init_dummy_netdev(&mal->dummy_dev);
+
+	netif_napi_add(&mal->dummy_dev, &mal->napi, mal_poll,
 		       CONFIG_IBM_NEW_EMAC_POLL_WEIGHT);

 	/* Load power-on reset defaults */
@@ -214,6 +214,8 @@ struct mal_instance {
 	int			index;
 	spinlock_t		lock;

+	struct net_device	dummy_dev;
+
 	unsigned int features;
 };

@@ -1073,7 +1073,7 @@ static int stir421x_patch_device(struct irda_usb_cb *self)
 {
 	unsigned int i;
 	int ret;
-	char stir421x_fw_name[11];
+	char stir421x_fw_name[12];
 	const struct firmware *fw;
 	const unsigned char *fw_version_ptr; /* pointer to version string */
 	unsigned long fw_version = 0;
@@ -84,7 +84,10 @@
 #define KORINA_NUM_RDS	64  /* number of receive descriptors */
 #define KORINA_NUM_TDS	64  /* number of transmit descriptors */

-#define KORINA_RBSIZE	536 /* size of one resource buffer = Ether MTU */
+/* KORINA_RBSIZE is the hardware's default maximum receive
+ * frame size in bytes. Having this hardcoded means that there
+ * is no support for MTU sizes greater than 1500. */
+#define KORINA_RBSIZE	1536 /* size of one resource buffer = Ether MTU */
 #define KORINA_RDS_MASK	(KORINA_NUM_RDS - 1)
 #define KORINA_TDS_MASK	(KORINA_NUM_TDS - 1)
 #define RD_RING_SIZE	(KORINA_NUM_RDS * sizeof(struct dma_desc))
@@ -196,7 +199,7 @@ static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
 	struct korina_private *lp = netdev_priv(dev);
 	unsigned long flags;
 	u32 length;
-	u32 chain_index;
+	u32 chain_prev, chain_next;
 	struct dma_desc *td;

 	spin_lock_irqsave(&lp->lock, flags);
@@ -228,8 +231,8 @@ static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
 	/* Setup the transmit descriptor. */
 	dma_cache_inv((u32) td, sizeof(*td));
 	td->ca = CPHYSADDR(skb->data);
-	chain_index = (lp->tx_chain_tail - 1) &
-			KORINA_TDS_MASK;
+	chain_prev = (lp->tx_chain_tail - 1) & KORINA_TDS_MASK;
+	chain_next = (lp->tx_chain_tail + 1) & KORINA_TDS_MASK;

 	if (readl(&(lp->tx_dma_regs->dmandptr)) == 0) {
 		if (lp->tx_chain_status == desc_empty) {
@@ -237,7 +240,7 @@ static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
 			td->control = DMA_COUNT(length) |
 					DMA_DESC_COF | DMA_DESC_IOF;
 			/* Move tail */
-			lp->tx_chain_tail = chain_index;
+			lp->tx_chain_tail = chain_next;
 			/* Write to NDPTR */
 			writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
 					&lp->tx_dma_regs->dmandptr);
@@ -248,12 +251,12 @@ static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
 			td->control = DMA_COUNT(length) |
 					DMA_DESC_COF | DMA_DESC_IOF;
 			/* Link to prev */
-			lp->td_ring[chain_index].control &=
+			lp->td_ring[chain_prev].control &=
 					~DMA_DESC_COF;
 			/* Link to prev */
-			lp->td_ring[chain_index].link = CPHYSADDR(td);
+			lp->td_ring[chain_prev].link = CPHYSADDR(td);
 			/* Move tail */
-			lp->tx_chain_tail = chain_index;
+			lp->tx_chain_tail = chain_next;
 			/* Write to NDPTR */
 			writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
 					&(lp->tx_dma_regs->dmandptr));
@@ -267,17 +270,16 @@ static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
 			td->control = DMA_COUNT(length) |
 					DMA_DESC_COF | DMA_DESC_IOF;
 			/* Move tail */
-			lp->tx_chain_tail = chain_index;
+			lp->tx_chain_tail = chain_next;
 			lp->tx_chain_status = desc_filled;
-			netif_stop_queue(dev);
 		} else {
 			/* Update tail */
 			td->control = DMA_COUNT(length) |
 					DMA_DESC_COF | DMA_DESC_IOF;
-			lp->td_ring[chain_index].control &=
+			lp->td_ring[chain_prev].control &=
 					~DMA_DESC_COF;
-			lp->td_ring[chain_index].link = CPHYSADDR(td);
-			lp->tx_chain_tail = chain_index;
+			lp->td_ring[chain_prev].link = CPHYSADDR(td);
+			lp->tx_chain_tail = chain_next;
 		}
 	}
 	dma_cache_wback((u32) td, sizeof(*td));
@@ -327,13 +329,13 @@ static irqreturn_t korina_rx_dma_interrupt(int irq, void *dev_id)

 	dmas = readl(&lp->rx_dma_regs->dmas);
 	if (dmas & (DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR)) {
-		netif_rx_schedule_prep(&lp->napi);
-
 		dmasm = readl(&lp->rx_dma_regs->dmasm);
 		writel(dmasm | (DMA_STAT_DONE |
 				DMA_STAT_HALT | DMA_STAT_ERR),
 				&lp->rx_dma_regs->dmasm);
+
+		netif_rx_schedule(&lp->napi);

 		if (dmas & DMA_STAT_ERR)
 			printk(KERN_ERR DRV_NAME "%s: DMA error\n", dev->name);

@@ -350,15 +352,20 @@ static int korina_rx(struct net_device *dev, int limit)
 	struct dma_desc *rd = &lp->rd_ring[lp->rx_next_done];
 	struct sk_buff *skb, *skb_new;
 	u8 *pkt_buf;
-	u32 devcs, pkt_len, dmas, rx_free_desc;
+	u32 devcs, pkt_len, dmas;
 	int count;

 	dma_cache_inv((u32)rd, sizeof(*rd));

 	for (count = 0; count < limit; count++) {
+		skb = lp->rx_skb[lp->rx_next_done];
+		skb_new = NULL;
+
 		devcs = rd->devcs;

+		if ((KORINA_RBSIZE - (u32)DMA_COUNT(rd->control)) == 0)
+			break;
+
 		/* Update statistics counters */
 		if (devcs & ETH_RX_CRC)
 			dev->stats.rx_crc_errors++;
@@ -381,63 +388,55 @@ static int korina_rx(struct net_device *dev, int limit)
 			 * in Rc32434 (errata ref #077) */
 			dev->stats.rx_errors++;
 			dev->stats.rx_dropped++;
-		}
-
-	while ((rx_free_desc = KORINA_RBSIZE - (u32)DMA_COUNT(rd->control)) != 0) {
-		/* init the var. used for the later
-		 * operations within the while loop */
-		skb_new = NULL;
+		} else if ((devcs & ETH_RX_ROK)) {
 			pkt_len = RCVPKT_LENGTH(devcs);
-		skb = lp->rx_skb[lp->rx_next_done];
-
-		if ((devcs & ETH_RX_ROK)) {
-			/* must be the (first and) last
-			 * descriptor then */
-			pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data;

-			/* invalidate the cache */
-			dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4);
+			/* must be the (first and) last
+			 * descriptor then */
+			pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data;

-			/* Malloc up new buffer. */
-			skb_new = netdev_alloc_skb(dev, KORINA_RBSIZE + 2);
+			/* invalidate the cache */
+			dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4);

-			if (!skb_new)
-				break;
-			/* Do not count the CRC */
-			skb_put(skb, pkt_len - 4);
-			skb->protocol = eth_type_trans(skb, dev);
+			/* Malloc up new buffer. */
+			skb_new = netdev_alloc_skb(dev, KORINA_RBSIZE + 2);

-			/* Pass the packet to upper layers */
-			netif_receive_skb(skb);
-			dev->stats.rx_packets++;
-			dev->stats.rx_bytes += pkt_len;
+			if (!skb_new)
+				break;
+			/* Do not count the CRC */
+			skb_put(skb, pkt_len - 4);
+			skb->protocol = eth_type_trans(skb, dev);

-			/* Update the mcast stats */
-			if (devcs & ETH_RX_MP)
-				dev->stats.multicast++;
+			/* Pass the packet to upper layers */
+			netif_receive_skb(skb);
+			dev->stats.rx_packets++;
+			dev->stats.rx_bytes += pkt_len;

-			lp->rx_skb[lp->rx_next_done] = skb_new;
-		}
-
-		rd->devcs = 0;
-
-		/* Restore descriptor's curr_addr */
-		if (skb_new)
-			rd->ca = CPHYSADDR(skb_new->data);
-		else
-			rd->ca = CPHYSADDR(skb->data);
-
-		rd->control = DMA_COUNT(KORINA_RBSIZE) |
-			DMA_DESC_COD | DMA_DESC_IOD;
-		lp->rd_ring[(lp->rx_next_done - 1) &
-			KORINA_RDS_MASK].control &=
-			~DMA_DESC_COD;
-
-		lp->rx_next_done = (lp->rx_next_done + 1) & KORINA_RDS_MASK;
-		dma_cache_wback((u32)rd, sizeof(*rd));
-		rd = &lp->rd_ring[lp->rx_next_done];
-		writel(~DMA_STAT_DONE, &lp->rx_dma_regs->dmas);
-	}
+			/* Update the mcast stats */
+			if (devcs & ETH_RX_MP)
+				dev->stats.multicast++;
+
+			lp->rx_skb[lp->rx_next_done] = skb_new;
+		}
+
+		rd->devcs = 0;
+
+		/* Restore descriptor's curr_addr */
+		if (skb_new)
+			rd->ca = CPHYSADDR(skb_new->data);
+		else
+			rd->ca = CPHYSADDR(skb->data);
+
+		rd->control = DMA_COUNT(KORINA_RBSIZE) |
+			DMA_DESC_COD | DMA_DESC_IOD;
+		lp->rd_ring[(lp->rx_next_done - 1) &
+			KORINA_RDS_MASK].control &=
+			~DMA_DESC_COD;
+
+		lp->rx_next_done = (lp->rx_next_done + 1) & KORINA_RDS_MASK;
+		dma_cache_wback((u32)rd, sizeof(*rd));
+		rd = &lp->rd_ring[lp->rx_next_done];
+		writel(~DMA_STAT_DONE, &lp->rx_dma_regs->dmas);
+	}

 	dmas = readl(&lp->rx_dma_regs->dmas);
@@ -623,12 +622,12 @@ korina_tx_dma_interrupt(int irq, void *dev_id)
 	dmas = readl(&lp->tx_dma_regs->dmas);

 	if (dmas & (DMA_STAT_FINI | DMA_STAT_ERR)) {
-		korina_tx(dev);
-
 		dmasm = readl(&lp->tx_dma_regs->dmasm);
 		writel(dmasm | (DMA_STAT_FINI | DMA_STAT_ERR),
 				&lp->tx_dma_regs->dmasm);

+		korina_tx(dev);
+
 		if (lp->tx_chain_status == desc_filled &&
 			(readl(&(lp->tx_dma_regs->dmandptr)) == 0)) {
 			writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
@@ -901,6 +900,8 @@ static int korina_restart(struct net_device *dev)

 	korina_free_ring(dev);

+	napi_disable(&lp->napi);
+
 	ret = korina_init(dev);
 	if (ret < 0) {
 		printk(KERN_ERR DRV_NAME "%s: cannot restart device\n",
@@ -999,14 +1000,14 @@ static int korina_open(struct net_device *dev)
 	 * that handles the Done Finished
 	 * Ovr and Und Events */
 	ret = request_irq(lp->rx_irq, &korina_rx_dma_interrupt,
-		IRQF_SHARED | IRQF_DISABLED, "Korina ethernet Rx", dev);
+			IRQF_DISABLED, "Korina ethernet Rx", dev);
 	if (ret < 0) {
 		printk(KERN_ERR DRV_NAME "%s: unable to get Rx DMA IRQ %d\n",
 		    dev->name, lp->rx_irq);
 		goto err_release;
 	}
 	ret = request_irq(lp->tx_irq, &korina_tx_dma_interrupt,
-		IRQF_SHARED | IRQF_DISABLED, "Korina ethernet Tx", dev);
+			IRQF_DISABLED, "Korina ethernet Tx", dev);
 	if (ret < 0) {
 		printk(KERN_ERR DRV_NAME "%s: unable to get Tx DMA IRQ %d\n",
 		    dev->name, lp->tx_irq);
@@ -1015,7 +1016,7 @@ static int korina_open(struct net_device *dev)

 	/* Install handler for overrun error. */
 	ret = request_irq(lp->ovr_irq, &korina_ovr_interrupt,
-			IRQF_SHARED | IRQF_DISABLED, "Ethernet Overflow", dev);
+			IRQF_DISABLED, "Ethernet Overflow", dev);
 	if (ret < 0) {
 		printk(KERN_ERR DRV_NAME"%s: unable to get OVR IRQ %d\n",
 		    dev->name, lp->ovr_irq);
@@ -1024,7 +1025,7 @@ static int korina_open(struct net_device *dev)

 	/* Install handler for underflow error. */
 	ret = request_irq(lp->und_irq, &korina_und_interrupt,
-			IRQF_SHARED | IRQF_DISABLED, "Ethernet Underflow", dev);
+			IRQF_DISABLED, "Ethernet Underflow", dev);
 	if (ret < 0) {
 		printk(KERN_ERR DRV_NAME "%s: unable to get UND IRQ %d\n",
 		    dev->name, lp->und_irq);
@@ -1067,6 +1068,8 @@ static int korina_close(struct net_device *dev)

 	korina_free_ring(dev);

+	napi_disable(&lp->napi);
+
 	free_irq(lp->rx_irq, dev);
 	free_irq(lp->tx_irq, dev);
 	free_irq(lp->ovr_irq, dev);
@@ -1089,7 +1092,6 @@ static int korina_probe(struct platform_device *pdev)
 		return -ENOMEM;
 	}
 	SET_NETDEV_DEV(dev, &pdev->dev);
-	platform_set_drvdata(pdev, dev);
 	lp = netdev_priv(dev);

 	bif->dev = dev;
@@ -308,27 +308,16 @@ struct netxen_ring_ctx {
 #define netxen_set_cmd_desc_ctxid(cmd_desc, var)	\
 	((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))

-#define netxen_set_cmd_desc_flags(cmd_desc, val)	\
-	(cmd_desc)->flags_opcode = ((cmd_desc)->flags_opcode &	\
-		~cpu_to_le16(0x7f)) | cpu_to_le16((val) & 0x7f)
-#define netxen_set_cmd_desc_opcode(cmd_desc, val)	\
-	(cmd_desc)->flags_opcode = ((cmd_desc)->flags_opcode &	\
-		~cpu_to_le16((u16)0x3f << 7)) | cpu_to_le16(((val) & 0x3f) << 7)
+#define netxen_set_tx_port(_desc, _port) \
+	(_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0)

-#define netxen_set_cmd_desc_num_of_buff(cmd_desc, val)	\
-	(cmd_desc)->num_of_buffers_total_length = \
-		((cmd_desc)->num_of_buffers_total_length & \
-		 ~cpu_to_le32(0xff)) | cpu_to_le32((val) & 0xff)
-#define netxen_set_cmd_desc_totallength(cmd_desc, val)	\
-	(cmd_desc)->num_of_buffers_total_length = \
-		((cmd_desc)->num_of_buffers_total_length & \
-		 ~cpu_to_le32((u32)0xffffff << 8)) | \
-		 cpu_to_le32(((val) & 0xffffff) << 8)
+#define netxen_set_tx_flags_opcode(_desc, _flags, _opcode) \
+	(_desc)->flags_opcode = \
+	cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7))

-#define netxen_get_cmd_desc_opcode(cmd_desc)	\
-	((le16_to_cpu((cmd_desc)->flags_opcode) >> 7) & 0x003f)
-#define netxen_get_cmd_desc_totallength(cmd_desc)	\
-	((le32_to_cpu((cmd_desc)->num_of_buffers_total_length) >> 8) & 0xffffff)
+#define netxen_set_tx_frags_len(_desc, _frags, _len) \
+	(_desc)->num_of_buffers_total_length = \
+	cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8))

 struct cmd_desc_type0 {
 	u8 tcp_hdr_offset;	/* For LSO only */
@@ -510,7 +499,8 @@ typedef enum {
 	NETXEN_BRDTYPE_P3_10G_SFP_CT = 0x002a,
 	NETXEN_BRDTYPE_P3_10G_SFP_QT = 0x002b,
 	NETXEN_BRDTYPE_P3_10G_CX4 = 0x0031,
-	NETXEN_BRDTYPE_P3_10G_XFP = 0x0032
+	NETXEN_BRDTYPE_P3_10G_XFP = 0x0032,
+	NETXEN_BRDTYPE_P3_10G_TP = 0x0080

 } netxen_brdtype_t;

@@ -757,7 +747,7 @@ extern char netxen_nic_driver_name[];
  */
 struct netxen_skb_frag {
 	u64 dma;
-	u32 length;
+	ulong length;
 };

 #define _netxen_set_bits(config_word, start, bits, val)	{\
@@ -783,13 +773,7 @@ struct netxen_skb_frag {
 struct netxen_cmd_buffer {
 	struct sk_buff *skb;
 	struct netxen_skb_frag frag_array[MAX_BUFFERS_PER_CMD + 1];
-	u32 total_length;
-	u32 mss;
-	u16 port;
-	u8 cmd;
-	u8 frag_count;
-	unsigned long time_stamp;
-	u32 state;
+	u32 frag_count;
 };

 /* In rx_buffer, we do not need multiple fragments as is a single buffer */
@@ -876,7 +860,6 @@ struct nx_host_rds_ring {
 	u32 skb_size;
 	struct netxen_rx_buffer *rx_buf_arr;	/* rx buffers for receive */
 	struct list_head free_list;
-	int begin_alloc;
 };

 /*
@@ -995,31 +978,31 @@ struct netxen_recv_context {
  */

 typedef struct {
-	u64 host_phys_addr;	/* Ring base addr */
-	u32 ring_size;		/* Ring entries */
-	u16 msi_index;
-	u16 rsvd;		/* Padding */
+	__le64 host_phys_addr;	/* Ring base addr */
+	__le32 ring_size;	/* Ring entries */
+	__le16 msi_index;
+	__le16 rsvd;		/* Padding */
 } nx_hostrq_sds_ring_t;

 typedef struct {
-	u64 host_phys_addr;	/* Ring base addr */
-	u64 buff_size;		/* Packet buffer size */
-	u32 ring_size;		/* Ring entries */
-	u32 ring_kind;		/* Class of ring */
+	__le64 host_phys_addr;	/* Ring base addr */
+	__le64 buff_size;	/* Packet buffer size */
+	__le32 ring_size;	/* Ring entries */
+	__le32 ring_kind;	/* Class of ring */
 } nx_hostrq_rds_ring_t;

 typedef struct {
-	u64 host_rsp_dma_addr;	/* Response dma'd here */
-	u32 capabilities[4];	/* Flag bit vector */
-	u32 host_int_crb_mode;	/* Interrupt crb usage */
-	u32 host_rds_crb_mode;	/* RDS crb usage */
+	__le64 host_rsp_dma_addr;	/* Response dma'd here */
+	__le32 capabilities[4];	/* Flag bit vector */
+	__le32 host_int_crb_mode;	/* Interrupt crb usage */
+	__le32 host_rds_crb_mode;	/* RDS crb usage */
 	/* These ring offsets are relative to data[0] below */
-	u32 rds_ring_offset;	/* Offset to RDS config */
-	u32 sds_ring_offset;	/* Offset to SDS config */
-	u16 num_rds_rings;	/* Count of RDS rings */
-	u16 num_sds_rings;	/* Count of SDS rings */
-	u16 rsvd1;		/* Padding */
-	u16 rsvd2;		/* Padding */
+	__le32 rds_ring_offset;	/* Offset to RDS config */
+	__le32 sds_ring_offset;	/* Offset to SDS config */
+	__le16 num_rds_rings;	/* Count of RDS rings */
+	__le16 num_sds_rings;	/* Count of SDS rings */
+	__le16 rsvd1;		/* Padding */
+	__le16 rsvd2;		/* Padding */
 	u8  reserved[128]; 	/* reserve space for future expansion*/
 	/* MUST BE 64-bit aligned.
 	   The following is packed:
@@ -1029,24 +1012,24 @@ typedef struct {
 } nx_hostrq_rx_ctx_t;

 typedef struct {
-	u32 host_producer_crb;	/* Crb to use */
-	u32 rsvd1;		/* Padding */
+	__le32 host_producer_crb;	/* Crb to use */
+	__le32 rsvd1;		/* Padding */
 } nx_cardrsp_rds_ring_t;

 typedef struct {
-	u32 host_consumer_crb;	/* Crb to use */
-	u32 interrupt_crb;	/* Crb to use */
+	__le32 host_consumer_crb;	/* Crb to use */
+	__le32 interrupt_crb;	/* Crb to use */
 } nx_cardrsp_sds_ring_t;

 typedef struct {
 	/* These ring offsets are relative to data[0] below */
-	u32 rds_ring_offset;	/* Offset to RDS config */
-	u32 sds_ring_offset;	/* Offset to SDS config */
-	u32 host_ctx_state;	/* Starting State */
-	u32 num_fn_per_port;	/* How many PCI fn share the port */
-	u16 num_rds_rings;	/* Count of RDS rings */
-	u16 num_sds_rings;	/* Count of SDS rings */
-	u16 context_id;		/* Handle for context */
+	__le32 rds_ring_offset;	/* Offset to RDS config */
+	__le32 sds_ring_offset;	/* Offset to SDS config */
+	__le32 host_ctx_state;	/* Starting State */
+	__le32 num_fn_per_port;	/* How many PCI fn share the port */
+	__le16 num_rds_rings;	/* Count of RDS rings */
+	__le16 num_sds_rings;	/* Count of SDS rings */
+	__le16 context_id;	/* Handle for context */
 	u8  phys_port;		/* Physical id of port */
 	u8  virt_port;		/* Virtual/Logical id of port */
 	u8  reserved[128];	/* save space for future expansion */
|
|||
*/
|
||||
|
||||
typedef struct {
|
||||
u64 host_phys_addr; /* Ring base addr */
|
||||
u32 ring_size; /* Ring entries */
|
||||
u32 rsvd; /* Padding */
|
||||
__le64 host_phys_addr; /* Ring base addr */
|
||||
__le32 ring_size; /* Ring entries */
|
||||
__le32 rsvd; /* Padding */
|
||||
} nx_hostrq_cds_ring_t;
|
||||
|
||||
typedef struct {
|
||||
u64 host_rsp_dma_addr; /* Response dma'd here */
|
||||
u64 cmd_cons_dma_addr; /* */
|
||||
u64 dummy_dma_addr; /* */
|
||||
u32 capabilities[4]; /* Flag bit vector */
|
||||
u32 host_int_crb_mode; /* Interrupt crb usage */
|
||||
u32 rsvd1; /* Padding */
|
||||
u16 rsvd2; /* Padding */
|
||||
u16 interrupt_ctl;
|
||||
u16 msi_index;
|
||||
u16 rsvd3; /* Padding */
|
||||
__le64 host_rsp_dma_addr; /* Response dma'd here */
|
||||
__le64 cmd_cons_dma_addr; /* */
|
||||
__le64 dummy_dma_addr; /* */
|
||||
__le32 capabilities[4]; /* Flag bit vector */
|
||||
__le32 host_int_crb_mode; /* Interrupt crb usage */
|
||||
__le32 rsvd1; /* Padding */
|
||||
__le16 rsvd2; /* Padding */
|
||||
__le16 interrupt_ctl;
|
||||
__le16 msi_index;
|
||||
__le16 rsvd3; /* Padding */
|
||||
nx_hostrq_cds_ring_t cds_ring; /* Desc of cds ring */
|
||||
u8 reserved[128]; /* future expansion */
|
||||
} nx_hostrq_tx_ctx_t;
|
||||
|
||||
typedef struct {
|
||||
u32 host_producer_crb; /* Crb to use */
|
||||
u32 interrupt_crb; /* Crb to use */
|
||||
__le32 host_producer_crb; /* Crb to use */
|
||||
__le32 interrupt_crb; /* Crb to use */
|
||||
} nx_cardrsp_cds_ring_t;
|
||||
|
||||
typedef struct {
|
||||
u32 host_ctx_state; /* Starting state */
|
||||
u16 context_id; /* Handle for context */
|
||||
__le32 host_ctx_state; /* Starting state */
|
||||
__le16 context_id; /* Handle for context */
|
||||
u8 phys_port; /* Physical id of port */
|
||||
u8 virt_port; /* Virtual/Logical id of port */
|
||||
nx_cardrsp_cds_ring_t cds_ring; /* Card cds settings */
|
||||
|
@ -1202,9 +1185,9 @@ enum {
|
|||
#define VPORT_MISS_MODE_ACCEPT_MULTI 2 /* accept unmatched multicast */
|
||||
|
||||
typedef struct {
|
||||
u64 qhdr;
|
||||
u64 req_hdr;
|
||||
u64 words[6];
|
||||
__le64 qhdr;
|
||||
__le64 req_hdr;
|
||||
__le64 words[6];
|
||||
} nx_nic_req_t;
|
||||
|
||||
typedef struct {
|
||||
|
@@ -1486,8 +1469,6 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter);

 void netxen_initialize_adapter_ops(struct netxen_adapter *adapter);
 int netxen_init_firmware(struct netxen_adapter *adapter);
-void netxen_tso_check(struct netxen_adapter *adapter,
-		      struct cmd_desc_type0 *desc, struct sk_buff *skb);
 void netxen_nic_clear_stats(struct netxen_adapter *adapter);
 void netxen_watchdog_task(struct work_struct *work);
 void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx,
@@ -1496,6 +1477,7 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter);
 u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctx, int max);
 void netxen_p2_nic_set_multi(struct net_device *netdev);
 void netxen_p3_nic_set_multi(struct net_device *netdev);
+void netxen_p3_free_mac_list(struct netxen_adapter *adapter);
 int netxen_p3_nic_set_promisc(struct netxen_adapter *adapter, u32);
 int netxen_config_intr_coalesce(struct netxen_adapter *adapter);

@@ -76,7 +76,7 @@ netxen_api_unlock(struct netxen_adapter *adapter)
 static u32
 netxen_poll_rsp(struct netxen_adapter *adapter)
 {
-	u32 raw_rsp, rsp = NX_CDRP_RSP_OK;
+	u32 rsp = NX_CDRP_RSP_OK;
 	int	timeout = 0;

 	do {
@@ -86,10 +86,7 @@ netxen_poll_rsp(struct netxen_adapter *adapter)
 		if (++timeout > NX_OS_CRB_RETRY_COUNT)
 			return NX_CDRP_RSP_TIMEOUT;

-		netxen_nic_read_w1(adapter, NX_CDRP_CRB_OFFSET,
-				&raw_rsp);
-
-		rsp = le32_to_cpu(raw_rsp);
+		netxen_nic_read_w1(adapter, NX_CDRP_CRB_OFFSET, &rsp);
 	} while (!NX_CDRP_IS_RSP(rsp));

 	return rsp;
@@ -109,20 +106,16 @@ netxen_issue_cmd(struct netxen_adapter *adapter,
 	if (netxen_api_lock(adapter))
 		return NX_RCODE_TIMEOUT;

-	netxen_nic_write_w1(adapter, NX_SIGN_CRB_OFFSET,
-			cpu_to_le32(signature));
+	netxen_nic_write_w1(adapter, NX_SIGN_CRB_OFFSET, signature);

-	netxen_nic_write_w1(adapter, NX_ARG1_CRB_OFFSET,
-			cpu_to_le32(arg1));
+	netxen_nic_write_w1(adapter, NX_ARG1_CRB_OFFSET, arg1);

-	netxen_nic_write_w1(adapter, NX_ARG2_CRB_OFFSET,
-			cpu_to_le32(arg2));
+	netxen_nic_write_w1(adapter, NX_ARG2_CRB_OFFSET, arg2);

-	netxen_nic_write_w1(adapter, NX_ARG3_CRB_OFFSET,
-			cpu_to_le32(arg3));
+	netxen_nic_write_w1(adapter, NX_ARG3_CRB_OFFSET, arg3);

 	netxen_nic_write_w1(adapter, NX_CDRP_CRB_OFFSET,
-			cpu_to_le32(NX_CDRP_FORM_CMD(cmd)));
+			NX_CDRP_FORM_CMD(cmd));

 	rsp = netxen_poll_rsp(adapter);

@@ -133,7 +126,6 @@ netxen_issue_cmd(struct netxen_adapter *adapter,
 		rcode = NX_RCODE_TIMEOUT;
 	} else if (rsp == NX_CDRP_RSP_FAIL) {
 		netxen_nic_read_w1(adapter, NX_ARG1_CRB_OFFSET, &rcode);
-		rcode = le32_to_cpu(rcode);

 		printk(KERN_ERR "%s: failed card response code:0x%x\n",
 				netxen_nic_driver_name, rcode);
@@ -183,7 +175,7 @@ nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)

 	int i, nrds_rings, nsds_rings;
 	size_t rq_size, rsp_size;
-	u32 cap, reg;
+	u32 cap, reg, val;

 	int err;

@@ -225,11 +217,14 @@ nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)

 	prq->num_rds_rings = cpu_to_le16(nrds_rings);
 	prq->num_sds_rings = cpu_to_le16(nsds_rings);
-	prq->rds_ring_offset = 0;
-	prq->sds_ring_offset = prq->rds_ring_offset +
-			(sizeof(nx_hostrq_rds_ring_t) * nrds_rings);
+	prq->rds_ring_offset = cpu_to_le32(0);
+
+	val = le32_to_cpu(prq->rds_ring_offset) +
+		(sizeof(nx_hostrq_rds_ring_t) * nrds_rings);
+	prq->sds_ring_offset = cpu_to_le32(val);

-	prq_rds = (nx_hostrq_rds_ring_t *)(prq->data + prq->rds_ring_offset);
+	prq_rds = (nx_hostrq_rds_ring_t *)(prq->data +
+			le32_to_cpu(prq->rds_ring_offset));

 	for (i = 0; i < nrds_rings; i++) {

@@ -241,17 +236,14 @@ nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
 		prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
 	}

-	prq_sds = (nx_hostrq_sds_ring_t *)(prq->data + prq->sds_ring_offset);
+	prq_sds = (nx_hostrq_sds_ring_t *)(prq->data +
+			le32_to_cpu(prq->sds_ring_offset));

 	prq_sds[0].host_phys_addr =
 		cpu_to_le64(recv_ctx->rcv_status_desc_phys_addr);
 	prq_sds[0].ring_size = cpu_to_le32(adapter->max_rx_desc_count);
 	/* only one msix vector for now */
-	prq_sds[0].msi_index = cpu_to_le32(0);
-
-	/* now byteswap offsets */
-	prq->rds_ring_offset = cpu_to_le32(prq->rds_ring_offset);
-	prq->sds_ring_offset = cpu_to_le32(prq->sds_ring_offset);
+	prq_sds[0].msi_index = cpu_to_le16(0);

 	phys_addr = hostrq_phys_addr;
 	err = netxen_issue_cmd(adapter,
@@ -269,9 +261,9 @@ nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)


 	prsp_rds = ((nx_cardrsp_rds_ring_t *)
-			&prsp->data[prsp->rds_ring_offset]);
+			&prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);

-	for (i = 0; i < le32_to_cpu(prsp->num_rds_rings); i++) {
+	for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
 		rds_ring = &recv_ctx->rds_rings[i];

 		reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
@@ -279,7 +271,7 @@ nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
 	}

 	prsp_sds = ((nx_cardrsp_sds_ring_t *)
-			&prsp->data[prsp->sds_ring_offset]);
+			&prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);
 	reg = le32_to_cpu(prsp_sds[0].host_consumer_crb);
 	recv_ctx->crb_sts_consumer = NETXEN_NIC_REG(reg - 0x200);

@ -288,7 +280,7 @@ nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
|
|||
|
||||
recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
|
||||
recv_ctx->context_id = le16_to_cpu(prsp->context_id);
|
||||
recv_ctx->virt_port = le16_to_cpu(prsp->virt_port);
|
||||
recv_ctx->virt_port = prsp->virt_port;
|
||||
|
||||
out_free_rsp:
|
||||
pci_free_consistent(adapter->pdev, rsp_size, prsp, cardrsp_phys_addr);

@@ -136,11 +136,9 @@ netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)

ecmd->port = PORT_TP;

if (netif_running(dev)) {
ecmd->speed = adapter->link_speed;
ecmd->duplex = adapter->link_duplex;
ecmd->autoneg = adapter->link_autoneg;
}
ecmd->speed = adapter->link_speed;
ecmd->duplex = adapter->link_duplex;
ecmd->autoneg = adapter->link_autoneg;

} else if (adapter->ahw.board_type == NETXEN_NIC_XGBE) {
u32 val;

@@ -171,7 +169,7 @@ netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
} else
return -EIO;

ecmd->phy_address = adapter->portnum;
ecmd->phy_address = adapter->physical_port;
ecmd->transceiver = XCVR_EXTERNAL;

switch ((netxen_brdtype_t) boardinfo->board_type) {

@@ -180,13 +178,13 @@ netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
case NETXEN_BRDTYPE_P3_REF_QG:
case NETXEN_BRDTYPE_P3_4_GB:
case NETXEN_BRDTYPE_P3_4_GB_MM:
case NETXEN_BRDTYPE_P3_10000_BASE_T:

ecmd->supported |= SUPPORTED_Autoneg;
ecmd->advertising |= ADVERTISED_Autoneg;
case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
case NETXEN_BRDTYPE_P3_10G_CX4:
case NETXEN_BRDTYPE_P3_10G_CX4_LP:
case NETXEN_BRDTYPE_P3_10000_BASE_T:
ecmd->supported |= SUPPORTED_TP;
ecmd->advertising |= ADVERTISED_TP;
ecmd->port = PORT_TP;

@@ -204,16 +202,33 @@ netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
ecmd->port = PORT_FIBRE;
ecmd->autoneg = AUTONEG_DISABLE;
break;
case NETXEN_BRDTYPE_P2_SB31_10G:
case NETXEN_BRDTYPE_P3_10G_SFP_PLUS:
case NETXEN_BRDTYPE_P3_10G_SFP_CT:
case NETXEN_BRDTYPE_P3_10G_SFP_QT:
ecmd->advertising |= ADVERTISED_TP;
ecmd->supported |= SUPPORTED_TP;
case NETXEN_BRDTYPE_P2_SB31_10G:
case NETXEN_BRDTYPE_P3_10G_XFP:
ecmd->supported |= SUPPORTED_FIBRE;
ecmd->advertising |= ADVERTISED_FIBRE;
ecmd->port = PORT_FIBRE;
ecmd->autoneg = AUTONEG_DISABLE;
break;
case NETXEN_BRDTYPE_P3_10G_TP:
if (adapter->ahw.board_type == NETXEN_NIC_XGBE) {
ecmd->autoneg = AUTONEG_DISABLE;
ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP);
ecmd->advertising |=
(ADVERTISED_FIBRE | ADVERTISED_TP);
ecmd->port = PORT_FIBRE;
} else {
ecmd->autoneg = AUTONEG_ENABLE;
ecmd->supported |= (SUPPORTED_TP |SUPPORTED_Autoneg);
ecmd->advertising |=
(ADVERTISED_TP | ADVERTISED_Autoneg);
ecmd->port = PORT_TP;
}
break;
default:
printk(KERN_ERR "netxen-nic: Unsupported board model %d\n",
(netxen_brdtype_t) boardinfo->board_type);

@@ -503,17 +503,15 @@ netxen_send_cmd_descs(struct netxen_adapter *adapter,

i = 0;

netif_tx_lock_bh(adapter->netdev);

producer = adapter->cmd_producer;
do {
cmd_desc = &cmd_desc_arr[i];

pbuf = &adapter->cmd_buf_arr[producer];
pbuf->mss = 0;
pbuf->total_length = 0;
pbuf->skb = NULL;
pbuf->cmd = 0;
pbuf->frag_count = 0;
pbuf->port = 0;

/* adapter->ahw.cmd_desc_head[producer] = *cmd_desc; */
memcpy(&adapter->ahw.cmd_desc_head[producer],

@@ -531,6 +529,8 @@ netxen_send_cmd_descs(struct netxen_adapter *adapter,

netxen_nic_update_cmd_producer(adapter, adapter->cmd_producer);

netif_tx_unlock_bh(adapter->netdev);

return 0;
}

@@ -539,16 +539,19 @@ static int nx_p3_sre_macaddr_change(struct net_device *dev,
{
struct netxen_adapter *adapter = netdev_priv(dev);
nx_nic_req_t req;
nx_mac_req_t mac_req;
nx_mac_req_t *mac_req;
u64 word;
int rv;

memset(&req, 0, sizeof(nx_nic_req_t));
req.qhdr |= (NX_NIC_REQUEST << 23);
req.req_hdr |= NX_MAC_EVENT;
req.req_hdr |= ((u64)adapter->portnum << 16);
mac_req.op = op;
memcpy(&mac_req.mac_addr, addr, 6);
req.words[0] = cpu_to_le64(*(u64 *)&mac_req);
req.qhdr = cpu_to_le64(NX_NIC_REQUEST << 23);

word = NX_MAC_EVENT | ((u64)adapter->portnum << 16);
req.req_hdr = cpu_to_le64(word);

mac_req = (nx_mac_req_t *)&req.words[0];
mac_req->op = op;
memcpy(mac_req->mac_addr, addr, 6);

rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
if (rv != 0) {

@@ -612,18 +615,35 @@ send_fw_cmd:
int netxen_p3_nic_set_promisc(struct netxen_adapter *adapter, u32 mode)
{
nx_nic_req_t req;
u64 word;

memset(&req, 0, sizeof(nx_nic_req_t));

req.qhdr |= (NX_HOST_REQUEST << 23);
req.req_hdr |= NX_NIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE;
req.req_hdr |= ((u64)adapter->portnum << 16);
req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);

word = NX_NIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE |
((u64)adapter->portnum << 16);
req.req_hdr = cpu_to_le64(word);

req.words[0] = cpu_to_le64(mode);

return netxen_send_cmd_descs(adapter,
(struct cmd_desc_type0 *)&req, 1);
}

void netxen_p3_free_mac_list(struct netxen_adapter *adapter)
{
nx_mac_list_t *cur, *next;

cur = adapter->mac_list;

while (cur) {
next = cur->next;
kfree(cur);
cur = next;
}
}

#define NETXEN_CONFIG_INTR_COALESCE 3

/*

@@ -632,13 +652,15 @@ int netxen_p3_nic_set_promisc(struct netxen_adapter *adapter, u32 mode)
int netxen_config_intr_coalesce(struct netxen_adapter *adapter)
{
nx_nic_req_t req;
u64 word;
int rv;

memset(&req, 0, sizeof(nx_nic_req_t));

req.qhdr |= (NX_NIC_REQUEST << 23);
req.req_hdr |= NETXEN_CONFIG_INTR_COALESCE;
req.req_hdr |= ((u64)adapter->portnum << 16);
req.qhdr = cpu_to_le64(NX_NIC_REQUEST << 23);

word = NETXEN_CONFIG_INTR_COALESCE | ((u64)adapter->portnum << 16);
req.req_hdr = cpu_to_le64(word);

memcpy(&req.words[0], &adapter->coal, sizeof(adapter->coal));

@@ -772,13 +794,10 @@ int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, __le64 *mac)
adapter->hw_read_wx(adapter, crbaddr, &mac_lo, 4);
adapter->hw_read_wx(adapter, crbaddr+4, &mac_hi, 4);

mac_hi = cpu_to_le32(mac_hi);
mac_lo = cpu_to_le32(mac_lo);

if (pci_func & 1)
*mac = ((mac_lo >> 16) | ((u64)mac_hi << 16));
*mac = le64_to_cpu((mac_lo >> 16) | ((u64)mac_hi << 16));
else
*mac = ((mac_lo) | ((u64)mac_hi << 32));
*mac = le64_to_cpu((u64)mac_lo | ((u64)mac_hi << 32));

return 0;
}

@@ -937,7 +956,7 @@ int netxen_load_firmware(struct netxen_adapter *adapter)
{
int i;
u32 data, size = 0;
u32 flashaddr = NETXEN_BOOTLD_START, memaddr = NETXEN_BOOTLD_START;
u32 flashaddr = NETXEN_BOOTLD_START;

size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START)/4;

@@ -949,10 +968,8 @@ int netxen_load_firmware(struct netxen_adapter *adapter)
if (netxen_rom_fast_read(adapter, flashaddr, (int *)&data) != 0)
return -EIO;

adapter->pci_mem_write(adapter, memaddr, &data, 4);
adapter->pci_mem_write(adapter, flashaddr, &data, 4);
flashaddr += 4;
memaddr += 4;
cond_resched();
}
msleep(1);

@@ -2034,7 +2051,13 @@ int netxen_nic_get_board_info(struct netxen_adapter *adapter)
rv = -1;
}

DPRINTK(INFO, "Discovered board type:0x%x ", boardinfo->board_type);
if (boardinfo->board_type == NETXEN_BRDTYPE_P3_4_GB_MM) {
u32 gpio = netxen_nic_reg_read(adapter,
NETXEN_ROMUSB_GLB_PAD_GPIO_I);
if ((gpio & 0x8000) == 0)
boardinfo->board_type = NETXEN_BRDTYPE_P3_10G_TP;
}

switch ((netxen_brdtype_t) boardinfo->board_type) {
case NETXEN_BRDTYPE_P2_SB35_4G:
adapter->ahw.board_type = NETXEN_NIC_GBE;

@@ -2053,7 +2076,6 @@ int netxen_nic_get_board_info(struct netxen_adapter *adapter)
case NETXEN_BRDTYPE_P3_10G_SFP_QT:
case NETXEN_BRDTYPE_P3_10G_XFP:
case NETXEN_BRDTYPE_P3_10000_BASE_T:

adapter->ahw.board_type = NETXEN_NIC_XGBE;
break;
case NETXEN_BRDTYPE_P1_BD:

@@ -2063,9 +2085,12 @@ int netxen_nic_get_board_info(struct netxen_adapter *adapter)
case NETXEN_BRDTYPE_P3_REF_QG:
case NETXEN_BRDTYPE_P3_4_GB:
case NETXEN_BRDTYPE_P3_4_GB_MM:

adapter->ahw.board_type = NETXEN_NIC_GBE;
break;
case NETXEN_BRDTYPE_P3_10G_TP:
adapter->ahw.board_type = (adapter->portnum < 2) ?
NETXEN_NIC_XGBE : NETXEN_NIC_GBE;
break;
default:
printk("%s: Unknown(%x)\n", netxen_nic_driver_name,
boardinfo->board_type);

@@ -2110,12 +2135,16 @@ void netxen_nic_set_link_parameters(struct netxen_adapter *adapter)
{
__u32 status;
__u32 autoneg;
__u32 mode;
__u32 port_mode;

netxen_nic_read_w0(adapter, NETXEN_NIU_MODE, &mode);
if (netxen_get_niu_enable_ge(mode)) { /* Gb 10/100/1000 Mbps mode */
if (!netif_carrier_ok(adapter->netdev)) {
adapter->link_speed = 0;
adapter->link_duplex = -1;
adapter->link_autoneg = AUTONEG_ENABLE;
return;
}

if (adapter->ahw.board_type == NETXEN_NIC_GBE) {
adapter->hw_read_wx(adapter,
NETXEN_PORT_MODE_ADDR, &port_mode, 4);
if (port_mode == NETXEN_PORT_MODE_802_3_AP) {

@@ -2141,7 +2170,7 @@ void netxen_nic_set_link_parameters(struct netxen_adapter *adapter)
adapter->link_speed = SPEED_1000;
break;
default:
adapter->link_speed = -1;
adapter->link_speed = 0;
break;
}
switch (netxen_get_phy_duplex(status)) {

@@ -2164,7 +2193,7 @@ void netxen_nic_set_link_parameters(struct netxen_adapter *adapter)
goto link_down;
} else {
link_down:
adapter->link_speed = -1;
adapter->link_speed = 0;
adapter->link_duplex = -1;
}
}

@@ -308,7 +308,6 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
}
memset(rds_ring->rx_buf_arr, 0, RCV_BUFFSIZE);
INIT_LIST_HEAD(&rds_ring->free_list);
rds_ring->begin_alloc = 0;
/*
* Now go through all of them, set reference handles
* and put them in the queues.

@@ -439,6 +438,8 @@ static int netxen_wait_rom_done(struct netxen_adapter *adapter)
long timeout = 0;
long done = 0;

cond_resched();

while (done == 0) {
done = netxen_nic_reg_read(adapter, NETXEN_ROMUSB_GLB_STATUS);
done &= 2;

@@ -533,12 +534,9 @@ static int do_rom_fast_write(struct netxen_adapter *adapter, int addr,
static int do_rom_fast_read(struct netxen_adapter *adapter,
int addr, int *valp)
{
cond_resched();

netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr);
netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3);
udelay(100); /* prevent bursting on CRB */
netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3);
netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE, 0xb);
if (netxen_wait_rom_done(adapter)) {
printk("Error waiting for rom done\n");

@@ -546,7 +544,7 @@ static int do_rom_fast_read(struct netxen_adapter *adapter,
}
/* reset abyte_cnt and dummy_byte_cnt */
netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0);
udelay(100); /* prevent bursting on CRB */
udelay(10);
netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);

*valp = netxen_nic_reg_read(adapter, NETXEN_ROMUSB_ROM_RDATA);

@@ -884,14 +882,16 @@ int netxen_flash_unlock(struct netxen_adapter *adapter)
int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
{
int addr, val;
int i, init_delay = 0;
int i, n, init_delay = 0;
struct crb_addr_pair *buf;
unsigned offset, n;
unsigned offset;
u32 off;

/* resetall */
rom_lock(adapter);
netxen_crb_writelit_adapter(adapter, NETXEN_ROMUSB_GLB_SW_RESET,
0xffffffff);
netxen_rom_unlock(adapter);

if (verbose) {
if (netxen_rom_fast_read(adapter, NETXEN_BOARDTYPE, &val) == 0)

@@ -910,7 +910,7 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)

if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
if (netxen_rom_fast_read(adapter, 0, &n) != 0 ||
(n != 0xcafecafeUL) ||
(n != 0xcafecafe) ||
netxen_rom_fast_read(adapter, 4, &n) != 0) {
printk(KERN_ERR "%s: ERROR Reading crb_init area: "
"n: %08x\n", netxen_nic_driver_name, n);

@@ -975,6 +975,14 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
/* do not reset PCI */
if (off == (ROMUSB_GLB + 0xbc))
continue;
if (off == (ROMUSB_GLB + 0xa8))
continue;
if (off == (ROMUSB_GLB + 0xc8)) /* core clock */
continue;
if (off == (ROMUSB_GLB + 0x24)) /* MN clock */
continue;
if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */
continue;
if (off == (NETXEN_CRB_PEG_NET_1 + 0x18))
buf[i].data = 0x1020;
/* skip the function enable register */

@@ -992,23 +1000,21 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
continue;
}

init_delay = 1;
/* After writing this register, HW needs time for CRB */
/* to quiet down (else crb_window returns 0xffffffff) */
if (off == NETXEN_ROMUSB_GLB_SW_RESET) {
init_delay = 1;
init_delay = 1000;
if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
/* hold xdma in reset also */
buf[i].data = NETXEN_NIC_XDMA_RESET;
buf[i].data = 0x8000ff;
}
}

adapter->hw_write_wx(adapter, off, &buf[i].data, 4);

if (init_delay == 1) {
msleep(1000);
init_delay = 0;
}
msleep(1);
msleep(init_delay);
}
kfree(buf);

@@ -1277,7 +1283,7 @@ static void netxen_process_rcv(struct netxen_adapter *adapter, int ctxid,

dev_kfree_skb_any(skb);
for (i = 0; i < nr_frags; i++) {
index = frag_desc->frag_handles[i];
index = le16_to_cpu(frag_desc->frag_handles[i]);
skb = netxen_process_rxbuf(adapter,
rds_ring, index, cksum);
if (skb)

@@ -1428,7 +1434,6 @@ void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid)
struct rcv_desc *pdesc;
struct netxen_rx_buffer *buffer;
int count = 0;
int index = 0;
netxen_ctx_msg msg = 0;
dma_addr_t dma;
struct list_head *head;

@@ -1436,7 +1441,6 @@ void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid)
rds_ring = &recv_ctx->rds_rings[ringid];

producer = rds_ring->producer;
index = rds_ring->begin_alloc;
head = &rds_ring->free_list;

/* We can start writing rx descriptors into the phantom memory. */

@@ -1444,39 +1448,37 @@ void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid)

skb = dev_alloc_skb(rds_ring->skb_size);
if (unlikely(!skb)) {
rds_ring->begin_alloc = index;
break;
}

if (!adapter->ahw.cut_through)
skb_reserve(skb, 2);

dma = pci_map_single(pdev, skb->data,
rds_ring->dma_size, PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(pdev, dma)) {
dev_kfree_skb_any(skb);
break;
}

count++;
buffer = list_entry(head->next, struct netxen_rx_buffer, list);
list_del(&buffer->list);

count++; /* now there should be no failure */
pdesc = &rds_ring->desc_head[producer];

if (!adapter->ahw.cut_through)
skb_reserve(skb, 2);
/* This will be setup when we receive the
* buffer after it has been filled FSL TBD TBD
* skb->dev = netdev;
*/
dma = pci_map_single(pdev, skb->data, rds_ring->dma_size,
PCI_DMA_FROMDEVICE);
pdesc->addr_buffer = cpu_to_le64(dma);
buffer->skb = skb;
buffer->state = NETXEN_BUFFER_BUSY;
buffer->dma = dma;

/* make a rcv descriptor */
pdesc = &rds_ring->desc_head[producer];
pdesc->addr_buffer = cpu_to_le64(dma);
pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
DPRINTK(INFO, "done writing descripter\n");
producer =
get_next_index(producer, rds_ring->max_rx_desc_count);
index = get_next_index(index, rds_ring->max_rx_desc_count);

producer = get_next_index(producer, rds_ring->max_rx_desc_count);
}
/* if we did allocate buffers, then write the count to Phantom */
if (count) {
rds_ring->begin_alloc = index;
rds_ring->producer = producer;
/* Window = 1 */
adapter->pci_write_normalize(adapter,

@@ -1515,49 +1517,50 @@ static void netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
struct rcv_desc *pdesc;
struct netxen_rx_buffer *buffer;
int count = 0;
int index = 0;
struct list_head *head;
dma_addr_t dma;

rds_ring = &recv_ctx->rds_rings[ringid];

producer = rds_ring->producer;
index = rds_ring->begin_alloc;
head = &rds_ring->free_list;
/* We can start writing rx descriptors into the phantom memory. */
while (!list_empty(head)) {

skb = dev_alloc_skb(rds_ring->skb_size);
if (unlikely(!skb)) {
rds_ring->begin_alloc = index;
break;
}

if (!adapter->ahw.cut_through)
skb_reserve(skb, 2);

dma = pci_map_single(pdev, skb->data,
rds_ring->dma_size, PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(pdev, dma)) {
dev_kfree_skb_any(skb);
break;
}

count++;
buffer = list_entry(head->next, struct netxen_rx_buffer, list);
list_del(&buffer->list);

count++; /* now there should be no failure */
pdesc = &rds_ring->desc_head[producer];
if (!adapter->ahw.cut_through)
skb_reserve(skb, 2);
buffer->skb = skb;
buffer->state = NETXEN_BUFFER_BUSY;
buffer->dma = pci_map_single(pdev, skb->data,
rds_ring->dma_size,
PCI_DMA_FROMDEVICE);
buffer->dma = dma;

/* make a rcv descriptor */
pdesc = &rds_ring->desc_head[producer];
pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
pdesc->addr_buffer = cpu_to_le64(buffer->dma);
producer =
get_next_index(producer, rds_ring->max_rx_desc_count);
index = get_next_index(index, rds_ring->max_rx_desc_count);
buffer = &rds_ring->rx_buf_arr[index];

producer = get_next_index(producer, rds_ring->max_rx_desc_count);
}

/* if we did allocate buffers, then write the count to Phantom */
if (count) {
rds_ring->begin_alloc = index;
rds_ring->producer = producer;
/* Window = 1 */
adapter->pci_write_normalize(adapter,

@@ -39,6 +39,7 @@
#include "netxen_nic_phan_reg.h"

#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include <net/ip.h>

MODULE_DESCRIPTION("NetXen Multi port (1/10) Gigabit Network Driver");

@@ -242,7 +243,7 @@ static void netxen_check_options(struct netxen_adapter *adapter)
case NETXEN_BRDTYPE_P3_4_GB:
case NETXEN_BRDTYPE_P3_4_GB_MM:
adapter->msix_supported = !!use_msi_x;
adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_10G;
adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_1G;
break;

case NETXEN_BRDTYPE_P2_SB35_4G:

@@ -251,6 +252,14 @@ static void netxen_check_options(struct netxen_adapter *adapter)
adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_1G;
break;

case NETXEN_BRDTYPE_P3_10G_TP:
adapter->msix_supported = !!use_msi_x;
if (adapter->ahw.board_type == NETXEN_NIC_XGBE)
adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_10G;
else
adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_1G;
break;

default:
adapter->msix_supported = 0;
adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_1G;

@@ -271,10 +280,15 @@ static void netxen_check_options(struct netxen_adapter *adapter)
static int
netxen_check_hw_init(struct netxen_adapter *adapter, int first_boot)
{
int ret = 0;
u32 val, timeout;

if (first_boot == 0x55555555) {
/* This is the first boot after power up */
adapter->pci_write_normalize(adapter,
NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC);

if (!NX_IS_REVISION_P2(adapter->ahw.revision_id))
return 0;

/* PCI bus master workaround */
adapter->hw_read_wx(adapter,

@@ -294,18 +308,26 @@ netxen_check_hw_init(struct netxen_adapter *adapter, int first_boot)
/* clear the register for future unloads/loads */
adapter->pci_write_normalize(adapter,
NETXEN_CAM_RAM(0x1fc), 0);
ret = -1;
return -EIO;
}

if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
/* Start P2 boot loader */
adapter->pci_write_normalize(adapter,
NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC);
adapter->pci_write_normalize(adapter,
NETXEN_ROMUSB_GLB_PEGTUNE_DONE, 1);
}
/* Start P2 boot loader */
val = adapter->pci_read_normalize(adapter,
NETXEN_ROMUSB_GLB_PEGTUNE_DONE);
adapter->pci_write_normalize(adapter,
NETXEN_ROMUSB_GLB_PEGTUNE_DONE, val | 0x1);
timeout = 0;
do {
msleep(1);
val = adapter->pci_read_normalize(adapter,
NETXEN_CAM_RAM(0x1fc));

if (++timeout > 5000)
return -EIO;

} while (val == NETXEN_BDINFO_MAGIC);
}
return ret;
return 0;
}

static void netxen_set_port_mode(struct netxen_adapter *adapter)

@@ -784,8 +806,8 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
CRB_CMDPEG_STATE, 0);
netxen_pinit_from_rom(adapter, 0);
msleep(1);
netxen_load_firmware(adapter);
}
netxen_load_firmware(adapter);

if (NX_IS_REVISION_P3(revision_id))
netxen_pcie_strap_init(adapter);

@@ -801,13 +823,6 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)

}

if ((first_boot == 0x55555555) &&
(NX_IS_REVISION_P2(revision_id))) {
/* Unlock the HW, prompting the boot sequence */
adapter->pci_write_normalize(adapter,
NETXEN_ROMUSB_GLB_PEGTUNE_DONE, 1);
}

err = netxen_initialize_adapter_offload(adapter);
if (err)
goto err_out_iounmap;

@@ -821,7 +836,9 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->pci_write_normalize(adapter, CRB_DRIVER_VERSION, i);

/* Handshake with the card before we register the devices. */
netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
err = netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
if (err)
goto err_out_free_offload;

} /* first_driver */

@@ -925,6 +942,7 @@ err_out_disable_msi:
if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
pci_disable_msi(pdev);

err_out_free_offload:
if (first_driver)
netxen_free_adapter_offload(adapter);

@@ -968,6 +986,9 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
netxen_free_hw_resources(adapter);
netxen_release_rx_buffers(adapter);
netxen_free_sw_resources(adapter);

if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
netxen_p3_free_mac_list(adapter);
}

if (adapter->portnum == 0)

@@ -1137,29 +1158,64 @@ static int netxen_nic_close(struct net_device *netdev)
return 0;
}

void netxen_tso_check(struct netxen_adapter *adapter,
static bool netxen_tso_check(struct net_device *netdev,
struct cmd_desc_type0 *desc, struct sk_buff *skb)
{
if (desc->mss) {
desc->total_hdr_length = (sizeof(struct ethhdr) +
ip_hdrlen(skb) + tcp_hdrlen(skb));
bool tso = false;
u8 opcode = TX_ETHER_PKT;

if ((NX_IS_REVISION_P3(adapter->ahw.revision_id)) &&
(skb->protocol == htons(ETH_P_IPV6)))
netxen_set_cmd_desc_opcode(desc, TX_TCP_LSO6);
else
netxen_set_cmd_desc_opcode(desc, TX_TCP_LSO);
if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
skb_shinfo(skb)->gso_size > 0) {

desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
desc->total_hdr_length =
skb_transport_offset(skb) + tcp_hdrlen(skb);

opcode = (skb->protocol == htons(ETH_P_IPV6)) ?
TX_TCP_LSO6 : TX_TCP_LSO;
tso = true;

} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
if (ip_hdr(skb)->protocol == IPPROTO_TCP)
netxen_set_cmd_desc_opcode(desc, TX_TCP_PKT);
else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
netxen_set_cmd_desc_opcode(desc, TX_UDP_PKT);
else
return;
u8 l4proto;

if (skb->protocol == htons(ETH_P_IP)) {
l4proto = ip_hdr(skb)->protocol;

if (l4proto == IPPROTO_TCP)
opcode = TX_TCP_PKT;
else if(l4proto == IPPROTO_UDP)
opcode = TX_UDP_PKT;
} else if (skb->protocol == htons(ETH_P_IPV6)) {
l4proto = ipv6_hdr(skb)->nexthdr;

if (l4proto == IPPROTO_TCP)
opcode = TX_TCPV6_PKT;
else if(l4proto == IPPROTO_UDP)
opcode = TX_UDPV6_PKT;
}
}
desc->tcp_hdr_offset = skb_transport_offset(skb);
desc->ip_hdr_offset = skb_network_offset(skb);
netxen_set_tx_flags_opcode(desc, 0, opcode);
return tso;
}

static void
netxen_clean_tx_dma_mapping(struct pci_dev *pdev,
struct netxen_cmd_buffer *pbuf, int last)
{
int k;
struct netxen_skb_frag *buffrag;

buffrag = &pbuf->frag_array[0];
pci_unmap_single(pdev, buffrag->dma,
buffrag->length, PCI_DMA_TODEVICE);

for (k = 1; k < last; k++) {
buffrag = &pbuf->frag_array[k];
pci_unmap_page(pdev, buffrag->dma,
buffrag->length, PCI_DMA_TODEVICE);
}
}

static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)

@@ -1167,33 +1223,22 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
struct netxen_adapter *adapter = netdev_priv(netdev);
struct netxen_hardware_context *hw = &adapter->ahw;
unsigned int first_seg_len = skb->len - skb->data_len;
struct netxen_cmd_buffer *pbuf;
struct netxen_skb_frag *buffrag;
unsigned int i;
struct cmd_desc_type0 *hwdesc;
struct pci_dev *pdev = adapter->pdev;
dma_addr_t temp_dma;
int i, k;

u32 producer, consumer;
u32 saved_producer = 0;
struct cmd_desc_type0 *hwdesc;
int k;
struct netxen_cmd_buffer *pbuf = NULL;
int frag_count;
int no_of_desc;
int frag_count, no_of_desc;
u32 num_txd = adapter->max_tx_desc_count;
bool is_tso = false;

frag_count = skb_shinfo(skb)->nr_frags + 1;

/* There 4 fragments per descriptor */
no_of_desc = (frag_count + 3) >> 2;
if (netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) {
if (skb_shinfo(skb)->gso_size > 0) {

no_of_desc++;
if ((ip_hdrlen(skb) + tcp_hdrlen(skb) +
sizeof(struct ethhdr)) >
(sizeof(struct cmd_desc_type0) - 2)) {
no_of_desc++;
}
}
}

producer = adapter->cmd_producer;
smp_mb();

@@ -1205,34 +1250,26 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
}

/* Copy the descriptors into the hardware */
saved_producer = producer;
hwdesc = &hw->cmd_desc_head[producer];
memset(hwdesc, 0, sizeof(struct cmd_desc_type0));
/* Take skb->data itself */
pbuf = &adapter->cmd_buf_arr[producer];
if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
skb_shinfo(skb)->gso_size > 0) {
pbuf->mss = skb_shinfo(skb)->gso_size;
hwdesc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
} else {
pbuf->mss = 0;
hwdesc->mss = 0;
}
pbuf->total_length = skb->len;
pbuf->skb = skb;
pbuf->cmd = TX_ETHER_PKT;
pbuf->frag_count = frag_count;
pbuf->port = adapter->portnum;
buffrag = &pbuf->frag_array[0];
buffrag->dma = pci_map_single(adapter->pdev, skb->data, first_seg_len,
PCI_DMA_TODEVICE);
buffrag->length = first_seg_len;
netxen_set_cmd_desc_totallength(hwdesc, skb->len);
netxen_set_cmd_desc_num_of_buff(hwdesc, frag_count);
netxen_set_cmd_desc_opcode(hwdesc, TX_ETHER_PKT);

netxen_set_cmd_desc_port(hwdesc, adapter->portnum);
netxen_set_cmd_desc_ctxid(hwdesc, adapter->portnum);
is_tso = netxen_tso_check(netdev, hwdesc, skb);

pbuf->skb = skb;
pbuf->frag_count = frag_count;
buffrag = &pbuf->frag_array[0];
temp_dma = pci_map_single(pdev, skb->data, first_seg_len,
PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(pdev, temp_dma))
goto drop_packet;

buffrag->dma = temp_dma;
buffrag->length = first_seg_len;
netxen_set_tx_frags_len(hwdesc, frag_count, skb->len);
netxen_set_tx_port(hwdesc, adapter->portnum);

hwdesc->buffer1_length = cpu_to_le16(first_seg_len);
hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);

@@ -1240,7 +1277,6 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
struct skb_frag_struct *frag;
int len, temp_len;
unsigned long offset;
dma_addr_t temp_dma;

/* move to next desc. if there is a need */
if ((i & 0x3) == 0) {

@@ -1256,8 +1292,12 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
offset = frag->page_offset;

temp_len = len;
temp_dma = pci_map_page(adapter->pdev, frag->page, offset,
temp_dma = pci_map_page(pdev, frag->page, offset,
len, PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(pdev, temp_dma)) {
netxen_clean_tx_dma_mapping(pdev, pbuf, i);
goto drop_packet;
}

buffrag++;
buffrag->dma = temp_dma;

@@ -1285,16 +1325,12 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
}
producer = get_next_index(producer, num_txd);

/* might change opcode to TX_TCP_LSO */
netxen_tso_check(adapter, &hw->cmd_desc_head[saved_producer], skb);

/* For LSO, we need to copy the MAC/IP/TCP headers into
* the descriptor ring
*/
if (netxen_get_cmd_desc_opcode(&hw->cmd_desc_head[saved_producer])
== TX_TCP_LSO) {
if (is_tso) {
int hdr_len, first_hdr_len, more_hdr;
hdr_len = hw->cmd_desc_head[saved_producer].total_hdr_length;
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
if (hdr_len > (sizeof(struct cmd_desc_type0) - 2)) {
first_hdr_len = sizeof(struct cmd_desc_type0) - 2;
more_hdr = 1;

@@ -1336,6 +1372,11 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
netdev->trans_start = jiffies;

return NETDEV_TX_OK;

drop_packet:
adapter->stats.txdropped++;
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}

static int netxen_nic_check_temp(struct netxen_adapter *adapter)

@@ -1407,6 +1448,8 @@ static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter)
netif_carrier_off(netdev);
netif_stop_queue(netdev);
}

netxen_nic_set_link_parameters(adapter);
} else if (!adapter->ahw.linkup && linkup) {
printk(KERN_INFO "%s: %s NIC Link is up\n",
netxen_nic_driver_name, netdev->name);

@@ -1415,6 +1458,8 @@ static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter)
netif_carrier_on(netdev);
netif_wake_queue(netdev);
}

netxen_nic_set_link_parameters(adapter);
}
}

@@ -231,15 +231,6 @@ struct phy_device * get_phy_device(struct mii_bus *bus, int addr)
if ((phy_id & 0x1fffffff) == 0x1fffffff)
return NULL;

/*
* Broken hardware is sometimes missing the pull-up resistor on the
* MDIO line, which results in reads to non-existent devices returning
* 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
* device as well.
*/
if (phy_id == 0)
return NULL;

dev = phy_device_create(bus, addr, phy_id);

return dev;

@@ -250,6 +250,7 @@ static int ppp_connect_channel(struct channel *pch, int unit);
static int ppp_disconnect_channel(struct channel *pch);
static void ppp_destroy_channel(struct channel *pch);
static int unit_get(struct idr *p, void *ptr);
static int unit_set(struct idr *p, void *ptr, int n);
static void unit_put(struct idr *p, int n);
static void *unit_find(struct idr *p, int n);

@@ -2432,11 +2433,18 @@ ppp_create_interface(int unit, int *retp)
} else {
if (unit_find(&ppp_units_idr, unit))
goto out2; /* unit already exists */
else {
/* darn, someone is cheating us? */
*retp = -EINVAL;
/*
* if caller need a specified unit number
* lets try to satisfy him, otherwise --
* he should better ask us for new unit number
*
* NOTE: yes I know that returning EEXIST it's not
* fair but at least pppd will ask us to allocate
* new unit in this case so user is happy :)
*/
unit = unit_set(&ppp_units_idr, ppp, unit);
if (unit < 0)
goto out2;
}
}

/* Initialize the new ppp unit */

@@ -2677,14 +2685,37 @@ static void __exit ppp_cleanup(void)
* by holding all_ppp_mutex
*/

/* associate pointer with specified number */
static int unit_set(struct idr *p, void *ptr, int n)
{
int unit, err;

again:
if (!idr_pre_get(p, GFP_KERNEL)) {
printk(KERN_ERR "PPP: No free memory for idr\n");
return -ENOMEM;
}

err = idr_get_new_above(p, ptr, n, &unit);
if (err == -EAGAIN)
goto again;

if (unit != n) {
idr_remove(p, unit);
return -EINVAL;
}

return unit;
}

/* get new free unit number and associate pointer with it */
static int unit_get(struct idr *p, void *ptr)
{
int unit, err;

again:
if (idr_pre_get(p, GFP_KERNEL) == 0) {
printk(KERN_ERR "Out of memory expanding drawable idr\n");
if (!idr_pre_get(p, GFP_KERNEL)) {
printk(KERN_ERR "PPP: No free memory for idr\n");
return -ENOMEM;
}

@@ -509,10 +509,10 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
else
ret = sis900_get_mac_addr(pci_dev, net_dev);

if (ret == 0) {
printk(KERN_WARNING "%s: Cannot read MAC address.\n", dev_name);
ret = -ENODEV;
goto err_unmap_rx;
if (!ret || !is_valid_ether_addr(net_dev->dev_addr)) {
random_ether_addr(net_dev->dev_addr);
printk(KERN_WARNING "%s: Unreadable or invalid MAC address,"
"using random generated one\n", dev_name);
}

/* 630ET : set the mii access mode as software-mode */

@@ -1793,8 +1793,8 @@ static int mux_device_request(struct hso_serial *serial, u8 type, u16 port,

/* initialize */
ctrl_req->wValue = 0;
ctrl_req->wIndex = hso_port_to_mux(port);
ctrl_req->wLength = size;
ctrl_req->wIndex = cpu_to_le16(hso_port_to_mux(port));
ctrl_req->wLength = cpu_to_le16(size);

if (type == USB_CDC_GET_ENCAPSULATED_RESPONSE) {
/* Reading command */

@@ -622,7 +622,7 @@ static void hss_hdlc_rx_irq(void *pdev)
printk(KERN_DEBUG "%s: hss_hdlc_rx_irq\n", dev->name);
#endif
qmgr_disable_irq(queue_ids[port->id].rx);
netif_rx_schedule(dev, &port->napi);
netif_rx_schedule(&port->napi);
}

static int hss_hdlc_poll(struct napi_struct *napi, int budget)

@@ -651,7 +651,7 @@ static int hss_hdlc_poll(struct napi_struct *napi, int budget)
printk(KERN_DEBUG "%s: hss_hdlc_poll"
" netif_rx_complete\n", dev->name);
#endif
netif_rx_complete(dev, napi);
netif_rx_complete(napi);
qmgr_enable_irq(rxq);
if (!qmgr_stat_empty(rxq) &&
netif_rx_reschedule(napi)) {

@@ -1069,7 +1069,7 @@ static int hss_hdlc_open(struct net_device *dev)
hss_start_hdlc(port);

/* we may already have RX data, enables IRQ */
netif_rx_schedule(dev, &port->napi);
netif_rx_schedule(&port->napi);
return 0;

err_unlock:

@@ -111,7 +111,7 @@ config WLAN_80211
lets you choose drivers.

config PCMCIA_RAYCS
tristate "Aviator/Raytheon 2.4MHz wireless support"
tristate "Aviator/Raytheon 2.4GHz wireless support"
depends on PCMCIA && WLAN_80211
select WIRELESS_EXT
---help---

@@ -2644,7 +2644,7 @@ ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
if (skb_headroom(skb) < padsize) {
ATH5K_ERR(sc, "tx hdrlen not %%4: %d not enough"
" headroom to pad %d\n", hdrlen, padsize);
return -1;
return NETDEV_TX_BUSY;
}
skb_push(skb, padsize);
memmove(skb->data, skb->data+padsize, hdrlen);

@@ -2655,7 +2655,7 @@ ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
ATH5K_ERR(sc, "no further txbuf available, dropping packet\n");
spin_unlock_irqrestore(&sc->txbuflock, flags);
ieee80211_stop_queue(hw, skb_get_queue_mapping(skb));
return -1;
return NETDEV_TX_BUSY;
}
bf = list_first_entry(&sc->txbuf, struct ath5k_buf, list);
list_del(&bf->list);

@@ -2673,10 +2673,10 @@ ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
sc->txbuf_len++;
spin_unlock_irqrestore(&sc->txbuflock, flags);
dev_kfree_skb_any(skb);
return 0;
return NETDEV_TX_OK;
}

return 0;
return NETDEV_TX_OK;
}

static int

@@ -65,7 +65,7 @@ int ath5k_hw_set_opmode(struct ath5k_hw *ah)
if (ah->ah_version == AR5K_AR5210)
pcu_reg |= AR5K_STA_ID1_NO_PSPOLL;
else
AR5K_REG_DISABLE_BITS(ah, AR5K_CFG, AR5K_CFG_ADHOC);
AR5K_REG_ENABLE_BITS(ah, AR5K_CFG, AR5K_CFG_IBSS);
break;

case NL80211_IFTYPE_AP:

@@ -75,7 +75,7 @@ int ath5k_hw_set_opmode(struct ath5k_hw *ah)
if (ah->ah_version == AR5K_AR5210)
pcu_reg |= AR5K_STA_ID1_NO_PSPOLL;
else
AR5K_REG_ENABLE_BITS(ah, AR5K_CFG, AR5K_CFG_ADHOC);
AR5K_REG_DISABLE_BITS(ah, AR5K_CFG, AR5K_CFG_IBSS);
break;

case NL80211_IFTYPE_STATION:

@@ -73,7 +73,7 @@
#define AR5K_CFG_SWRD 0x00000004 /* Byte-swap RX descriptor */
#define AR5K_CFG_SWRB 0x00000008 /* Byte-swap RX buffer */
#define AR5K_CFG_SWRG 0x00000010 /* Byte-swap Register access */
#define AR5K_CFG_ADHOC 0x00000020 /* AP/Adhoc indication [5211+] */
#define AR5K_CFG_IBSS 0x00000020 /* 0-BSS, 1-IBSS [5211+] */
#define AR5K_CFG_PHY_OK 0x00000100 /* [5211+] */
#define AR5K_CFG_EEBS 0x00000200 /* EEPROM is busy */
#define AR5K_CFG_CLKGD 0x00000400 /* Clock gated (Disable dynamic clock) */

@@ -1,6 +1,7 @@
config ATH9K
tristate "Atheros 802.11n wireless cards support"
depends on PCI && MAC80211 && WLAN_80211
depends on RFKILL || RFKILL=n
select MAC80211_LEDS
select LEDS_CLASS
select NEW_LEDS

@@ -2164,13 +2164,13 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
conf->ht.channel_type);
}

ath_update_chainmask(sc, conf->ht.enabled);

if (ath_set_channel(sc, &sc->sc_ah->ah_channels[pos]) < 0) {
DPRINTF(sc, ATH_DBG_FATAL, "Unable to set channel\n");
mutex_unlock(&sc->mutex);
return -EINVAL;
}

ath_update_chainmask(sc, conf->ht.enabled);
}

if (changed & IEEE80211_CONF_CHANGE_POWER)

@@ -126,15 +126,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
tx_info->flags |= IEEE80211_TX_STAT_ACK;
}

tx_info->status.rates[0].count = tx_status->retries;
if (tx_info->status.rates[0].flags & IEEE80211_TX_RC_MCS) {
/* Change idx from internal table index to MCS index */
int idx = tx_info->status.rates[0].idx;
struct ath_rate_table *rate_table = sc->cur_rate_table;
if (idx >= 0 && idx < rate_table->rate_cnt)
tx_info->status.rates[0].idx =
rate_table->info[idx].ratecode & 0x7f;
}
tx_info->status.rates[0].count = tx_status->retries + 1;

hdrlen = ieee80211_get_hdrlen_from_skb(skb);
padsize = hdrlen & 3;

@@ -264,25 +256,22 @@ static void assign_aggr_tid_seqno(struct sk_buff *skb,
}

/* Get seqno */

if (ieee80211_is_data(fc) && !is_pae(skb)) {
/* For HT capable stations, we save tidno for later use.
* We also override seqno set by upper layer with the one
* in tx aggregation state.
*
* If fragmentation is on, the sequence number is
* not overridden, since it has been
* incremented by the fragmentation routine.
*
* FIXME: check if the fragmentation threshold exceeds
* IEEE80211 max.
*/
tid = ATH_AN_2_TID(an, bf->bf_tidno);
hdr->seq_ctrl = cpu_to_le16(tid->seq_next <<
IEEE80211_SEQ_SEQ_SHIFT);
bf->bf_seqno = tid->seq_next;
INCR(tid->seq_next, IEEE80211_SEQ_MAX);
}
/* For HT capable stations, we save tidno for later use.
* We also override seqno set by upper layer with the one
* in tx aggregation state.
*
* If fragmentation is on, the sequence number is
* not overridden, since it has been
* incremented by the fragmentation routine.
*
* FIXME: check if the fragmentation threshold exceeds
* IEEE80211 max.
*/
tid = ATH_AN_2_TID(an, bf->bf_tidno);
hdr->seq_ctrl = cpu_to_le16(tid->seq_next <<
IEEE80211_SEQ_SEQ_SHIFT);
bf->bf_seqno = tid->seq_next;
INCR(tid->seq_next, IEEE80211_SEQ_MAX);
}

static int setup_tx_flags(struct ath_softc *sc, struct sk_buff *skb,

@@ -1718,11 +1707,10 @@ static int ath_tx_setup_buffer(struct ath_softc *sc, struct ath_buf *bf,

/* Assign seqno, tidno */

if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR))
if (ieee80211_is_data_qos(fc) && (sc->sc_flags & SC_OP_TXAGGR))
assign_aggr_tid_seqno(skb, bf);

/* DMA setup */

bf->bf_mpdu = skb;

bf->bf_dmacontext = pci_map_single(sc->pdev, skb->data,

@@ -3261,7 +3261,7 @@ static int b43_switch_band(struct b43_wl *wl, struct ieee80211_channel *chan)
struct b43_wldev *down_dev;
struct b43_wldev *d;
int err;
bool gmode;
bool uninitialized_var(gmode);
int prev_status;

/* Find a device and PHY which supports the band. */

@@ -2465,7 +2465,7 @@ static void b43legacy_put_phy_into_reset(struct b43legacy_wldev *dev)
static int b43legacy_switch_phymode(struct b43legacy_wl *wl,
unsigned int new_mode)
{
struct b43legacy_wldev *up_dev;
struct b43legacy_wldev *uninitialized_var(up_dev);
struct b43legacy_wldev *down_dev;
int err;
bool gmode = 0;

@@ -2219,7 +2219,7 @@ int iwl3945_txpower_set_from_eeprom(struct iwl3945_priv *priv)
/* set tx power value for all OFDM rates */
for (rate_index = 0; rate_index < IWL_OFDM_RATES;
rate_index++) {
s32 power_idx;
s32 uninitialized_var(power_idx);
int rc;

/* use channel group's clip-power table,

@@ -255,7 +255,7 @@ struct iwl_cmd_header {
* 0x3) 54 Mbps
*
* Legacy CCK rate format for bits 7:0 (bit 8 must be "0", bit 9 "1"):
* 3-0: 10) 1 Mbps
* 6-0: 10) 1 Mbps
* 20) 2 Mbps
* 55) 5.5 Mbps
* 110) 11 Mbps

@@ -51,6 +51,7 @@ const char *get_cmd_string(u8 cmd)
IWL_CMD(REPLY_REMOVE_STA);
IWL_CMD(REPLY_REMOVE_ALL_STA);
IWL_CMD(REPLY_WEPKEY);
IWL_CMD(REPLY_3945_RX);
IWL_CMD(REPLY_TX);
IWL_CMD(REPLY_RATE_SCALE);
IWL_CMD(REPLY_LEDS_CMD);

@@ -206,7 +206,7 @@ static int lbtf_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
* there are no buffered multicast frames to send
*/
ieee80211_stop_queues(priv->hw);
return 0;
return NETDEV_TX_OK;
}

static void lbtf_tx_work(struct work_struct *work)

@@ -1610,6 +1610,16 @@ static void orinoco_rx_isr_tasklet(unsigned long data)
struct orinoco_rx_data *rx_data, *temp;
struct hermes_rx_descriptor *desc;
struct sk_buff *skb;
unsigned long flags;

/* orinoco_rx requires the driver lock, and we also need to
* protect priv->rx_list, so just hold the lock over the
* lot.
*
* If orinoco_lock fails, we've unplugged the card. In this
* case just abort. */
if (orinoco_lock(priv, &flags) != 0)
return;

/* extract desc and skb from queue */
list_for_each_entry_safe(rx_data, temp, &priv->rx_list, list) {

@@ -1622,6 +1632,8 @@ static void orinoco_rx_isr_tasklet(unsigned long data)

kfree(desc);
}

orinoco_unlock(priv, &flags);
}

/********************************************************************/

@@ -3645,12 +3657,22 @@ struct net_device
void free_orinocodev(struct net_device *dev)
{
struct orinoco_private *priv = netdev_priv(dev);
struct orinoco_rx_data *rx_data, *temp;

/* No need to empty priv->rx_list: if the tasklet is scheduled
* when we call tasklet_kill it will run one final time,
* emptying the list */
/* If the tasklet is scheduled when we call tasklet_kill it
* will run one final time. However the tasklet will only
* drain priv->rx_list if the hw is still available. */
tasklet_kill(&priv->rx_tasklet);

/* Explicitly drain priv->rx_list */
list_for_each_entry_safe(rx_data, temp, &priv->rx_list, list) {
list_del(&rx_data->list);

dev_kfree_skb(rx_data->skb);
kfree(rx_data->desc);
kfree(rx_data);
}

unregister_pm_notifier(&priv->pm_notifier);
orinoco_uncache_fw(priv);

@@ -435,6 +435,7 @@ static struct pcmcia_device_id orinoco_cs_ids[] = {
PCMCIA_DEVICE_MANF_CARD(0x0250, 0x0002), /* Samsung SWL2000-N 11Mb/s WLAN Card */
PCMCIA_DEVICE_MANF_CARD(0x0261, 0x0002), /* AirWay 802.11 Adapter (PCMCIA) */
PCMCIA_DEVICE_MANF_CARD(0x0268, 0x0001), /* ARtem Onair */
PCMCIA_DEVICE_MANF_CARD(0x0268, 0x0003), /* ARtem Onair Comcard 11 */
PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0305), /* Buffalo WLI-PCM-S11 */
PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1612), /* Linksys WPC11 Version 2.5 */
PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1613), /* Linksys WPC11 Version 3 */

@@ -138,6 +138,7 @@ int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
u8 *fw_version = NULL;
size_t len;
int i;
int maxlen;

if (priv->rx_start)
return 0;

@@ -195,6 +196,16 @@ int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
else
priv->rx_mtu = (size_t)
0x620 - priv->tx_hdr_len;
maxlen = priv->tx_hdr_len + /* USB devices */
sizeof(struct p54_rx_data) +
4 + /* rx alignment */
IEEE80211_MAX_FRAG_THRESHOLD;
if (priv->rx_mtu > maxlen && PAGE_SIZE == 4096) {
printk(KERN_INFO "p54: rx_mtu reduced from %d "
"to %d\n", priv->rx_mtu,
maxlen);
priv->rx_mtu = maxlen;
}
break;
}
case BR_CODE_EXPOSED_IF:

@@ -575,6 +586,7 @@ static int p54_rx_data(struct ieee80211_hw *dev, struct sk_buff *skb)
u16 freq = le16_to_cpu(hdr->freq);
size_t header_len = sizeof(*hdr);
u32 tsf32;
u8 rate = hdr->rate & 0xf;

/*
* If the device is in a unspecified state we have to

@@ -603,8 +615,11 @@ static int p54_rx_data(struct ieee80211_hw *dev, struct sk_buff *skb)
rx_status.qual = (100 * hdr->rssi) / 127;
if (hdr->rate & 0x10)
rx_status.flag |= RX_FLAG_SHORTPRE;
rx_status.rate_idx = (dev->conf.channel->band == IEEE80211_BAND_2GHZ ?
hdr->rate : (hdr->rate - 4)) & 0xf;
if (dev->conf.channel->band == IEEE80211_BAND_5GHZ)
rx_status.rate_idx = (rate < 4) ? 0 : rate - 4;
else
rx_status.rate_idx = rate;

rx_status.freq = freq;
rx_status.band = dev->conf.channel->band;
rx_status.antenna = hdr->antenna;

@@ -798,6 +813,16 @@ static void p54_rx_frame_sent(struct ieee80211_hw *dev, struct sk_buff *skb)
info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
info->status.ack_signal = p54_rssi_to_dbm(dev,
(int)payload->ack_rssi);

if (entry_data->key_type == P54_CRYPTO_TKIPMICHAEL) {
u8 *iv = (u8 *)(entry_data->align + pad +
entry_data->crypt_offset);

/* Restore the original TKIP IV. */
iv[2] = iv[0];
iv[0] = iv[1];
iv[1] = (iv[0] | 0x20) & 0x7f; /* WEPSeed - 8.3.2.2 */
}
skb_pull(entry, sizeof(*hdr) + pad + sizeof(*entry_data));
ieee80211_tx_status_irqsafe(dev, entry);
goto out;

@@ -1383,7 +1408,6 @@ static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
hdr->tries = ridx;
txhdr->rts_rate_idx = 0;
if (info->control.hw_key) {
crypt_offset += info->control.hw_key->iv_len;
txhdr->key_type = p54_convert_algo(info->control.hw_key->alg);
txhdr->key_len = min((u8)16, info->control.hw_key->keylen);
memcpy(txhdr->key, info->control.hw_key->key, txhdr->key_len);

@@ -1397,6 +1421,8 @@ static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
}
/* reserve some space for ICV */
len += info->control.hw_key->icv_len;
memset(skb_put(skb, info->control.hw_key->icv_len), 0,
info->control.hw_key->icv_len);
} else {
txhdr->key_type = 0;
txhdr->key_len = 0;

@@ -1824,7 +1850,7 @@ static void p54_remove_interface(struct ieee80211_hw *dev,

static int p54_config(struct ieee80211_hw *dev, u32 changed)
{
int ret;
int ret = 0;
struct p54_common *priv = dev->priv;
struct ieee80211_conf *conf = &dev->conf;

@@ -56,6 +56,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
{USB_DEVICE(0x050d, 0x7050)}, /* Belkin F5D7050 ver 1000 */
{USB_DEVICE(0x0572, 0x2000)}, /* Cohiba Proto board */
{USB_DEVICE(0x0572, 0x2002)}, /* Cohiba Proto board */
{USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */
{USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */
{USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */
{USB_DEVICE(0x0846, 0x4240)}, /* Netgear WG111 (v2) */

@@ -284,6 +285,7 @@ static void p54u_tx_lm87(struct ieee80211_hw *dev, struct sk_buff *skb)
usb_fill_bulk_urb(data_urb, priv->udev,
usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA),
skb->data, skb->len, p54u_tx_cb, skb);
data_urb->transfer_flags |= URB_ZERO_PACKET;

usb_anchor_urb(data_urb, &priv->submitted);
if (usb_submit_urb(data_urb, GFP_ATOMIC)) {

@@ -38,7 +38,7 @@
/*
* Allow hardware encryption to be disabled.
*/
static int modparam_nohwcrypt = 1;
static int modparam_nohwcrypt = 0;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");

@@ -376,11 +376,11 @@ static int rt2500usb_config_key(struct rt2x00_dev *rt2x00dev,

/*
* The driver does not support the IV/EIV generation
* in hardware. However it doesn't support the IV/EIV
* inside the ieee80211 frame either, but requires it
* to be provided seperately for the descriptor.
* rt2x00lib will cut the IV/EIV data out of all frames
* given to us by mac80211, but we must tell mac80211
* in hardware. However it demands the data to be provided
* both seperately as well as inside the frame.
* We already provided the CONFIG_CRYPTO_COPY_IV to rt2x00lib
* to ensure rt2x00lib will not strip the data from the
* frame after the copy, now we must tell mac80211
* to generate the IV/EIV data.
*/
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;

@@ -1181,7 +1181,7 @@ static void rt2500usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skb->len);
rt2x00_set_field32(&word, TXD_W0_CIPHER, txdesc->cipher);
rt2x00_set_field32(&word, TXD_W0_CIPHER, !!txdesc->cipher);
rt2x00_set_field32(&word, TXD_W0_KEY_ID, txdesc->key_idx);
rt2x00_desc_write(txd, 0, word);
}

@@ -1334,14 +1334,7 @@ static void rt2500usb_fill_rxdone(struct queue_entry *entry,

/* ICV is located at the end of frame */

/*
* Hardware has stripped IV/EIV data from 802.11 frame during
* decryption. It has provided the data seperately but rt2x00lib
* should decide if it should be reinserted.
*/
rxdesc->flags |= RX_FLAG_IV_STRIPPED;
if (rxdesc->cipher != CIPHER_TKIP)
rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
rxdesc->flags |= RX_FLAG_DECRYPTED;
else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)

@@ -807,13 +807,11 @@ static void rt2x00lib_rate(struct ieee80211_rate *entry,
{
entry->flags = 0;
entry->bitrate = rate->bitrate;
entry->hw_value = rt2x00_create_rate_hw_value(index, 0);
entry->hw_value_short = entry->hw_value;
entry->hw_value =index;
entry->hw_value_short = index;

if (rate->flags & DEV_RATE_SHORT_PREAMBLE) {
if (rate->flags & DEV_RATE_SHORT_PREAMBLE)
entry->flags |= IEEE80211_RATE_SHORT_PREAMBLE;
entry->hw_value_short |= rt2x00_create_rate_hw_value(index, 1);
}
}

static int rt2x00lib_probe_hw_modes(struct rt2x00_dev *rt2x00dev,
|
||||
|
|
|
@@ -97,7 +97,7 @@ void rt2x00leds_led_assoc(struct rt2x00_dev *rt2x00dev, bool enabled)
 
 void rt2x00leds_led_radio(struct rt2x00_dev *rt2x00dev, bool enabled)
 {
-	if (rt2x00dev->led_radio.type == LED_TYPE_ASSOC)
+	if (rt2x00dev->led_radio.type == LED_TYPE_RADIO)
 		rt2x00led_led_simple(&rt2x00dev->led_radio, enabled);
 }
 
@@ -52,22 +52,11 @@ struct rt2x00_rate {
 
 extern const struct rt2x00_rate rt2x00_supported_rates[12];
 
-static inline u16 rt2x00_create_rate_hw_value(const u16 index,
-					      const u16 short_preamble)
-{
-	return (short_preamble << 8) | (index & 0xff);
-}
-
 static inline const struct rt2x00_rate *rt2x00_get_rate(const u16 hw_value)
 {
 	return &rt2x00_supported_rates[hw_value & 0xff];
 }
 
-static inline int rt2x00_get_rate_preamble(const u16 hw_value)
-{
-	return (hw_value & 0xff00);
-}
-
 /*
  * Radio control handlers.
  */
@@ -313,7 +313,7 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
 		 * When preamble is enabled we should set the
 		 * preamble bit for the signal.
 		 */
-		if (rt2x00_get_rate_preamble(rate->hw_value))
+		if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
 			txdesc->signal |= 0x08;
 	}
 }
@@ -434,11 +434,11 @@ static int rt2x00usb_find_endpoints(struct rt2x00_dev *rt2x00dev)
 
 		if (usb_endpoint_is_bulk_in(ep_desc)) {
 			rt2x00usb_assign_endpoint(rt2x00dev->rx, ep_desc);
-		} else if (usb_endpoint_is_bulk_out(ep_desc)) {
+		} else if (usb_endpoint_is_bulk_out(ep_desc) &&
+			   (queue != queue_end(rt2x00dev))) {
 			rt2x00usb_assign_endpoint(queue, ep_desc);
-			queue = queue_next(queue);
 
+			if (queue != queue_end(rt2x00dev))
+				queue = queue_next(queue);
 			tx_ep_desc = ep_desc;
 		}
 	}
@@ -2321,6 +2321,7 @@ static struct usb_device_id rt73usb_device_table[] = {
 	/* Linksys */
 	{ USB_DEVICE(0x13b1, 0x0020), USB_DEVICE_DATA(&rt73usb_ops) },
 	{ USB_DEVICE(0x13b1, 0x0023), USB_DEVICE_DATA(&rt73usb_ops) },
+	{ USB_DEVICE(0x13b1, 0x0028), USB_DEVICE_DATA(&rt73usb_ops) },
 	/* MSI */
 	{ USB_DEVICE(0x0db0, 0x6877), USB_DEVICE_DATA(&rt73usb_ops) },
 	{ USB_DEVICE(0x0db0, 0x6874), USB_DEVICE_DATA(&rt73usb_ops) },
@@ -897,6 +897,7 @@ static int __devinit rtl8180_probe(struct pci_dev *pdev,
 	dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
 		     IEEE80211_HW_RX_INCLUDES_FCS |
 		     IEEE80211_HW_SIGNAL_UNSPEC;
+	dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
 	dev->queues = 1;
 	dev->max_signal = 65;
 
@@ -213,7 +213,7 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
 	urb = usb_alloc_urb(0, GFP_ATOMIC);
 	if (!urb) {
 		kfree_skb(skb);
-		return -ENOMEM;
+		return NETDEV_TX_OK;
 	}
 
 	flags = skb->len;
@@ -281,7 +281,7 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
 	}
 	usb_free_urb(urb);
 
-	return rc;
+	return NETDEV_TX_OK;
 }
 
 static void rtl8187_rx_cb(struct urb *urb)
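Both rtl8187_tx() hunks enforce the ownership rule documented in the netdevice.h hunk further down: once the transmit handler has taken the skb (and possibly freed it), it must report NETDEV_TX_OK; returning -ENOMEM after kfree_skb() would make the caller treat the frame as unsent and touch a buffer that no longer exists. A compilable toy model of the rule (all names hypothetical):

    #include <stdio.h>
    #include <stdlib.h>

    #define TX_OK   0   /* frame consumed, even if it had to be dropped */
    #define TX_BUSY 1   /* frame NOT consumed; caller retries the same buffer */

    /* Toy transmit path: on allocation failure the frame is dropped by the
     * handler itself, so it still reports TX_OK -- anything else would make
     * the caller retry with a pointer that was already freed. */
    static int toy_xmit(char *frame)
    {
        void *urb = malloc(64);          /* stand-in for usb_alloc_urb() */
        if (!urb) {
            free(frame);                 /* we own the frame: drop it */
            return TX_OK;
        }
        printf("sent: %s\n", frame);
        free(urb);
        free(frame);
        return TX_OK;
    }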
@@ -1471,6 +1471,7 @@ static void __devexit rtl8187_disconnect(struct usb_interface *intf)
 	ieee80211_unregister_hw(dev);
 
 	priv = dev->priv;
+	usb_reset_device(priv->udev);
 	usb_put_dev(interface_to_usbdev(intf));
 	ieee80211_free_hw(dev);
 }
@@ -467,7 +467,7 @@ struct netdev_queue {
  * This function is called when network device transitions to the down
  * state.
  *
- * int (*ndo_hard_start_xmit)(struct sk_buff *skb, struct net_device *dev);
+ * int (*ndo_start_xmit)(struct sk_buff *skb, struct net_device *dev);
  *	Called when a packet needs to be transmitted.
  *	Must return NETDEV_TX_OK, NETDEV_TX_BUSY, or NETDEV_TX_LOCKED.
  *	Required; can not be NULL.
@@ -795,6 +795,7 @@ struct net_device
 	       NETREG_UNREGISTERING,	/* called unregister_netdevice */
 	       NETREG_UNREGISTERED,	/* completed unregister todo */
 	       NETREG_RELEASED,		/* called free_netdev */
+	       NETREG_DUMMY,		/* dummy device for NAPI poll */
 	} reg_state;
 
 	/* Called from unregister, can be used to call free_netdev */
@@ -1077,6 +1078,8 @@ extern void		free_netdev(struct net_device *dev);
 extern void		synchronize_net(void);
 extern int		register_netdevice_notifier(struct notifier_block *nb);
 extern int		unregister_netdevice_notifier(struct notifier_block *nb);
+extern int		init_dummy_netdev(struct net_device *dev);
+
 extern int		call_netdevice_notifiers(unsigned long val, struct net_device *dev);
 extern struct net_device	*dev_get_by_index(struct net *net, int ifindex);
 extern struct net_device	*__dev_get_by_index(struct net *net, int ifindex);
@@ -270,6 +270,7 @@ struct xt_match
 	struct list_head list;
 
 	const char name[XT_FUNCTION_MAXNAMELEN-1];
+	u_int8_t revision;
 
 	/* Return true or false: return FALSE and set *hotdrop = 1 to
 	   force immediate packet drop. */
@@ -302,7 +303,6 @@ struct xt_match
 	unsigned short proto;
 
 	unsigned short family;
-	u_int8_t revision;
 };
 
 /* Registration hooks for targets. */
@@ -58,11 +58,11 @@ static struct ctl_table_header *brnf_sysctl_header;
 static int brnf_call_iptables __read_mostly = 1;
 static int brnf_call_ip6tables __read_mostly = 1;
 static int brnf_call_arptables __read_mostly = 1;
-static int brnf_filter_vlan_tagged __read_mostly = 1;
-static int brnf_filter_pppoe_tagged __read_mostly = 1;
+static int brnf_filter_vlan_tagged __read_mostly = 0;
+static int brnf_filter_pppoe_tagged __read_mostly = 0;
 #else
-#define brnf_filter_vlan_tagged 1
-#define brnf_filter_pppoe_tagged 1
+#define brnf_filter_vlan_tagged 0
+#define brnf_filter_pppoe_tagged 0
 #endif
 
 static inline __be16 vlan_proto(const struct sk_buff *skb)
@@ -686,8 +686,11 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb,
 	if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) ||
 	    IS_PPPOE_IP(skb))
 		pf = PF_INET;
-	else
+	else if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
+		 IS_PPPOE_IPV6(skb))
 		pf = PF_INET6;
+	else
+		return NF_ACCEPT;
 
 	nf_bridge_pull_encap_header(skb);
 
@@ -828,8 +831,11 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
 	if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) ||
 	    IS_PPPOE_IP(skb))
 		pf = PF_INET;
-	else
+	else if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
+		 IS_PPPOE_IPV6(skb))
 		pf = PF_INET6;
+	else
+		return NF_ACCEPT;
 
 #ifdef CONFIG_NETFILTER_DEBUG
 	if (skb->dst == NULL) {
@@ -79,7 +79,7 @@ static inline int ebt_do_match (struct ebt_entry_match *m,
 {
 	par->match     = m->u.match;
 	par->matchinfo = m->data;
-	return m->u.match->match(skb, par);
+	return m->u.match->match(skb, par) ? EBT_MATCH : EBT_NOMATCH;
 }
 
 static inline int ebt_dev_check(char *entry, const struct net_device *device)
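The ebt_do_match() fix bridges two return conventions: an xtables match returns true when the packet matches, while the ebtables walker compares against EBT_MATCH, which is 0 (EBT_NOMATCH is 1). Without the mapping, every successful match would read as a non-match. A self-contained sketch of the adapter (the constants mirror the ebtables ABI; everything else is hypothetical):

    #include <stdbool.h>

    #define EBT_MATCH   0   /* note the inversion: 0 means "matched" */
    #define EBT_NOMATCH 1

    typedef bool (*xt_match_fn)(const void *pkt);   /* true on match */

    static int ebt_style_result(xt_match_fn match, const void *pkt)
    {
        /* map the boolean onto the inverted ebtables convention */
        return match(pkt) ? EBT_MATCH : EBT_NOMATCH;
    }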
@@ -347,16 +347,42 @@ static void bcm_tx_timeout_tsklet(unsigned long data)
 	struct bcm_op *op = (struct bcm_op *)data;
 	struct bcm_msg_head msg_head;
 
-	/* create notification to user */
-	msg_head.opcode = TX_EXPIRED;
-	msg_head.flags = op->flags;
-	msg_head.count = op->count;
-	msg_head.ival1 = op->ival1;
-	msg_head.ival2 = op->ival2;
-	msg_head.can_id = op->can_id;
-	msg_head.nframes = 0;
+	if (op->kt_ival1.tv64 && (op->count > 0)) {
 
-	bcm_send_to_user(op, &msg_head, NULL, 0);
+		op->count--;
+		if (!op->count && (op->flags & TX_COUNTEVT)) {
+
+			/* create notification to user */
+			msg_head.opcode = TX_EXPIRED;
+			msg_head.flags = op->flags;
+			msg_head.count = op->count;
+			msg_head.ival1 = op->ival1;
+			msg_head.ival2 = op->ival2;
+			msg_head.can_id = op->can_id;
+			msg_head.nframes = 0;
+
+			bcm_send_to_user(op, &msg_head, NULL, 0);
+		}
+	}
+
+	if (op->kt_ival1.tv64 && (op->count > 0)) {
+
+		/* send (next) frame */
+		bcm_can_tx(op);
+		hrtimer_start(&op->timer,
+			      ktime_add(ktime_get(), op->kt_ival1),
+			      HRTIMER_MODE_ABS);
+
+	} else {
+		if (op->kt_ival2.tv64) {
+
+			/* send (next) frame */
+			bcm_can_tx(op);
+			hrtimer_start(&op->timer,
+				      ktime_add(ktime_get(), op->kt_ival2),
+				      HRTIMER_MODE_ABS);
+		}
+	}
 }
 
 /*
@@ -365,33 +391,10 @@ static void bcm_tx_timeout_tsklet(unsigned long data)
 static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
 {
 	struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
-	enum hrtimer_restart ret = HRTIMER_NORESTART;
 
-	if (op->kt_ival1.tv64 && (op->count > 0)) {
-
-		op->count--;
-		if (!op->count && (op->flags & TX_COUNTEVT))
-			tasklet_schedule(&op->tsklet);
-	}
-
-	if (op->kt_ival1.tv64 && (op->count > 0)) {
-
-		/* send (next) frame */
-		bcm_can_tx(op);
-		hrtimer_forward(hrtimer, ktime_get(), op->kt_ival1);
-		ret = HRTIMER_RESTART;
-
-	} else {
-		if (op->kt_ival2.tv64) {
-
-			/* send (next) frame */
-			bcm_can_tx(op);
-			hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2);
-			ret = HRTIMER_RESTART;
-		}
-	}
+	tasklet_schedule(&op->tsklet);
 
-	return ret;
+	return HRTIMER_NORESTART;
 }
 
 /*
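Taken together, the two bcm.c hunks move all real work (building the TX_EXPIRED notification, retransmitting, re-arming the timer) out of the hrtimer callback, which runs in hard-irq context, and into a tasklet; the handler shrinks to a single tasklet_schedule(). The same "do almost nothing in the handler, defer the rest" shape in portable C, with a signal standing in for the timer interrupt (hypothetical names; a sketch of the pattern, not the kernel mechanism):

    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    static volatile sig_atomic_t work_pending;   /* the "tasklet" flag */

    /* handler context: only mark the work, like tasklet_schedule() */
    static void timer_handler(int signo)
    {
        (void)signo;
        work_pending = 1;
    }

    int main(void)
    {
        signal(SIGALRM, timer_handler);
        alarm(1);
        while (!work_pending)
            pause();                 /* wait for the "interrupt" */
        work_pending = 0;
        puts("heavy work done outside the handler");
        return 0;
    }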
@@ -2392,6 +2392,9 @@ int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 	if (!(skb->dev->features & NETIF_F_GRO))
 		goto normal;
 
+	if (skb_is_gso(skb) || skb_shinfo(skb)->frag_list)
+		goto normal;
+
 	rcu_read_lock();
 	list_for_each_entry_rcu(ptype, head, list) {
 		struct sk_buff *p;
@@ -2488,12 +2491,6 @@ EXPORT_SYMBOL(napi_gro_receive);
 
 void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
 {
-	skb_shinfo(skb)->nr_frags = 0;
-
-	skb->len -= skb->data_len;
-	skb->truesize -= skb->data_len;
-	skb->data_len = 0;
-
 	__skb_pull(skb, skb_headlen(skb));
 	skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
 
@@ -4433,6 +4430,45 @@ err_uninit:
 	goto out;
 }
 
+/**
+ *	init_dummy_netdev	- init a dummy network device for NAPI
+ *	@dev: device to init
+ *
+ *	This takes a network device structure and initializes the minimum
+ *	amount of fields so it can be used to schedule NAPI polls without
+ *	registering a full blown interface. This is to be used by drivers
+ *	that need to tie several hardware interfaces to a single NAPI
+ *	poll scheduler due to HW limitations.
+ */
+int init_dummy_netdev(struct net_device *dev)
+{
+	/* Clear everything. Note we don't initialize spinlocks
+	 * as they aren't supposed to be taken by any of the
+	 * NAPI code and this dummy netdev is supposed to be
+	 * only ever used for NAPI polls
+	 */
+	memset(dev, 0, sizeof(struct net_device));
+
+	/* make sure we BUG if trying to hit standard
+	 * register/unregister code path
+	 */
+	dev->reg_state = NETREG_DUMMY;
+
+	/* initialize the ref count */
+	atomic_set(&dev->refcnt, 1);
+
+	/* NAPI wants this */
+	INIT_LIST_HEAD(&dev->napi_list);
+
+	/* a dummy interface is started by default */
+	set_bit(__LINK_STATE_PRESENT, &dev->state);
+	set_bit(__LINK_STATE_START, &dev->state);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(init_dummy_netdev);
+
+
 /**
  *	register_netdev	- register a network device
  *	@dev: device to register
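As the kernel-doc above says, init_dummy_netdev() exists so a driver can tie several hardware units to one NAPI poll without registering a real interface. A hedged, kernel-style sketch of the intended usage (hypothetical driver names; it follows the 2.6.29-era netif_napi_add() signature and is not compilable outside the tree):

    /* One dummy netdev ties several HW channels to a single NAPI context. */
    struct myhw {
            struct net_device dummy;     /* never registered, NAPI-only */
            struct napi_struct napi;
    };

    static int myhw_poll(struct napi_struct *napi, int budget);

    static void myhw_setup(struct myhw *hw)
    {
            init_dummy_netdev(&hw->dummy);
            netif_napi_add(&hw->dummy, &hw->napi, myhw_poll, 64);
            napi_enable(&hw->napi);
    }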
@@ -2602,6 +2602,12 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 		       skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
 
 		skb_shinfo(p)->nr_frags += skb_shinfo(skb)->nr_frags;
+		skb_shinfo(skb)->nr_frags = 0;
+
+		skb->truesize -= skb->data_len;
+		skb->len -= skb->data_len;
+		skb->data_len = 0;
+
 		NAPI_GRO_CB(skb)->free = 1;
 		goto done;
 	}
@@ -93,13 +93,8 @@ ipt_local_out_hook(unsigned int hook,
 {
 	/* root is playing with raw sockets. */
 	if (skb->len < sizeof(struct iphdr) ||
-	    ip_hdrlen(skb) < sizeof(struct iphdr)) {
-		if (net_ratelimit())
-			printk("iptable_filter: ignoring short SOCK_RAW "
-			       "packet.\n");
+	    ip_hdrlen(skb) < sizeof(struct iphdr))
 		return NF_ACCEPT;
-	}
 	return ipt_do_table(skb, hook, in, out,
 			    dev_net(out)->ipv4.iptable_filter);
 }
@@ -132,12 +132,8 @@ ipt_local_hook(unsigned int hook,
 
 	/* root is playing with raw sockets. */
 	if (skb->len < sizeof(struct iphdr)
-	    || ip_hdrlen(skb) < sizeof(struct iphdr)) {
-		if (net_ratelimit())
-			printk("iptable_mangle: ignoring short SOCK_RAW "
-			       "packet.\n");
+	    || ip_hdrlen(skb) < sizeof(struct iphdr))
 		return NF_ACCEPT;
-	}
 
 	/* Save things which could affect route */
 	mark = skb->mark;
@@ -65,12 +65,8 @@ ipt_local_hook(unsigned int hook,
 {
 	/* root is playing with raw sockets. */
 	if (skb->len < sizeof(struct iphdr) ||
-	    ip_hdrlen(skb) < sizeof(struct iphdr)) {
-		if (net_ratelimit())
-			printk("iptable_raw: ignoring short SOCK_RAW "
-			       "packet.\n");
+	    ip_hdrlen(skb) < sizeof(struct iphdr))
 		return NF_ACCEPT;
-	}
 	return ipt_do_table(skb, hook, in, out,
 			    dev_net(out)->ipv4.iptable_raw);
 }
@@ -96,12 +96,8 @@ ipt_local_out_hook(unsigned int hook,
 {
 	/* Somebody is playing with raw sockets. */
 	if (skb->len < sizeof(struct iphdr)
-	    || ip_hdrlen(skb) < sizeof(struct iphdr)) {
-		if (net_ratelimit())
-			printk(KERN_INFO "iptable_security: ignoring short "
-			       "SOCK_RAW packet.\n");
+	    || ip_hdrlen(skb) < sizeof(struct iphdr))
 		return NF_ACCEPT;
-	}
 	return ipt_do_table(skb, hook, in, out,
 			    dev_net(out)->ipv4.iptable_security);
 }
@@ -145,11 +145,8 @@ static unsigned int ipv4_conntrack_local(unsigned int hooknum,
 {
 	/* root is playing with raw sockets. */
 	if (skb->len < sizeof(struct iphdr) ||
-	    ip_hdrlen(skb) < sizeof(struct iphdr)) {
-		if (net_ratelimit())
-			printk("ipt_hook: happy cracking.\n");
+	    ip_hdrlen(skb) < sizeof(struct iphdr))
 		return NF_ACCEPT;
-	}
 	return nf_conntrack_in(dev_net(out), PF_INET, hooknum, skb);
 }
 
@@ -20,7 +20,7 @@
 #include <net/netfilter/nf_conntrack_core.h>
 #include <net/netfilter/nf_log.h>
 
-static unsigned long nf_ct_icmp_timeout __read_mostly = 30*HZ;
+static unsigned int nf_ct_icmp_timeout __read_mostly = 30*HZ;
 
 static bool icmp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
 			      struct nf_conntrack_tuple *tuple)
@@ -522,8 +522,12 @@ static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
 			       unsigned int offset, size_t len)
 {
 	struct tcp_splice_state *tss = rd_desc->arg.data;
+	int ret;
 
-	return skb_splice_bits(skb, offset, tss->pipe, tss->len, tss->flags);
+	ret = skb_splice_bits(skb, offset, tss->pipe, rd_desc->count, tss->flags);
+	if (ret > 0)
+		rd_desc->count -= ret;
+	return ret;
 }
 
 static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
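The splice fix turns rd_desc->count into a live byte budget: the actor consumes at most that many bytes per call and shrinks it, instead of passing the full tss->len to skb_splice_bits() every time, which could splice more than the caller asked for. The tcp_read_sock() actor contract in miniature (plain C, hypothetical names):

    #include <stdio.h>
    #include <stddef.h>

    struct read_desc {
        size_t count;                 /* remaining byte budget */
    };

    /* consume at most desc->count bytes, then shrink the budget,
     * mirroring the fixed tcp_splice_data_recv() */
    static size_t actor(struct read_desc *desc, size_t available)
    {
        size_t take = available < desc->count ? available : desc->count;
        desc->count -= take;
        return take;
    }

    int main(void)
    {
        struct read_desc desc = { .count = 10 };   /* like .count = tss->len */
        size_t skb_sizes[] = { 4, 4, 4 };
        for (int i = 0; i < 3; i++)
            printf("consumed %zu, budget left %zu\n",
                   actor(&desc, skb_sizes[i]), desc.count);
        return 0;   /* the third call is clamped to the 2 bytes still allowed */
    }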
@@ -531,6 +535,7 @@ static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
 	/* Store TCP splice context information in read_descriptor_t. */
 	read_descriptor_t rd_desc = {
 		.arg.data = tss,
+		.count	  = tss->len,
 	};
 
 	return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
@@ -611,11 +616,13 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
 		tss.len -= ret;
 		spliced += ret;
 
+		if (!timeo)
+			break;
 		release_sock(sk);
 		lock_sock(sk);
 
 		if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
-		    (sk->sk_shutdown & RCV_SHUTDOWN) || !timeo ||
+		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
 		    signal_pending(current))
 			break;
 	}
@@ -2382,7 +2389,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
 	unsigned int seq;
 	__be32 delta;
 	unsigned int oldlen;
-	unsigned int len;
+	unsigned int mss;
 
 	if (!pskb_may_pull(skb, sizeof(*th)))
 		goto out;
@@ -2398,10 +2405,13 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
 	oldlen = (u16)~skb->len;
 	__skb_pull(skb, thlen);
 
+	mss = skb_shinfo(skb)->gso_size;
+	if (unlikely(skb->len <= mss))
+		goto out;
+
 	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
 		/* Packet is from an untrusted source, reset gso_segs. */
 		int type = skb_shinfo(skb)->gso_type;
-		int mss;
 
 		if (unlikely(type &
 			     ~(SKB_GSO_TCPV4 |
@@ -2412,7 +2422,6 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
 			     !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
 			goto out;
 
-		mss = skb_shinfo(skb)->gso_size;
 		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
 
 		segs = NULL;
@@ -2423,8 +2432,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
 	if (IS_ERR(segs))
 		goto out;
 
-	len = skb_shinfo(skb)->gso_size;
-	delta = htonl(oldlen + (thlen + len));
+	delta = htonl(oldlen + (thlen + mss));
 
 	skb = segs;
 	th = tcp_hdr(skb);
@@ -2440,7 +2448,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
 					csum_fold(csum_partial(skb_transport_header(skb),
 							       thlen, skb->csum));
 
-		seq += len;
+		seq += mss;
 		skb = skb->next;
 		th = tcp_hdr(skb);
 
@@ -298,6 +298,10 @@ static void fib6_dump_end(struct netlink_callback *cb)
 	struct fib6_walker_t *w = (void*)cb->args[2];
 
 	if (w) {
+		if (cb->args[4]) {
+			cb->args[4] = 0;
+			fib6_walker_unlink(w);
+		}
 		cb->args[2] = 0;
 		kfree(w);
 	}
@@ -330,15 +334,12 @@ static int fib6_dump_table(struct fib6_table *table, struct sk_buff *skb,
 		read_lock_bh(&table->tb6_lock);
 		res = fib6_walk_continue(w);
 		read_unlock_bh(&table->tb6_lock);
-		if (res != 0) {
-			if (res < 0)
-				fib6_walker_unlink(w);
-			goto end;
+		if (res <= 0) {
+			fib6_walker_unlink(w);
+			cb->args[4] = 0;
 		}
-		fib6_walker_unlink(w);
-		cb->args[4] = 0;
 	}
 end:
 	return res;
 }
 
@@ -26,7 +26,7 @@
 #include <net/netfilter/ipv6/nf_conntrack_icmpv6.h>
 #include <net/netfilter/nf_log.h>
 
-static unsigned long nf_ct_icmpv6_timeout __read_mostly = 30*HZ;
+static unsigned int nf_ct_icmpv6_timeout __read_mostly = 30*HZ;
 
 static bool icmpv6_pkt_to_tuple(const struct sk_buff *skb,
 				unsigned int dataoff,
@@ -469,7 +469,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
 	struct ieee80211_sub_if_data *sdata;
 	u16 start_seq_num;
 	u8 *state;
-	int ret;
+	int ret = 0;
 
 	if ((tid >= STA_TID_NUM) || !(hw->flags & IEEE80211_HW_AMPDU_AGGREGATION))
 		return -EINVAL;
@@ -699,7 +699,8 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
 		return 0;
 
 	/* Setting ad-hoc mode on non-IBSS channel is not supported. */
-	if (sdata->local->oper_channel->flags & IEEE80211_CHAN_NO_IBSS)
+	if (sdata->local->oper_channel->flags & IEEE80211_CHAN_NO_IBSS &&
+	    type == NL80211_IFTYPE_ADHOC)
 		return -EOPNOTSUPP;
 
 	/*
@@ -107,6 +107,7 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
 
 	sta->flags = WLAN_STA_AUTHORIZED;
 	sta->sta.supp_rates[local->hw.conf.channel->band] = rates;
+	rate_control_rate_init(sta);
 
 	return sta;
 }
@@ -395,13 +395,15 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband,
 {
 	struct minstrel_sta_info *mi = priv_sta;
 	struct minstrel_priv *mp = priv;
-	struct minstrel_rate *mr_ctl;
+	struct ieee80211_local *local = hw_to_local(mp->hw);
+	struct ieee80211_rate *ctl_rate;
 	unsigned int i, n = 0;
 	unsigned int t_slot = 9; /* FIXME: get real slot time */
 
 	mi->lowest_rix = rate_lowest_index(sband, sta);
-	mr_ctl = &mi->r[rix_to_ndx(mi, mi->lowest_rix)];
-	mi->sp_ack_dur = mr_ctl->ack_time;
+	ctl_rate = &sband->bitrates[mi->lowest_rix];
+	mi->sp_ack_dur = ieee80211_frame_duration(local, 10, ctl_rate->bitrate,
+				!!(ctl_rate->flags & IEEE80211_RATE_ERP_G), 1);
 
 	for (i = 0; i < sband->n_bitrates; i++) {
 		struct minstrel_rate *mr = &mi->r[n];
@@ -416,7 +418,7 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband,
 
 		mr->rix = i;
 		mr->bitrate = sband->bitrates[i].bitrate / 5;
-		calc_rate_durations(mi, hw_to_local(mp->hw), mr,
+		calc_rate_durations(mi, local, mr,
 				    &sband->bitrates[i]);
 
 		/* calculate maximum number of retransmissions before
@@ -469,7 +469,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,
 				   const struct nf_conntrack_tuple *repl,
 				   gfp_t gfp)
 {
-	struct nf_conn *ct = NULL;
+	struct nf_conn *ct;
 
 	if (unlikely(!nf_conntrack_hash_rnd_initted)) {
 		get_random_bytes(&nf_conntrack_hash_rnd, 4);
@@ -551,7 +551,7 @@ init_conntrack(struct net *net,
 	}
 
 	ct = nf_conntrack_alloc(net, tuple, &repl_tuple, GFP_ATOMIC);
-	if (ct == NULL || IS_ERR(ct)) {
+	if (IS_ERR(ct)) {
 		pr_debug("Can't allocate conntrack.\n");
 		return (struct nf_conntrack_tuple_hash *)ct;
 	}
@@ -1134,7 +1134,7 @@ ctnetlink_create_conntrack(struct nlattr *cda[],
 	struct nf_conntrack_helper *helper;
 
 	ct = nf_conntrack_alloc(&init_net, otuple, rtuple, GFP_ATOMIC);
-	if (ct == NULL || IS_ERR(ct))
+	if (IS_ERR(ct))
 		return -ENOMEM;
 
 	if (!cda[CTA_TIMEOUT])
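Both conntrack hunks drop the ct == NULL test because nf_conntrack_alloc() no longer returns NULL: failures come back encoded in the pointer itself, so IS_ERR() alone covers them. The linux/err.h convention, re-created as a self-contained program (simplified; the real macros carry extra annotations):

    #include <stdio.h>
    #include <errno.h>

    /* Addresses in the top page are never valid kernel pointers, so small
     * negative error codes can be smuggled inside a pointer value. */
    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error)    { return (void *)error; }
    static inline long  PTR_ERR(const void *p) { return (long)p; }
    static inline int   IS_ERR(const void *p)
    {
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    static void *alloc_thing(int fail)
    {
        static int thing;
        return fail ? ERR_PTR(-ENOMEM) : (void *)&thing;   /* never NULL */
    }

    int main(void)
    {
        void *p = alloc_thing(1);
        if (IS_ERR(p))              /* no separate NULL check needed */
            printf("failed: %ld\n", PTR_ERR(p));
        return 0;
    }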
|
@ -273,6 +273,10 @@ static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
|
|||
have_rev = 1;
|
||||
}
|
||||
}
|
||||
|
||||
if (af != NFPROTO_UNSPEC && !have_rev)
|
||||
return match_revfn(NFPROTO_UNSPEC, name, revision, bestp);
|
||||
|
||||
return have_rev;
|
||||
}
|
||||
|
||||
|
@@ -289,6 +293,10 @@ static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
 			have_rev = 1;
 		}
 	}
+
+	if (af != NFPROTO_UNSPEC && !have_rev)
+		return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);
+
 	return have_rev;
 }
 
@@ -243,6 +243,17 @@ static struct xt_match xt_time_mt_reg __read_mostly = {
 
 static int __init time_mt_init(void)
 {
+	int minutes = sys_tz.tz_minuteswest;
+
+	if (minutes < 0) /* east of Greenwich */
+		printk(KERN_INFO KBUILD_MODNAME
+		       ": kernel timezone is +%02d%02d\n",
+		       -minutes / 60, -minutes % 60);
+	else /* west of Greenwich */
+		printk(KERN_INFO KBUILD_MODNAME
+		       ": kernel timezone is -%02d%02d\n",
+		       minutes / 60, minutes % 60);
+
 	return xt_register_match(&xt_time_mt_reg);
 }
 
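The message printed by the new time_mt_init() code exists because sys_tz.tz_minuteswest has a counter-intuitive sign: it counts minutes west of Greenwich, so UTC+1 is stored as -60. The same arithmetic as a userspace check:

    #include <stdio.h>

    /* tz_minuteswest counts minutes *west* of Greenwich, so zones east of
     * UTC are negative; display flips the sign, as the printk above does. */
    static void show_tz(int minuteswest)
    {
        if (minuteswest < 0)   /* east of Greenwich */
            printf("kernel timezone is +%02d%02d\n",
                   -minuteswest / 60, -minuteswest % 60);
        else                   /* west of Greenwich */
            printf("kernel timezone is -%02d%02d\n",
                   minuteswest / 60, minuteswest % 60);
    }

    int main(void)
    {
        show_tz(-60);   /* UTC+1 prints "+0100" */
        show_tz(300);   /* UTC-5 prints "-0500" */
        return 0;
    }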
@@ -661,12 +661,13 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
  * next pending event (0 for no event in pq).
  * Note: Applied are events whose cl->pq_key <= q->now.
  */
-static psched_time_t htb_do_events(struct htb_sched *q, int level)
+static psched_time_t htb_do_events(struct htb_sched *q, int level,
+				   unsigned long start)
 {
 	/* don't run for longer than 2 jiffies; 2 is used instead of
 	   1 to simplify things when jiffy is going to be incremented
 	   too soon */
-	unsigned long stop_at = jiffies + 2;
+	unsigned long stop_at = start + 2;
 	while (time_before(jiffies, stop_at)) {
 		struct htb_class *cl;
 		long diff;
@@ -685,8 +686,8 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level)
 		if (cl->cmode != HTB_CAN_SEND)
 			htb_add_to_wait_tree(q, cl, diff);
 	}
-	/* too much load - let's continue on next jiffie */
-	return q->now + PSCHED_TICKS_PER_SEC / HZ;
+	/* too much load - let's continue on next jiffie (including above) */
+	return q->now + 2 * PSCHED_TICKS_PER_SEC / HZ;
 }
 
 /* Returns class->node+prio from id-tree where class's id is >= id. NULL
@@ -845,6 +846,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 	struct htb_sched *q = qdisc_priv(sch);
 	int level;
 	psched_time_t next_event;
+	unsigned long start_at;
 
 	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
 	skb = __skb_dequeue(&q->direct_queue);
@@ -857,6 +859,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 	if (!sch->q.qlen)
 		goto fin;
 	q->now = psched_get_time();
+	start_at = jiffies;
 
 	next_event = q->now + 5 * PSCHED_TICKS_PER_SEC;
 
@@ -866,14 +869,14 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 			psched_time_t event;
 
 			if (q->now >= q->near_ev_cache[level]) {
-				event = htb_do_events(q, level);
+				event = htb_do_events(q, level, start_at);
 				if (!event)
 					event = q->now + PSCHED_TICKS_PER_SEC;
 				q->near_ev_cache[level] = event;
 			} else
 				event = q->near_ev_cache[level];
 
-			if (event && next_event > event)
+			if (next_event > event)
 				next_event = event;
 
 			m = ~q->row_mask[level];
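The htb changes hang together: htb_dequeue() samples jiffies once into start_at, and every per-level htb_do_events() call measures its 2-jiffy work budget from that shared starting point rather than from its own entry time, so the levels cannot collectively overrun the deadline. The shape of that pattern in portable C (hypothetical names, milliseconds instead of jiffies):

    #include <stdio.h>
    #include <time.h>

    static double now_ms(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000.0 + ts.tv_nsec / 1e6;
    }

    /* Drain items until the work or the *shared* time budget runs out.
     * The caller passes 'start' in, so several calls split one deadline,
     * as htb_do_events() now does with start_at. */
    static int do_events(long *work, double start, double budget_ms)
    {
        while (now_ms() - start < budget_ms) {
            if (*work == 0)
                return 0;        /* all events applied */
            (*work)--;
        }
        return 1;                /* too much load: resume next tick */
    }

    int main(void)
    {
        long work = 50 * 1000 * 1000;
        double start = now_ms();          /* one deadline for all levels */
        for (int level = 0; level < 8 && work; level++)
            do_events(&work, start, 2.0);
        printf("work left: %ld\n", work);
        return 0;
    }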
@@ -1914,10 +1914,17 @@ static int xfrm_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
 }
 #endif
 
+/* For the xfrm_usersa_info cases we have to work around some 32-bit vs.
+ * 64-bit compatibility issues.  On 32-bit the structure is 220 bytes, but
+ * for 64-bit it gets padded out to 224 bytes.  Those bytes are just
+ * padding and don't have any content we care about.  Therefore as long
+ * as we have enough bytes for the content we can make both cases work.
+ */
+
 #define XMSGSIZE(type) sizeof(struct type)
 
 static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
-	[XFRM_MSG_NEWSA       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
+	[XFRM_MSG_NEWSA       - XFRM_MSG_BASE] = 220, /* see above */
 	[XFRM_MSG_DELSA       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
 	[XFRM_MSG_GETSA       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
 	[XFRM_MSG_NEWPOLICY   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
@@ -1927,7 +1934,7 @@ static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
 	[XFRM_MSG_ACQUIRE     - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_acquire),
 	[XFRM_MSG_EXPIRE      - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_expire),
 	[XFRM_MSG_UPDPOLICY   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
-	[XFRM_MSG_UPDSA       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
+	[XFRM_MSG_UPDSA       - XFRM_MSG_BASE] = 220, /* see above */
 	[XFRM_MSG_POLEXPIRE   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_polexpire),
 	[XFRM_MSG_FLUSHSA     - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush),
 	[XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = 0,
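The 220-versus-224 discrepancy described in the new comment is ordinary trailing padding: on 64-bit builds the structure's strictest member alignment rounds sizeof() up to the next 8-byte multiple, while many 32-bit ABIs stop at 4. A small demonstration of the effect (a hypothetical struct, not the real xfrm_usersa_info layout):

    #include <stdio.h>
    #include <stdint.h>

    /* Same disease, smaller patient: the 64-bit member forces 8-byte
     * alignment, so the struct gets tail padding on LP64 targets. */
    struct demo {
        uint64_t big;      /* alignment 8 on most 64-bit ABIs */
        uint32_t small;    /* 4 bytes of content ... */
    };                     /* ... plus 4 bytes of tail padding on 64-bit */

    int main(void)
    {
        /* Typically 16 on x86-64 (12 bytes of content + 4 of padding);
         * an i386 build, where uint64_t aligns to 4, prints 12. */
        printf("sizeof(struct demo) = %zu\n", sizeof(struct demo));
        return 0;
    }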