commit 1b63ba8a86
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

Conflicts:
	drivers/net/wireless/iwlwifi/iwl4965-base.c
@@ -83,9 +83,9 @@ Valid range: Limited by memory on system
 Default: 30

 e. intr_type
-Specifies interrupt type. Possible values 1(INTA), 2(MSI), 3(MSI-X)
-Valid range: 1-3
-Default: 1
+Specifies interrupt type. Possible values 0(INTA), 2(MSI-X)
+Valid values: 0, 2
+Default: 2

 5. Performance suggestions
 General:
@@ -27,6 +27,8 @@
 #include <linux/moduleparam.h>
 #include <linux/connector.h>
 #include <linux/mutex.h>
+#include <linux/proc_fs.h>
+#include <linux/spinlock.h>

 #include <net/sock.h>

@@ -403,6 +405,40 @@ static void cn_callback(void *data)
 mutex_unlock(&notify_lock);
 }

+static int cn_proc_show(struct seq_file *m, void *v)
+{
+struct cn_queue_dev *dev = cdev.cbdev;
+struct cn_callback_entry *cbq;
+
+seq_printf(m, "Name ID\n");
+
+spin_lock_bh(&dev->queue_lock);
+
+list_for_each_entry(cbq, &dev->queue_list, callback_entry) {
+seq_printf(m, "%-15s %u:%u\n",
+cbq->id.name,
+cbq->id.id.idx,
+cbq->id.id.val);
+}
+
+spin_unlock_bh(&dev->queue_lock);
+
+return 0;
+}
+
+static int cn_proc_open(struct inode *inode, struct file *file)
+{
+return single_open(file, cn_proc_show, NULL);
+}
+
+static const struct file_operations cn_file_ops = {
+.owner = THIS_MODULE,
+.open = cn_proc_open,
+.read = seq_read,
+.llseek = seq_lseek,
+.release = single_release
+};
+
 static int __devinit cn_init(void)
 {
 struct cn_dev *dev = &cdev;
@@ -434,6 +470,8 @@ static int __devinit cn_init(void)
 return -EINVAL;
 }

+proc_net_fops_create(&init_net, "connector", S_IRUGO, &cn_file_ops);
+
 return 0;
 }

@@ -443,6 +481,8 @@ static void __devexit cn_fini(void)

 cn_already_initialized = 0;

+proc_net_remove(&init_net, "connector");
+
 cn_del_callback(&dev->id);
 cn_queue_free_dev(dev->cbdev);
 netlink_kernel_release(dev->nls);
@@ -1768,9 +1768,10 @@ vortex_timer(unsigned long data)
 case XCVR_MII: case XCVR_NWAY:
 {
 ok = 1;
-spin_lock_bh(&vp->lock);
+/* Interrupts are already disabled */
+spin_lock(&vp->lock);
 vortex_check_media(dev, 0);
-spin_unlock_bh(&vp->lock);
+spin_unlock(&vp->lock);
 }
 break;
 default: /* Other media types handled by Tx timeouts. */
@@ -1803,6 +1803,8 @@ static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
 if (rx->prev->skb) {
 struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
 put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
+pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
+sizeof(struct rfd), PCI_DMA_TODEVICE);
 }

 return 0;
@@ -347,7 +347,7 @@ e1000_set_tso(struct net_device *netdev, u32 data)
 else
 netdev->features &= ~NETIF_F_TSO;

-if (data)
+if (data && (adapter->hw.mac_type > e1000_82547_rev_2))
 netdev->features |= NETIF_F_TSO6;
 else
 netdev->features &= ~NETIF_F_TSO6;
@@ -2535,7 +2535,8 @@ void e1000e_down(struct e1000_adapter *adapter)
 adapter->link_speed = 0;
 adapter->link_duplex = 0;

-e1000e_reset(adapter);
+if (!pci_channel_offline(adapter->pdev))
+e1000e_reset(adapter);
 e1000_clean_tx_ring(adapter);
 e1000_clean_rx_ring(adapter);

@@ -1077,8 +1077,6 @@ static inline void rx_off(struct scc_priv *priv)

 static void start_timer(struct scc_priv *priv, int t, int r15)
 {
-unsigned long flags;
-
 outb(priv->tmr_mode, priv->tmr_ctrl);
 if (t == 0) {
 tm_isr(priv);
@@ -718,7 +718,8 @@ void igb_down(struct igb_adapter *adapter)
 adapter->link_speed = 0;
 adapter->link_duplex = 0;

-igb_reset(adapter);
+if (!pci_channel_offline(adapter->pdev))
+igb_reset(adapter);
 igb_clean_all_tx_rings(adapter);
 igb_clean_all_rx_rings(adapter);
 }
@@ -1271,7 +1271,7 @@ static void ipg_nic_rx_with_end(struct net_device *dev,

 framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;

-endframeLen = framelen - jumbo->current_size;
+endframelen = framelen - jumbo->current_size;
 /*
 if (framelen > IPG_RXFRAG_SIZE)
 framelen=IPG_RXFRAG_SIZE;
@@ -1279,8 +1279,8 @@ static void ipg_nic_rx_with_end(struct net_device *dev,
 if (framelen > IPG_RXSUPPORT_SIZE)
 dev_kfree_skb_irq(jumbo->skb);
 else {
-memcpy(skb_put(jumbo->skb, endframeLen),
-skb->data, endframeLen);
+memcpy(skb_put(jumbo->skb, endframelen),
+skb->data, endframelen);

 jumbo->skb->protocol =
 eth_type_trans(jumbo->skb, dev);
@@ -1352,16 +1352,16 @@ static int ipg_nic_rx(struct net_device *dev)

 switch (ipg_nic_rx_check_frame_type(dev)) {
 case FRAME_WITH_START_WITH_END:
-ipg_nic_rx_with_start_and_end(dev, tp, rxfd, entry);
+ipg_nic_rx_with_start_and_end(dev, sp, rxfd, entry);
 break;
 case FRAME_WITH_START:
-ipg_nic_rx_with_start(dev, tp, rxfd, entry);
+ipg_nic_rx_with_start(dev, sp, rxfd, entry);
 break;
 case FRAME_WITH_END:
-ipg_nic_rx_with_end(dev, tp, rxfd, entry);
+ipg_nic_rx_with_end(dev, sp, rxfd, entry);
 break;
 case FRAME_NO_START_NO_END:
-ipg_nic_rx_no_start_no_end(dev, tp, rxfd, entry);
+ipg_nic_rx_no_start_no_end(dev, sp, rxfd, entry);
 break;
 }
 }
@@ -1808,7 +1808,7 @@ static int ipg_nic_open(struct net_device *dev)
 /* initialize JUMBO Frame control variable */
 sp->jumbo.found_start = 0;
 sp->jumbo.current_size = 0;
-sp->jumbo.skb = 0;
+sp->jumbo.skb = NULL;
 dev->mtu = IPG_TXFRAG_SIZE;
 #endif

@@ -1969,7 +1969,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
 netif_carrier_off(netdev);
 netif_stop_queue(netdev);

-ixgbe_reset(adapter);
+if (!pci_channel_offline(adapter->pdev))
+ixgbe_reset(adapter);
 ixgbe_clean_all_tx_rings(adapter);
 ixgbe_clean_all_rx_rings(adapter);

@@ -71,14 +71,18 @@ static irqreturn_t netxen_intr(int irq, void *data);
 static irqreturn_t netxen_msi_intr(int irq, void *data);

 /* PCI Device ID Table */
+#define ENTRY(device) \
+{PCI_DEVICE(0x4040, (device)), \
+.class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
+
 static struct pci_device_id netxen_pci_tbl[] __devinitdata = {
-{PCI_DEVICE(0x4040, 0x0001), PCI_DEVICE_CLASS(0x020000, ~0)},
-{PCI_DEVICE(0x4040, 0x0002), PCI_DEVICE_CLASS(0x020000, ~0)},
-{PCI_DEVICE(0x4040, 0x0003), PCI_DEVICE_CLASS(0x020000, ~0)},
-{PCI_DEVICE(0x4040, 0x0004), PCI_DEVICE_CLASS(0x020000, ~0)},
-{PCI_DEVICE(0x4040, 0x0005), PCI_DEVICE_CLASS(0x020000, ~0)},
-{PCI_DEVICE(0x4040, 0x0024), PCI_DEVICE_CLASS(0x020000, ~0)},
-{PCI_DEVICE(0x4040, 0x0025), PCI_DEVICE_CLASS(0x020000, ~0)},
+ENTRY(0x0001),
+ENTRY(0x0002),
+ENTRY(0x0003),
+ENTRY(0x0004),
+ENTRY(0x0005),
+ENTRY(0x0024),
+ENTRY(0x0025),
 {0,}
 };

@@ -525,12 +525,14 @@ static int axnet_open(struct net_device *dev)
 int ret;
 axnet_dev_t *info = PRIV(dev);
 struct pcmcia_device *link = info->p_dev;
+unsigned int nic_base = dev->base_addr;

 DEBUG(2, "axnet_open('%s')\n", dev->name);

 if (!pcmcia_dev_present(link))
 return -ENODEV;

+outb_p(0xFF, nic_base + EN0_ISR); /* Clear bogus intr. */
 ret = request_irq(dev->irq, ei_irq_wrapper, IRQF_SHARED, "axnet_cs", dev);
 if (ret)
 return ret;
@@ -969,6 +969,7 @@ static int pcnet_open(struct net_device *dev)
 int ret;
 pcnet_dev_t *info = PRIV(dev);
 struct pcmcia_device *link = info->p_dev;
+unsigned int nic_base = dev->base_addr;

 DEBUG(2, "pcnet_open('%s')\n", dev->name);

@@ -976,6 +977,8 @@ static int pcnet_open(struct net_device *dev)
 return -ENODEV;

 set_misc_reg(dev);
+
+outb_p(0xFF, nic_base + EN0_ISR); /* Clear bogus intr. */
 ret = request_irq(dev->irq, ei_irq_wrapper, IRQF_SHARED, dev_info, dev);
 if (ret)
 return ret;
@@ -942,7 +942,7 @@ static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock,
 m->msg_namelen = 0;

 if (skb) {
-total_len = min(total_len, skb->len);
+total_len = min_t(size_t, total_len, skb->len);
 error = skb_copy_datagram_iovec(skb, 0, m->msg_iov, total_len);
 if (error == 0)
 error = total_len;
@@ -3701,7 +3701,9 @@ static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset)
 printk(KERN_ERR PFX
 "%s: Driver up/down cycle failed, "
 "closing device\n",qdev->ndev->name);
+rtnl_lock();
 dev_close(qdev->ndev);
+rtnl_unlock();
 return -1;
 }
 return 0;
@@ -273,7 +273,7 @@ static void r6040_init_ring_desc(struct r6040_descriptor *desc_ring,
 dma_addr_t mapping = desc_dma;

 while (size-- > 0) {
-mapping += sizeof(sizeof(*desc));
+mapping += sizeof(*desc);
 desc->ndesc = cpu_to_le32(mapping);
 desc->vndescp = desc + 1;
 desc++;
@@ -2625,9 +2625,7 @@ static int fill_rx_buffers(struct ring_info *ring)
 rxdp1->Buffer0_ptr = pci_map_single
 (ring->pdev, skb->data, size - NET_IP_ALIGN,
 PCI_DMA_FROMDEVICE);
-if( (rxdp1->Buffer0_ptr == 0) ||
-(rxdp1->Buffer0_ptr ==
-DMA_ERROR_CODE))
+if(pci_dma_mapping_error(rxdp1->Buffer0_ptr))
 goto pci_map_failed;

 rxdp->Control_2 =
@@ -2657,6 +2655,7 @@ static int fill_rx_buffers(struct ring_info *ring)
 skb->data = (void *) (unsigned long)tmp;
 skb_reset_tail_pointer(skb);

+/* AK: check is wrong. 0 can be valid dma address */
 if (!(rxdp3->Buffer0_ptr))
 rxdp3->Buffer0_ptr =
 pci_map_single(ring->pdev, ba->ba_0,
@@ -2665,8 +2664,7 @@ static int fill_rx_buffers(struct ring_info *ring)
 pci_dma_sync_single_for_device(ring->pdev,
 (dma_addr_t) rxdp3->Buffer0_ptr,
 BUF0_LEN, PCI_DMA_FROMDEVICE);
-if( (rxdp3->Buffer0_ptr == 0) ||
-(rxdp3->Buffer0_ptr == DMA_ERROR_CODE))
+if (pci_dma_mapping_error(rxdp3->Buffer0_ptr))
 goto pci_map_failed;

 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
@@ -2681,18 +2679,17 @@ static int fill_rx_buffers(struct ring_info *ring)
 (ring->pdev, skb->data, ring->mtu + 4,
 PCI_DMA_FROMDEVICE);

-if( (rxdp3->Buffer2_ptr == 0) ||
-(rxdp3->Buffer2_ptr == DMA_ERROR_CODE))
+if (pci_dma_mapping_error(rxdp3->Buffer2_ptr))
 goto pci_map_failed;

+/* AK: check is wrong */
 if (!rxdp3->Buffer1_ptr)
 rxdp3->Buffer1_ptr =
 pci_map_single(ring->pdev,
 ba->ba_1, BUF1_LEN,
 PCI_DMA_FROMDEVICE);

-if( (rxdp3->Buffer1_ptr == 0) ||
-(rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
+if (pci_dma_mapping_error(rxdp3->Buffer1_ptr)) {
 pci_unmap_single
 (ring->pdev,
 (dma_addr_t)(unsigned long)
@@ -4264,16 +4261,14 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 txdp->Buffer_Pointer = pci_map_single(sp->pdev,
 fifo->ufo_in_band_v,
 sizeof(u64), PCI_DMA_TODEVICE);
-if((txdp->Buffer_Pointer == 0) ||
-(txdp->Buffer_Pointer == DMA_ERROR_CODE))
+if (pci_dma_mapping_error(txdp->Buffer_Pointer))
 goto pci_map_failed;
 txdp++;
 }

 txdp->Buffer_Pointer = pci_map_single
 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
-if((txdp->Buffer_Pointer == 0) ||
-(txdp->Buffer_Pointer == DMA_ERROR_CODE))
+if (pci_dma_mapping_error(txdp->Buffer_Pointer))
 goto pci_map_failed;

 txdp->Host_Control = (unsigned long) skb;
@@ -6884,10 +6879,8 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
 pci_map_single( sp->pdev, (*skb)->data,
 size - NET_IP_ALIGN,
 PCI_DMA_FROMDEVICE);
-if( (rxdp1->Buffer0_ptr == 0) ||
-(rxdp1->Buffer0_ptr == DMA_ERROR_CODE)) {
+if (pci_dma_mapping_error(rxdp1->Buffer0_ptr))
 goto memalloc_failed;
-}
 rxdp->Host_Control = (unsigned long) (*skb);
 }
 } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
@@ -6913,15 +6906,12 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
 pci_map_single(sp->pdev, (*skb)->data,
 dev->mtu + 4,
 PCI_DMA_FROMDEVICE);
-if( (rxdp3->Buffer2_ptr == 0) ||
-(rxdp3->Buffer2_ptr == DMA_ERROR_CODE)) {
+if (pci_dma_mapping_error(rxdp3->Buffer2_ptr))
 goto memalloc_failed;
-}
 rxdp3->Buffer0_ptr = *temp0 =
 pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
 PCI_DMA_FROMDEVICE);
-if( (rxdp3->Buffer0_ptr == 0) ||
-(rxdp3->Buffer0_ptr == DMA_ERROR_CODE)) {
+if (pci_dma_mapping_error(rxdp3->Buffer0_ptr)) {
 pci_unmap_single (sp->pdev,
 (dma_addr_t)rxdp3->Buffer2_ptr,
 dev->mtu + 4, PCI_DMA_FROMDEVICE);
@@ -6933,8 +6923,7 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
 rxdp3->Buffer1_ptr = *temp1 =
 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
 PCI_DMA_FROMDEVICE);
-if( (rxdp3->Buffer1_ptr == 0) ||
-(rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
+if (pci_dma_mapping_error(rxdp3->Buffer1_ptr)) {
 pci_unmap_single (sp->pdev,
 (dma_addr_t)rxdp3->Buffer0_ptr,
 BUF0_LEN, PCI_DMA_FROMDEVICE);
@@ -75,10 +75,6 @@ static int debug_level = ERR_DBG;
 /* DEBUG message print. */
 #define DBG_PRINT(dbg_level, args...) if(!(debug_level<dbg_level)) printk(args)

-#ifndef DMA_ERROR_CODE
-#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
-#endif
-
 /* Protocol assist features of the NIC */
 #define L3_CKSUM_OK 0xFFFF
 #define L4_CKSUM_OK 0xFFFF
@@ -1394,6 +1394,7 @@ tc35815_open(struct net_device *dev)
 tc35815_chip_init(dev);
 spin_unlock_irq(&lp->lock);

+netif_carrier_off(dev);
 /* schedule a link state check */
 phy_start(lp->phy_dev);

@@ -1735,7 +1736,6 @@ tc35815_rx(struct net_device *dev)
 skb = lp->rx_skbs[cur_bd].skb;
 prefetch(skb->data);
 lp->rx_skbs[cur_bd].skb = NULL;
-lp->fbl_count--;
 pci_unmap_single(lp->pci_dev,
 lp->rx_skbs[cur_bd].skb_dma,
 RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
@@ -1791,6 +1791,7 @@ tc35815_rx(struct net_device *dev)
 #ifdef TC35815_USE_PACKEDBUFFER
 while (lp->fbl_curid != id)
 #else
+lp->fbl_count--;
 while (lp->fbl_count < RX_BUF_NUM)
 #endif
 {
@@ -2453,6 +2454,7 @@ static int tc35815_resume(struct pci_dev *pdev)
 return 0;
 pci_set_power_state(pdev, PCI_D0);
 tc35815_restart(dev);
+netif_carrier_off(dev);
 if (lp->phy_dev)
 phy_start(lp->phy_dev);
 netif_device_attach(dev);
@@ -32,6 +32,7 @@
 #include <linux/x25.h>
 #include <linux/lapb.h>
 #include <linux/init.h>
+#include <linux/rtnetlink.h>
 #include "x25_asy.h"

 #include <net/x25device.h>
@@ -601,8 +602,10 @@ static void x25_asy_close_tty(struct tty_struct *tty)
 if (!sl || sl->magic != X25_ASY_MAGIC)
 return;

+rtnl_lock();
 if (sl->dev->flags & IFF_UP)
 dev_close(sl->dev);
+rtnl_unlock();

 tty->disc_data = NULL;
 sl->tty = NULL;
@@ -72,6 +72,9 @@ static void b43_led_brightness_set(struct led_classdev *led_dev,
 struct b43_wldev *dev = led->dev;
 bool radio_enabled;

+if (unlikely(b43_status(dev) < B43_STAT_INITIALIZED))
+return;
+
 /* Checking the radio-enabled status here is slightly racy,
 * but we want to avoid the locking overhead and we don't care
 * whether the LED has the wrong state for a second. */
@@ -2976,12 +2976,11 @@ static int b43_op_tx(struct ieee80211_hw *hw,

 if (unlikely(skb->len < 2 + 2 + 6)) {
 /* Too short, this can't be a valid frame. */
-dev_kfree_skb_any(skb);
-return NETDEV_TX_OK;
+goto drop_packet;
 }
 B43_WARN_ON(skb_shinfo(skb)->nr_frags);
 if (unlikely(!dev))
-return NETDEV_TX_BUSY;
+goto drop_packet;

 /* Transmissions on seperate queues can run concurrently. */
 read_lock_irqsave(&wl->tx_lock, flags);
@@ -2997,7 +2996,12 @@ static int b43_op_tx(struct ieee80211_hw *hw,
 read_unlock_irqrestore(&wl->tx_lock, flags);

 if (unlikely(err))
-return NETDEV_TX_BUSY;
+goto drop_packet;
+return NETDEV_TX_OK;
+
+drop_packet:
+/* We can not transmit this packet. Drop it. */
+dev_kfree_skb_any(skb);
 return NETDEV_TX_OK;
 }

@@ -876,6 +876,7 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
 if (!ring)
 goto out;
 ring->type = type;
+ring->dev = dev;

 nr_slots = B43legacy_RXRING_SLOTS;
 if (for_tx)
@@ -922,7 +923,6 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
 DMA_TO_DEVICE);
 }

-ring->dev = dev;
 ring->nr_slots = nr_slots;
 ring->mmio_base = b43legacy_dmacontroller_base(type, controller_index);
 ring->index = controller_index;
@@ -2377,8 +2377,10 @@ static int b43legacy_op_tx(struct ieee80211_hw *hw,
 } else
 err = b43legacy_dma_tx(dev, skb);
 out:
-if (unlikely(err))
-return NETDEV_TX_BUSY;
+if (unlikely(err)) {
+/* Drop the packet. */
+dev_kfree_skb_any(skb);
+}
 return NETDEV_TX_OK;
 }

@@ -276,13 +276,18 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
 cancel_delayed_work(&priv->scan_check);

 IWL_DEBUG_INFO("Scan pass on %sGHz took %dms\n",
-(priv->scan_bands == 2) ? "2.4" : "5.2",
+(priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) ?
+"2.4" : "5.2",
 jiffies_to_msecs(elapsed_jiffies
 (priv->scan_pass_start, jiffies)));

-/* Remove this scanned band from the list
-* of pending bands to scan */
-priv->scan_bands--;
+/* Remove this scanned band from the list of pending
+* bands to scan, band G precedes A in order of scanning
+* as seen in iwl_bg_request_scan */
+if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ))
+priv->scan_bands &= ~BIT(IEEE80211_BAND_2GHZ);
+else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ))
+priv->scan_bands &= ~BIT(IEEE80211_BAND_5GHZ);

 /* If a request to abort was given, or the scan did not succeed
 * then we reset the scan state machine and terminate,
@@ -292,7 +297,7 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
 } else {
 /* If there are more bands on this scan pass reschedule */
-if (priv->scan_bands > 0)
+if (priv->scan_bands)
 goto reschedule;
 }

@@ -389,7 +394,7 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv,

 ch_info = iwl_get_channel_info(priv, band, scan_ch->channel);
 if (!is_channel_valid(ch_info)) {
-IWL_DEBUG_SCAN("Channel %d is INVALID for this SKU.\n",
+IWL_DEBUG_SCAN("Channel %d is INVALID for this band.\n",
 scan_ch->channel);
 continue;
 }
@@ -465,7 +470,10 @@ int iwl_scan_initiate(struct iwl_priv *priv)
 }

 IWL_DEBUG_INFO("Starting scan...\n");
-priv->scan_bands = 2;
+if (priv->cfg->sku & IWL_SKU_G)
+priv->scan_bands |= BIT(IEEE80211_BAND_2GHZ);
+if (priv->cfg->sku & IWL_SKU_A)
+priv->scan_bands |= BIT(IEEE80211_BAND_5GHZ);
 set_bit(STATUS_SCANNING, &priv->status);
 priv->scan_start = jiffies;
 priv->scan_pass_start = priv->scan_start;
@@ -803,8 +811,7 @@ static void iwl_bg_request_scan(struct work_struct *data)
 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;


-switch (priv->scan_bands) {
-case 2:
+if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) {
 band = IEEE80211_BAND_2GHZ;
 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
 tx_ant = iwl_scan_tx_ant(priv, band);
@@ -818,9 +825,7 @@ static void iwl_bg_request_scan(struct work_struct *data)
 tx_ant |
 RATE_MCS_CCK_MSK);
 scan->good_CRC_th = 0;
-break;
-
-case 1:
+} else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ)) {
 band = IEEE80211_BAND_5GHZ;
 tx_ant = iwl_scan_tx_ant(priv, band);
 scan->tx_cmd.rate_n_flags =
@@ -833,9 +838,7 @@ static void iwl_bg_request_scan(struct work_struct *data)
 * MIMO is not used here, but value is required */
 if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965)
 rx_chain = 0x6;
-break;
-default:
+} else {
 IWL_WARNING("Invalid scan band count\n");
 goto done;
 }
@@ -2217,7 +2217,10 @@ static int iwl3945_scan_initiate(struct iwl3945_priv *priv)
 }

 IWL_DEBUG_INFO("Starting scan...\n");
-priv->scan_bands = 2;
+if (priv->cfg->sku & IWL_SKU_G)
+priv->scan_bands |= BIT(IEEE80211_BAND_2GHZ);
+if (priv->cfg->sku & IWL_SKU_A)
+priv->scan_bands |= BIT(IEEE80211_BAND_5GHZ);
 set_bit(STATUS_SCANNING, &priv->status);
 priv->scan_start = jiffies;
 priv->scan_pass_start = priv->scan_start;
@@ -3342,13 +3345,18 @@ static void iwl3945_rx_scan_complete_notif(struct iwl3945_priv *priv,
 cancel_delayed_work(&priv->scan_check);

 IWL_DEBUG_INFO("Scan pass on %sGHz took %dms\n",
-(priv->scan_bands == 2) ? "2.4" : "5.2",
+(priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) ?
+"2.4" : "5.2",
 jiffies_to_msecs(elapsed_jiffies
 (priv->scan_pass_start, jiffies)));

-/* Remove this scanned band from the list
-* of pending bands to scan */
-priv->scan_bands--;
+/* Remove this scanned band from the list of pending
+* bands to scan, band G precedes A in order of scanning
+* as seen in iwl3945_bg_request_scan */
+if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ))
+priv->scan_bands &= ~BIT(IEEE80211_BAND_2GHZ);
+else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ))
+priv->scan_bands &= ~BIT(IEEE80211_BAND_5GHZ);

 /* If a request to abort was given, or the scan did not succeed
 * then we reset the scan state machine and terminate,
@@ -4961,7 +4969,7 @@ static int iwl3945_get_channels_for_scan(struct iwl3945_priv *priv,

 ch_info = iwl3945_get_channel_info(priv, band, scan_ch->channel);
 if (!is_channel_valid(ch_info)) {
-IWL_DEBUG_SCAN("Channel %d is INVALID for this SKU.\n",
+IWL_DEBUG_SCAN("Channel %d is INVALID for this band.\n",
 scan_ch->channel);
 continue;
 }
@@ -6316,21 +6324,16 @@ static void iwl3945_bg_request_scan(struct work_struct *data)

 /* flags + rate selection */

-switch (priv->scan_bands) {
-case 2:
+if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) {
 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
 scan->tx_cmd.rate = IWL_RATE_1M_PLCP;
 scan->good_CRC_th = 0;
 band = IEEE80211_BAND_2GHZ;
-break;
-
-case 1:
+} else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ)) {
 scan->tx_cmd.rate = IWL_RATE_6M_PLCP;
 scan->good_CRC_th = IWL_GOOD_CRC_TH;
 band = IEEE80211_BAND_5GHZ;
-break;
-
-default:
+} else {
 IWL_WARNING("Invalid scan band count\n");
 goto done;
 }
@@ -6770,7 +6773,7 @@ static int iwl3945_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *co
 ch_info = iwl3945_get_channel_info(priv, conf->channel->band,
 conf->channel->hw_value);
 if (!is_channel_valid(ch_info)) {
-IWL_DEBUG_SCAN("Channel %d [%d] is INVALID for this SKU.\n",
+IWL_DEBUG_SCAN("Channel %d [%d] is INVALID for this band.\n",
 conf->channel->hw_value, conf->channel->band);
 IWL_DEBUG_MAC80211("leave - invalid channel\n");
 spin_unlock_irqrestore(&priv->lock, flags);
@@ -290,7 +290,7 @@ islpci_monitor_rx(islpci_private *priv, struct sk_buff **skb)

 avs->version = cpu_to_be32(P80211CAPTURE_VERSION);
 avs->length = cpu_to_be32(sizeof (struct avs_80211_1_header));
-avs->mactime = cpu_to_be64(le64_to_cpu(clock));
+avs->mactime = cpu_to_be64(clock);
 avs->hosttime = cpu_to_be64(jiffies);
 avs->phytype = cpu_to_be32(6); /*OFDM: 6 for (g), 8 for (a) */
 avs->channel = cpu_to_be32(channel_of_freq(freq));
@@ -138,11 +138,8 @@ static void rt2500usb_bbp_write(struct rt2x00_dev *rt2x00dev,
 * Wait until the BBP becomes ready.
 */
 reg = rt2500usb_bbp_check(rt2x00dev);
-if (rt2x00_get_field16(reg, PHY_CSR8_BUSY)) {
-ERROR(rt2x00dev, "PHY_CSR8 register busy. Write failed.\n");
-mutex_unlock(&rt2x00dev->usb_cache_mutex);
-return;
-}
+if (rt2x00_get_field16(reg, PHY_CSR8_BUSY))
+goto exit_fail;

 /*
 * Write the data into the BBP.
@@ -155,6 +152,13 @@ static void rt2500usb_bbp_write(struct rt2x00_dev *rt2x00dev,
 rt2500usb_register_write_lock(rt2x00dev, PHY_CSR7, reg);

 mutex_unlock(&rt2x00dev->usb_cache_mutex);
+
+return;
+
+exit_fail:
+mutex_unlock(&rt2x00dev->usb_cache_mutex);
+
+ERROR(rt2x00dev, "PHY_CSR8 register busy. Write failed.\n");
 }

 static void rt2500usb_bbp_read(struct rt2x00_dev *rt2x00dev,
@@ -168,10 +172,8 @@ static void rt2500usb_bbp_read(struct rt2x00_dev *rt2x00dev,
 * Wait until the BBP becomes ready.
 */
 reg = rt2500usb_bbp_check(rt2x00dev);
-if (rt2x00_get_field16(reg, PHY_CSR8_BUSY)) {
-ERROR(rt2x00dev, "PHY_CSR8 register busy. Read failed.\n");
-return;
-}
+if (rt2x00_get_field16(reg, PHY_CSR8_BUSY))
+goto exit_fail;

 /*
 * Write the request into the BBP.
@@ -186,17 +188,21 @@ static void rt2500usb_bbp_read(struct rt2x00_dev *rt2x00dev,
 * Wait until the BBP becomes ready.
 */
 reg = rt2500usb_bbp_check(rt2x00dev);
-if (rt2x00_get_field16(reg, PHY_CSR8_BUSY)) {
-ERROR(rt2x00dev, "PHY_CSR8 register busy. Read failed.\n");
-*value = 0xff;
-mutex_unlock(&rt2x00dev->usb_cache_mutex);
-return;
-}
+if (rt2x00_get_field16(reg, PHY_CSR8_BUSY))
+goto exit_fail;

 rt2500usb_register_read_lock(rt2x00dev, PHY_CSR7, &reg);
 *value = rt2x00_get_field16(reg, PHY_CSR7_DATA);

 mutex_unlock(&rt2x00dev->usb_cache_mutex);
+
+return;
+
+exit_fail:
+mutex_unlock(&rt2x00dev->usb_cache_mutex);
+
+ERROR(rt2x00dev, "PHY_CSR8 register busy. Read failed.\n");
+*value = 0xff;
 }

 static void rt2500usb_rf_write(struct rt2x00_dev *rt2x00dev,
@@ -818,6 +818,7 @@ struct rt2x00_dev {
 /*
 * Scheduled work.
 */
+struct workqueue_struct *workqueue;
 struct work_struct intf_work;
 struct work_struct filter_work;

@@ -74,7 +74,7 @@ static void rt2x00lib_start_link_tuner(struct rt2x00_dev *rt2x00dev)

 rt2x00lib_reset_link_tuner(rt2x00dev);

-queue_delayed_work(rt2x00dev->hw->workqueue,
+queue_delayed_work(rt2x00dev->workqueue,
 &rt2x00dev->link.work, LINK_TUNE_INTERVAL);
 }

@@ -137,14 +137,6 @@ void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev)
 if (!__test_and_clear_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags))
 return;

-/*
-* Stop all scheduled work.
-*/
-if (work_pending(&rt2x00dev->intf_work))
-cancel_work_sync(&rt2x00dev->intf_work);
-if (work_pending(&rt2x00dev->filter_work))
-cancel_work_sync(&rt2x00dev->filter_work);
-
 /*
 * Stop the TX queues.
 */
@@ -400,8 +392,8 @@ static void rt2x00lib_link_tuner(struct work_struct *work)
 * Increase tuner counter, and reschedule the next link tuner run.
 */
 rt2x00dev->link.count++;
-queue_delayed_work(rt2x00dev->hw->workqueue, &rt2x00dev->link.work,
-LINK_TUNE_INTERVAL);
+queue_delayed_work(rt2x00dev->workqueue,
+&rt2x00dev->link.work, LINK_TUNE_INTERVAL);
 }

 static void rt2x00lib_packetfilter_scheduled(struct work_struct *work)
@@ -434,6 +426,15 @@ static void rt2x00lib_intf_scheduled_iter(void *data, u8 *mac,

 spin_unlock(&intf->lock);

+/*
+* It is possible the radio was disabled while the work had been
+* scheduled. If that happens we should return here immediately,
+* note that in the spinlock protected area above the delayed_flags
+* have been cleared correctly.
+*/
+if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags))
+return;
+
 if (delayed_flags & DELAYED_UPDATE_BEACON) {
 skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
 if (skb &&
@@ -442,7 +443,7 @@ static void rt2x00lib_intf_scheduled_iter(void *data, u8 *mac,
 }

 if (delayed_flags & DELAYED_CONFIG_ERP)
-rt2x00lib_config_erp(rt2x00dev, intf, &intf->conf);
+rt2x00lib_config_erp(rt2x00dev, intf, &conf);

 if (delayed_flags & DELAYED_LED_ASSOC)
 rt2x00leds_led_assoc(rt2x00dev, !!rt2x00dev->intf_associated);
@@ -488,7 +489,7 @@ void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev)
 rt2x00lib_beacondone_iter,
 rt2x00dev);

-queue_work(rt2x00dev->hw->workqueue, &rt2x00dev->intf_work);
+queue_work(rt2x00dev->workqueue, &rt2x00dev->intf_work);
 }
 EXPORT_SYMBOL_GPL(rt2x00lib_beacondone);

@@ -1003,6 +1004,10 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
 /*
 * Initialize configuration work.
 */
+rt2x00dev->workqueue = create_singlethread_workqueue("rt2x00lib");
+if (!rt2x00dev->workqueue)
+goto exit;
+
 INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled);
 INIT_WORK(&rt2x00dev->filter_work, rt2x00lib_packetfilter_scheduled);
 INIT_DELAYED_WORK(&rt2x00dev->link.work, rt2x00lib_link_tuner);
@@ -1062,6 +1067,13 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
 rt2x00rfkill_free(rt2x00dev);
 rt2x00leds_unregister(rt2x00dev);

+/*
+* Stop all queued work. Note that most tasks will already be halted
+* during rt2x00lib_disable_radio() and rt2x00lib_uninitialize().
+*/
+flush_workqueue(rt2x00dev->workqueue);
+destroy_workqueue(rt2x00dev->workqueue);
+
 /*
 * Free ieee80211_hw memory.
 */
@@ -431,7 +431,7 @@ void rt2x00mac_configure_filter(struct ieee80211_hw *hw,
 if (!test_bit(DRIVER_REQUIRE_SCHEDULED, &rt2x00dev->flags))
 rt2x00dev->ops->lib->config_filter(rt2x00dev, *total_flags);
 else
-queue_work(rt2x00dev->hw->workqueue, &rt2x00dev->filter_work);
+queue_work(rt2x00dev->workqueue, &rt2x00dev->filter_work);
 }
 EXPORT_SYMBOL_GPL(rt2x00mac_configure_filter);

@@ -512,7 +512,7 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
 memcpy(&intf->conf, bss_conf, sizeof(*bss_conf));
 if (delayed) {
 intf->delayed_flags |= delayed;
-queue_work(rt2x00dev->hw->workqueue, &rt2x00dev->intf_work);
+queue_work(rt2x00dev->workqueue, &rt2x00dev->intf_work);
 }
 spin_unlock(&intf->lock);
 }
@@ -134,11 +134,8 @@ static void rt73usb_bbp_write(struct rt2x00_dev *rt2x00dev,
 * Wait until the BBP becomes ready.
 */
 reg = rt73usb_bbp_check(rt2x00dev);
-if (rt2x00_get_field32(reg, PHY_CSR3_BUSY)) {
-ERROR(rt2x00dev, "PHY_CSR3 register busy. Write failed.\n");
-mutex_unlock(&rt2x00dev->usb_cache_mutex);
-return;
-}
+if (rt2x00_get_field32(reg, PHY_CSR3_BUSY))
+goto exit_fail;

 /*
 * Write the data into the BBP.
@@ -151,6 +148,13 @@ static void rt73usb_bbp_write(struct rt2x00_dev *rt2x00dev,

 rt73usb_register_write_lock(rt2x00dev, PHY_CSR3, reg);
 mutex_unlock(&rt2x00dev->usb_cache_mutex);
+
+return;
+
+exit_fail:
+mutex_unlock(&rt2x00dev->usb_cache_mutex);
+
+ERROR(rt2x00dev, "PHY_CSR3 register busy. Write failed.\n");
 }

 static void rt73usb_bbp_read(struct rt2x00_dev *rt2x00dev,
@@ -164,11 +168,8 @@ static void rt73usb_bbp_read(struct rt2x00_dev *rt2x00dev,
 * Wait until the BBP becomes ready.
 */
 reg = rt73usb_bbp_check(rt2x00dev);
-if (rt2x00_get_field32(reg, PHY_CSR3_BUSY)) {
-ERROR(rt2x00dev, "PHY_CSR3 register busy. Read failed.\n");
-mutex_unlock(&rt2x00dev->usb_cache_mutex);
-return;
-}
+if (rt2x00_get_field32(reg, PHY_CSR3_BUSY))
+goto exit_fail;

 /*
 * Write the request into the BBP.
@@ -184,14 +185,19 @@ static void rt73usb_bbp_read(struct rt2x00_dev *rt2x00dev,
 * Wait until the BBP becomes ready.
 */
 reg = rt73usb_bbp_check(rt2x00dev);
-if (rt2x00_get_field32(reg, PHY_CSR3_BUSY)) {
-ERROR(rt2x00dev, "PHY_CSR3 register busy. Read failed.\n");
-*value = 0xff;
-return;
-}
+if (rt2x00_get_field32(reg, PHY_CSR3_BUSY))
+goto exit_fail;

 *value = rt2x00_get_field32(reg, PHY_CSR3_VALUE);
 mutex_unlock(&rt2x00dev->usb_cache_mutex);
+
+return;
+
+exit_fail:
+mutex_unlock(&rt2x00dev->usb_cache_mutex);
+
+ERROR(rt2x00dev, "PHY_CSR3 register busy. Read failed.\n");
+*value = 0xff;
 }

 static void rt73usb_rf_write(struct rt2x00_dev *rt2x00dev,
@@ -84,7 +84,11 @@ struct net_lro_mgr {
 from received packets and eth protocol
 is still ETH_P_8021Q */

-u32 ip_summed; /* Set in non generated SKBs in page mode */
+/*
+* Set for generated SKBs that are not added to
+* the frag list in fragmented mode
+*/
+u32 ip_summed;
 u32 ip_summed_aggr; /* Set in aggregated SKBs: CHECKSUM_UNNECESSARY
 * or CHECKSUM_NONE */

@@ -88,6 +88,8 @@ struct wireless_dev;
 #define NETDEV_TX_BUSY 1 /* driver tx path was busy*/
 #define NETDEV_TX_LOCKED -1 /* driver tx lock was already taken */

+#ifdef __KERNEL__
+
 /*
 * Compute the worst case header length according to the protocols
 * used.
@@ -114,6 +116,8 @@ struct wireless_dev;
 #define MAX_HEADER (LL_MAX_HEADER + 48)
 #endif

+#endif /* __KERNEL__ */
+
 struct net_device_subqueue
 {
 /* Give a control state for each queue. This struct may contain
@@ -365,6 +365,12 @@ static inline int ipv6_addr_any(const struct in6_addr *a)
 a->s6_addr32[2] | a->s6_addr32[3] ) == 0);
 }

+static inline int ipv6_addr_loopback(const struct in6_addr *a)
+{
+return ((a->s6_addr32[0] | a->s6_addr32[1] |
+a->s6_addr32[2] | (a->s6_addr32[3] ^ htonl(1))) == 0);
+}
+
 static inline int ipv6_addr_v4mapped(const struct in6_addr *a)
 {
 return ((a->s6_addr32[0] | a->s6_addr32[1] |
@@ -95,6 +95,11 @@ extern struct list_head net_namespace_list;
 #ifdef CONFIG_NET_NS
 extern void __put_net(struct net *net);

+static inline int net_alive(struct net *net)
+{
+return net && atomic_read(&net->count);
+}
+
 static inline struct net *get_net(struct net *net)
 {
 atomic_inc(&net->count);
@@ -125,6 +130,12 @@ int net_eq(const struct net *net1, const struct net *net2)
 return net1 == net2;
 }
 #else
+
+static inline int net_alive(struct net *net)
+{
+return 1;
+}
+
 static inline struct net *get_net(struct net *net)
 {
 return net;
@@ -2107,6 +2107,10 @@ int netif_receive_skb(struct sk_buff *skb)

 rcu_read_lock();

+/* Don't receive packets in an exiting network namespace */
+if (!net_alive(dev_net(skb->dev)))
+goto out;
+
 #ifdef CONFIG_NET_CLS_ACT
 if (skb->tc_verd & TC_NCLS) {
 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
@@ -3034,7 +3038,7 @@ EXPORT_SYMBOL(dev_unicast_delete);
 /**
 * dev_unicast_add - add a secondary unicast address
 * @dev: device
-* @addr: address to delete
+* @addr: address to add
 * @alen: length of @addr
 *
 * Add a secondary unicast address to the device or increase
@@ -140,6 +140,9 @@ static void cleanup_net(struct work_struct *work)
 struct pernet_operations *ops;
 struct net *net;

+/* Be very certain incoming network packets will not find us */
+rcu_barrier();
+
 net = container_of(work, struct net, work);

 mutex_lock(&net_mutex);
@@ -1290,12 +1290,14 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
 {
 	unsigned int nr_pages = spd->nr_pages;
 	unsigned int poff, plen, len, toff, tlen;
-	int headlen, seg;
+	int headlen, seg, error = 0;
 
 	toff = *offset;
 	tlen = *total_len;
-	if (!tlen)
+	if (!tlen) {
+		error = 1;
 		goto err;
+	}
 
 	/*
 	 * if the offset is greater than the linear part, go directly to

@@ -1337,7 +1339,8 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
 		 * just jump directly to update and return, no point
 		 * in going over fragments when the output is full.
 		 */
-		if (spd_fill_page(spd, virt_to_page(p), plen, poff, skb))
+		error = spd_fill_page(spd, virt_to_page(p), plen, poff, skb);
+		if (error)
 			goto done;
 
 		tlen -= plen;

@@ -1367,7 +1370,8 @@ map_frag:
 		if (!plen)
 			break;
 
-		if (spd_fill_page(spd, f->page, plen, poff, skb))
+		error = spd_fill_page(spd, f->page, plen, poff, skb);
+		if (error)
 			break;
 
 		tlen -= plen;

@@ -1380,7 +1384,10 @@ done:
 		return 0;
 	}
 err:
-	return 1;
+	/* update the offset to reflect the linear part skip, if any */
+	if (!error)
+		*offset = toff;
+	return error;
 }
 
 /*
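Note on the three __skb_splice_bits() hunks above: instead of collapsing every failure into a bare "return 1", the function now keeps whatever spd_fill_page() returned in "error" and writes the consumed offset back only on success. A rough, user-space restatement of that pattern; fill(), splice_bits() and the segment limit are invented for the sketch and are not part of the kernel API:

#include <stdio.h>

static int fill(unsigned int seg)
{
	return seg >= 3;	/* pretend the output pipe fills up after 3 segments */
}

static int splice_bits(unsigned int *offset, unsigned int len)
{
	unsigned int consumed = 0;
	int error = 0;

	for (unsigned int seg = 0; consumed < len; seg++, consumed++) {
		error = fill(seg);
		if (error)
			break;
	}
	if (!error)
		*offset += consumed;	/* commit the out-parameter only on success */
	return error;
}

int main(void)
{
	unsigned int off = 0;
	int err = splice_bits(&off, 10);

	printf("err=%d off=%u\n", err, off);	/* err=1, off left untouched */
	return 0;
}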
@@ -192,14 +192,21 @@ EXPORT_SYMBOL(inet_frag_evictor);
 
 static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
 		struct inet_frag_queue *qp_in, struct inet_frags *f,
-		unsigned int hash, void *arg)
+		void *arg)
 {
 	struct inet_frag_queue *qp;
 #ifdef CONFIG_SMP
 	struct hlist_node *n;
 #endif
+	unsigned int hash;
 
 	write_lock(&f->lock);
+	/*
+	 * While we stayed w/o the lock other CPU could update
+	 * the rnd seed, so we need to re-calculate the hash
+	 * chain. Fortunatelly the qp_in can be used to get one.
+	 */
+	hash = f->hashfn(qp_in);
#ifdef CONFIG_SMP
 	/* With SMP race we have to recheck hash table, because
 	 * such entry could be created on other cpu, while we

@@ -247,7 +254,7 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
 }
 
 static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
-		struct inet_frags *f, void *arg, unsigned int hash)
+		struct inet_frags *f, void *arg)
 {
 	struct inet_frag_queue *q;
 

@@ -255,7 +262,7 @@ static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
 	if (q == NULL)
 		return NULL;
 
-	return inet_frag_intern(nf, q, f, hash, arg);
+	return inet_frag_intern(nf, q, f, arg);
 }
 
 struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,

@@ -264,7 +271,6 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
 	struct inet_frag_queue *q;
 	struct hlist_node *n;
 
-	read_lock(&f->lock);
 	hlist_for_each_entry(q, n, &f->hash[hash], list) {
 		if (q->net == nf && f->match(q, key)) {
 			atomic_inc(&q->refcnt);

@@ -274,6 +280,6 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
 	}
 	read_unlock(&f->lock);
 
-	return inet_frag_create(nf, f, key, hash);
+	return inet_frag_create(nf, f, key);
 }
 EXPORT_SYMBOL(inet_frag_find);
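The inet_fragment.c hunks above, together with the ip_find()/fq_find() hunks further down that now take ip4_frags.lock / nf_frags.lock / ip6_frags.lock for reading before hashing, address a single problem: the fragment hash seed ("rnd") can be regenerated while a lookup is in flight, so the bucket index must be computed under the same reader/writer lock that protects the seed, and recomputed under the write lock before a new queue is interned. A toy user-space sketch of that rule; toy_hash, seed_lock and bucket_for are invented names standing in for ipqhashfn()/ip6qhashfn() and f->lock:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NBUCKETS 64

static pthread_rwlock_t seed_lock = PTHREAD_RWLOCK_INITIALIZER;
static uint32_t seed = 0x12345678;	/* a "rebuild" thread may take the write lock and change this */

static unsigned int toy_hash(uint32_t key)
{
	return ((key ^ seed) * 2654435761u) % NBUCKETS;	/* any seed-dependent mix */
}

static unsigned int bucket_for(uint32_t key)
{
	unsigned int h;

	pthread_rwlock_rdlock(&seed_lock);	/* like read_lock(&f->lock) in the callers */
	h = toy_hash(key);			/* the seed cannot change while we hold the lock */
	/* the kernel also walks the hash chain before dropping the lock; here we just return */
	pthread_rwlock_unlock(&seed_lock);
	return h;
}

int main(void)
{
	printf("key 42 -> bucket %u\n", bucket_for(42));
	return 0;
}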
@@ -383,8 +383,7 @@ static int __lro_proc_skb(struct net_lro_mgr *lro_mgr, struct sk_buff *skb,
 out2: /* send aggregated SKBs to stack */
 	lro_flush(lro_mgr, lro_desc);
 
-out: /* Original SKB has to be posted to stack */
-	skb->ip_summed = lro_mgr->ip_summed;
+out:
 	return 1;
 }
 

@@ -227,6 +227,8 @@ static inline struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user)
 
 	arg.iph = iph;
 	arg.user = user;
+
+	read_lock(&ip4_frags.lock);
 	hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
 
 	q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
@@ -258,6 +258,8 @@
 #include <linux/socket.h>
 #include <linux/random.h>
 #include <linux/bootmem.h>
+#include <linux/highmem.h>
+#include <linux/swap.h>
 #include <linux/cache.h>
 #include <linux/err.h>
 #include <linux/crypto.h>

@@ -2688,7 +2690,7 @@ __setup("thash_entries=", set_thash_entries);
 void __init tcp_init(void)
 {
 	struct sk_buff *skb = NULL;
-	unsigned long limit;
+	unsigned long nr_pages, limit;
 	int order, i, max_share;
 
 	BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));

@@ -2757,8 +2759,9 @@ void __init tcp_init(void)
 	 * is up to 1/2 at 256 MB, decreasing toward zero with the amount of
 	 * memory, with a floor of 128 pages.
 	 */
-	limit = min(nr_all_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
-	limit = (limit * (nr_all_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
+	nr_pages = totalram_pages - totalhigh_pages;
+	limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
+	limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
 	limit = max(limit, 128UL);
 	sysctl_tcp_mem[0] = limit / 4 * 3;
 	sysctl_tcp_mem[1] = limit;
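The tcp_init() hunk above bases the default tcp_mem thresholds on low memory only (totalram_pages - totalhigh_pages) rather than on all pages, since the buffers TCP accounts for are allocated from lowmem and highmem would otherwise inflate the limits. A stand-alone sketch of the arithmetic with assumed numbers (4 KiB pages, 1 GiB of lowmem); the third tcp_mem entry is set elsewhere in tcp_init() and appears here only as an assumption:

#include <stdio.h>

int main(void)
{
	const unsigned long PAGE_SHIFT = 12;	/* 4 KiB pages (assumption) */
	unsigned long nr_pages = 262144;	/* e.g. 1 GiB of low memory */
	unsigned long limit, cap, tcp_mem[3];

	cap = 1UL << (28 - PAGE_SHIFT);		/* cap the first factor at 256 MB worth of pages */
	limit = (nr_pages < cap ? nr_pages : cap) >> (20 - PAGE_SHIFT);
	limit = (limit * (nr_pages >> (20 - PAGE_SHIFT))) >> (PAGE_SHIFT - 11);
	if (limit < 128)
		limit = 128;			/* floor of 128 pages */

	tcp_mem[0] = limit / 4 * 3;
	tcp_mem[1] = limit;
	tcp_mem[2] = tcp_mem[0] * 2;		/* assumed; not part of the hunk above */

	printf("tcp_mem = %lu %lu %lu (pages)\n", tcp_mem[0], tcp_mem[1], tcp_mem[2]);
	return 0;
}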
@@ -2189,7 +2189,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
 	}
 
 	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
-			"%08X %5d %8d %lu %d %p %u %u %u %u %d%n",
+			"%08X %5d %8d %lu %d %p %lu %lu %u %u %d%n",
 		i, src, srcp, dest, destp, sk->sk_state,
 		tp->write_seq - tp->snd_una,
 		sk->sk_state == TCP_LISTEN ? sk->sk_ack_backlog :

@@ -2201,8 +2201,8 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
 		icsk->icsk_probes_out,
 		sock_i_ino(sk),
 		atomic_read(&sk->sk_refcnt), sk,
-		icsk->icsk_rto,
-		icsk->icsk_ack.ato,
+		jiffies_to_clock_t(icsk->icsk_rto),
+		jiffies_to_clock_t(icsk->icsk_ack.ato),
 		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
 		tp->snd_cwnd,
 		tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh,
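With the get_tcp4_sock() change above, the retransmit and ack timeout columns of /proc/net/tcp are printed through jiffies_to_clock_t(), i.e. in USER_HZ clock ticks rather than raw, HZ-dependent jiffies (hence the format switch from %u to %lu). A rough user-space analogue of that conversion, with HZ assumed to be 1000; the real helper lives in the kernel's time code and also handles HZ values that are not multiples of USER_HZ:

#include <stdio.h>

#define HZ      1000	/* assumed kernel tick rate */
#define USER_HZ 100	/* the tick unit userspace sees via /proc */

static unsigned long jiffies_to_clock_t_sketch(unsigned long j)
{
	return j / (HZ / USER_HZ);	/* valid only when HZ is a multiple of USER_HZ */
}

int main(void)
{
	unsigned long rto = 250;	/* an RTO of 250 jiffies = 250 ms at HZ=1000 */

	printf("icsk_rto: %lu jiffies -> %lu clock ticks\n",
	       rto, jiffies_to_clock_t_sketch(rto));
	return 0;
}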
@@ -100,6 +100,15 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
 	if (hdr->version != 6)
 		goto err;
 
+	/*
+	 * RFC4291 2.5.3
+	 * A packet received on an interface with a destination address
+	 * of loopback must be dropped.
+	 */
+	if (!(dev->flags & IFF_LOOPBACK) &&
+	    ipv6_addr_loopback(&hdr->daddr))
+		goto err;
+
 	skb->transport_header = skb->network_header + sizeof(*hdr);
 	IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
 
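The ipv6_rcv() hunk above implements RFC 4291 section 2.5.3: a packet whose destination is the loopback address must be dropped unless it actually arrived on a loopback interface. In the kernel the test is ipv6_addr_loopback(); the user-space sketch below expresses the same condition with the libc IN6_IS_ADDR_LOOPBACK() macro, with should_drop() being an invented helper:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdbool.h>
#include <stdio.h>

static bool should_drop(const struct in6_addr *daddr, bool dev_is_loopback)
{
	/* mirror of: !(dev->flags & IFF_LOOPBACK) && ipv6_addr_loopback(&hdr->daddr) */
	return !dev_is_loopback && IN6_IS_ADDR_LOOPBACK(daddr);
}

int main(void)
{
	struct in6_addr dst;

	inet_pton(AF_INET6, "::1", &dst);
	printf("drop on eth0: %d, drop on lo: %d\n",
	       should_drop(&dst, false), should_drop(&dst, true));
	return 0;
}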
@@ -343,18 +343,21 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
 	case IPV6_DSTOPTS:
 	{
 		struct ipv6_txoptions *opt;
 
+		/* remove any sticky options header with a zero option
+		 * length, per RFC3542.
+		 */
 		if (optlen == 0)
 			optval = NULL;
+		else if (optlen < sizeof(struct ipv6_opt_hdr) ||
+			 optlen & 0x7 || optlen > 8 * 255)
+			goto e_inval;
+
 		/* hop-by-hop / destination options are privileged option */
 		retv = -EPERM;
 		if (optname != IPV6_RTHDR && !capable(CAP_NET_RAW))
 			break;
 
-		if (optlen < sizeof(struct ipv6_opt_hdr) ||
-		    optlen & 0x7 || optlen > 8 * 255)
-			goto e_inval;
-
 		opt = ipv6_renew_options(sk, np->opt, optname,
 					 (struct ipv6_opt_hdr __user *)optval,
 					 optlen);
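The do_ipv6_setsockopt() hunk above makes a zero optlen mean "remove the sticky options header" (per RFC 3542) and performs the length sanity check before the capability check instead of after it. The check itself is unchanged: a sticky option header must be at least sizeof(struct ipv6_opt_hdr) bytes, a multiple of 8, and at most 8 * 255 bytes. Stand-alone restatement, assuming the usual 2-byte option header (nexthdr + hdrlen); sticky_optlen_ok() is an invented name:

#include <stdio.h>

#define OPT_HDR_SIZE 2		/* assumed sizeof(struct ipv6_opt_hdr) */

static int sticky_optlen_ok(unsigned int optlen)
{
	if (optlen == 0)
		return 1;	/* treated as "remove the header" */
	return !(optlen < OPT_HDR_SIZE || (optlen & 0x7) || optlen > 8 * 255);
}

int main(void)
{
	unsigned int tests[] = { 0, 1, 8, 12, 16, 8 * 255, 8 * 256 };

	for (unsigned int i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
		printf("optlen %u -> %s\n", tests[i],
		       sticky_optlen_ok(tests[i]) ? "ok" : "EINVAL");
	return 0;
}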
@@ -129,7 +129,7 @@ static struct nf_hook_ops ip6t_ops[] __read_mostly = {
 		.priority	= NF_IP6_PRI_MANGLE,
 	},
 	{
-		.hook		= ip6t_local_hook,
+		.hook		= ip6t_route_hook,
 		.owner		= THIS_MODULE,
 		.pf		= PF_INET6,
 		.hooknum	= NF_INET_LOCAL_IN,

@@ -207,9 +207,10 @@ fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst)
 	arg.id = id;
 	arg.src = src;
 	arg.dst = dst;
+
+	read_lock_bh(&nf_frags.lock);
 	hash = ip6qhashfn(id, src, dst);
 
-	local_bh_disable();
 	q = inet_frag_find(&nf_init_frags, &nf_frags, &arg, hash);
 	local_bh_enable();
 	if (q == NULL)

@@ -245,6 +245,8 @@ fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst,
 	arg.id = id;
 	arg.src = src;
 	arg.dst = dst;
+
+	read_lock(&ip6_frags.lock);
 	hash = ip6qhashfn(id, src, dst);
 
 	q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
@@ -238,7 +238,7 @@ static inline int rt6_need_strict(struct in6_addr *daddr)
 static inline struct rt6_info *rt6_device_match(struct net *net,
 						    struct rt6_info *rt,
 						    int oif,
-						    int strict)
+						    int flags)
 {
 	struct rt6_info *local = NULL;
 	struct rt6_info *sprt;

@@ -251,7 +251,7 @@ static inline struct rt6_info *rt6_device_match(struct net *net,
 			if (dev->flags & IFF_LOOPBACK) {
 				if (sprt->rt6i_idev == NULL ||
 				    sprt->rt6i_idev->dev->ifindex != oif) {
-					if (strict && oif)
+					if (flags & RT6_LOOKUP_F_IFACE && oif)
 						continue;
 					if (local && (!oif ||
 						      local->rt6i_idev->dev->ifindex == oif))

@@ -264,7 +264,7 @@ static inline struct rt6_info *rt6_device_match(struct net *net,
 		if (local)
 			return local;
 
-		if (strict)
+		if (flags & RT6_LOOKUP_F_IFACE)
 			return net->ipv6.ip6_null_entry;
 	}
 	return rt;
@@ -1946,7 +1946,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 
 	seq_printf(seq,
 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
-		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %u %u %u %u %d\n",
+		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %lu %lu %u %u %d\n",
 		   i,
 		   src->s6_addr32[0], src->s6_addr32[1],
 		   src->s6_addr32[2], src->s6_addr32[3], srcp,

@@ -1962,8 +1962,8 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 		   icsk->icsk_probes_out,
 		   sock_i_ino(sp),
 		   atomic_read(&sp->sk_refcnt), sp,
-		   icsk->icsk_rto,
-		   icsk->icsk_ack.ato,
+		   jiffies_to_clock_t(icsk->icsk_rto),
+		   jiffies_to_clock_t(icsk->icsk_ack.ato),
 		   (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
 		   tp->snd_cwnd, tp->snd_ssthresh>=0xFFFF?-1:tp->snd_ssthresh
 		   );
@@ -387,6 +387,15 @@ void ieee80211_key_free(struct ieee80211_key *key)
 	if (!key)
 		return;
 
+	if (!key->sdata) {
+		/* The key has not been linked yet, simply free it
+		 * and don't Oops */
+		if (key->conf.alg == ALG_CCMP)
+			ieee80211_aes_key_free(key->u.ccmp.tfm);
+		kfree(key);
+		return;
+	}
+
 	spin_lock_irqsave(&key->sdata->local->key_lock, flags);
 	__ieee80211_key_free(key);
 	spin_unlock_irqrestore(&key->sdata->local->key_lock, flags);
@@ -1534,7 +1534,7 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb,
 		}
 	}
 	list_for_each_entry_rcu(addr6, &iface->addr6_list, list) {
-		if (addr6->valid || iter_addr6++ < skip_addr6)
+		if (!addr6->valid || iter_addr6++ < skip_addr6)
 			continue;
 		if (netlbl_unlabel_staticlist_gen(NLBL_UNLABEL_C_STATICLISTDEF,
 						  iface,
@@ -132,6 +132,7 @@ errout:
  * @maxtype: maximum attribute type to be expected
  * @head: head of attribute stream
  * @len: length of attribute stream
+ * @policy: validation policy
  *
  * Parses a stream of attributes and stores a pointer to each attribute in
  * the tb array accessable via the attribute type. Attributes with a type

@@ -194,7 +195,7 @@ struct nlattr *nla_find(struct nlattr *head, int len, int attrtype)
 /**
  * nla_strlcpy - Copy string attribute payload into a sized buffer
  * @dst: where to copy the string to
- * @src: attribute to copy the string from
+ * @nla: attribute to copy the string from
 * @dstsize: size of destination buffer
 *
 * Copies at most dstsize - 1 bytes into the destination buffer.

@@ -340,9 +341,9 @@ struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen)
 }
 
 /**
- * nla_reserve - reserve room for attribute without header
+ * nla_reserve_nohdr - reserve room for attribute without header
 * @skb: socket buffer to reserve room on
- * @len: length of attribute payload
+ * @attrlen: length of attribute payload
 *
 * Reserves room for attribute payload without a header.
 *
@@ -106,17 +106,6 @@ config NET_SCH_PRIO
 	  To compile this code as a module, choose M here: the
 	  module will be called sch_prio.
 
-config NET_SCH_RR
-	tristate "Multi Band Round Robin Queuing (RR)"
-	select NET_SCH_PRIO
-	---help---
-	  Say Y here if you want to use an n-band round robin packet
-	  scheduler.
-
-	  The module uses sch_prio for its framework and is aliased as
-	  sch_rr, so it will load sch_prio, although it is referred
-	  to using sch_rr.
-
 config NET_SCH_RED
 	tristate "Random Early Detection (RED)"
 	---help---
@@ -468,7 +468,7 @@ struct Qdisc *qdisc_alloc(struct net_device *dev, struct Qdisc_ops *ops)
 
 	return sch;
 errout:
-	return ERR_PTR(-err);
+	return ERR_PTR(err);
 }
 
 struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops,
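The qdisc_alloc() hunk above is a sign fix: the change implies that "err" already carries a negative errno at the errout label, so ERR_PTR(-err) would encode a positive value that IS_ERR() does not recognise. A minimal user-space mimic of the err.h helpers showing why the sign matters; the helper definitions below are simplified stand-ins, not the kernel's exact ones:

#include <errno.h>
#include <stdio.h>

static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int   IS_ERR(const void *ptr)  { return (unsigned long)ptr >= (unsigned long)-4095; }

int main(void)
{
	long err = -ENOBUFS;		/* a negative errno, as at "errout:" */
	void *sch = ERR_PTR(err);	/* ERR_PTR(-err) would encode +ENOBUFS: not an error pointer */

	printf("IS_ERR=%d PTR_ERR=%ld\n", IS_ERR(sch), PTR_ERR(sch));
	return 0;
}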
@@ -4512,7 +4512,9 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len,
 	if (copy_from_user(&getaddrs, optval, len))
 		return -EFAULT;
 
-	if (getaddrs.addr_num <= 0) return -EINVAL;
+	if (getaddrs.addr_num <= 0 ||
+	    getaddrs.addr_num >= (INT_MAX / sizeof(union sctp_addr)))
+		return -EINVAL;
 	/*
 	 * For UDP-style sockets, id specifies the association to query.
 	 * If the id field is set to the value '0' then the locally bound
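The SCTP hunk above rejects a user-supplied address count before it is multiplied by sizeof(union sctp_addr), so the later buffer-size computation cannot wrap. The same guard in a self-contained form; struct fake_addr and check_addr_num() are stand-ins invented for the sketch:

#include <limits.h>
#include <stddef.h>
#include <stdio.h>

struct fake_addr { char pad[28]; };	/* stand-in for union sctp_addr */

static int check_addr_num(int addr_num)
{
	if (addr_num <= 0 ||
	    addr_num >= (int)(INT_MAX / sizeof(struct fake_addr)))
		return -1;		/* -EINVAL in the kernel */
	return 0;
}

int main(void)
{
	printf("%d %d %d\n", check_addr_num(4),
	       check_addr_num(0), check_addr_num(INT_MAX / 4));
	return 0;
}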
@@ -485,8 +485,8 @@ static int unix_socketpair(struct socket *, struct socket *);
 static int unix_accept(struct socket *, struct socket *, int);
 static int unix_getname(struct socket *, struct sockaddr *, int *, int);
 static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
-static unsigned int unix_datagram_poll(struct file *, struct socket *,
+static unsigned int unix_dgram_poll(struct file *, struct socket *,
 				    poll_table *);
 static int unix_ioctl(struct socket *, unsigned int, unsigned long);
 static int unix_shutdown(struct socket *, int);
 static int unix_stream_sendmsg(struct kiocb *, struct socket *,

@@ -532,7 +532,7 @@ static const struct proto_ops unix_dgram_ops = {
 	.socketpair =	unix_socketpair,
 	.accept =	sock_no_accept,
 	.getname =	unix_getname,
-	.poll =		unix_datagram_poll,
+	.poll =		unix_dgram_poll,
 	.ioctl =	unix_ioctl,
 	.listen =	sock_no_listen,
 	.shutdown =	unix_shutdown,

@@ -553,7 +553,7 @@ static const struct proto_ops unix_seqpacket_ops = {
 	.socketpair =	unix_socketpair,
 	.accept =	unix_accept,
 	.getname =	unix_getname,
-	.poll =		unix_datagram_poll,
+	.poll =		unix_dgram_poll,
 	.ioctl =	unix_ioctl,
 	.listen =	unix_listen,
 	.shutdown =	unix_shutdown,
@@ -1992,29 +1992,13 @@ static unsigned int unix_poll(struct file * file, struct socket *sock, poll_tabl
 	return mask;
 }
 
-static unsigned int unix_datagram_poll(struct file *file, struct socket *sock,
+static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
 				    poll_table *wait)
 {
-	struct sock *sk = sock->sk, *peer;
-	unsigned int mask;
+	struct sock *sk = sock->sk, *other;
+	unsigned int mask, writable;
 
 	poll_wait(file, sk->sk_sleep, wait);
-
-	peer = unix_peer_get(sk);
-	if (peer) {
-		if (peer != sk) {
-			/*
-			 * Writability of a connected socket additionally
-			 * depends on the state of the receive queue of the
-			 * peer.
-			 */
-			poll_wait(file, &unix_sk(peer)->peer_wait, wait);
-		} else {
-			sock_put(peer);
-			peer = NULL;
-		}
-	}
-
 	mask = 0;
 
 	/* exceptional events? */

@@ -2040,14 +2024,26 @@ static unsigned int unix_datagram_poll(struct file *file, struct socket *sock,
 	}
 
 	/* writable? */
-	if (unix_writable(sk) && !(peer && unix_recvq_full(peer)))
+	writable = unix_writable(sk);
+	if (writable) {
+		other = unix_peer_get(sk);
+		if (other) {
+			if (unix_peer(other) != sk) {
+				poll_wait(file, &unix_sk(other)->peer_wait,
+					  wait);
+				if (unix_recvq_full(other))
+					writable = 0;
+			}
+
+			sock_put(other);
+		}
+	}
+
+	if (writable)
 		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
 	else
 		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
 
-	if (peer)
-		sock_put(peer);
-
 	return mask;
 }
 
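The two af_unix hunks above restructure the datagram poll path: the socket is reported writable only if it has send space and, when its peer is not connected back to it, that peer's receive queue is not full; the peer_wait registration now happens only in that case, and the peer reference is dropped immediately instead of at the end of the function. A condensed user-space restatement of the decision; toy_sock, recvq_full and dgram_writable() are invented for the sketch:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct toy_sock {
	bool has_send_space;	/* stands in for unix_writable(sk)      */
	struct toy_sock *peer;	/* stands in for unix_peer_get(sk)      */
	bool recvq_full;	/* stands in for unix_recvq_full(other) */
};

static bool dgram_writable(struct toy_sock *sk)
{
	bool writable = sk->has_send_space;
	struct toy_sock *other = sk->peer;

	/* only a peer that is not connected back to us can throttle the sender */
	if (writable && other && other->peer != sk && other->recvq_full)
		writable = false;
	return writable;
}

int main(void)
{
	struct toy_sock server = { .has_send_space = true, .peer = NULL, .recvq_full = true };
	struct toy_sock client = { .has_send_space = true, .peer = &server, .recvq_full = false };

	printf("client writable: %d\n", dgram_writable(&client));	/* 0: server queue is full */
	return 0;
}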