Merge rsync://rsync.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
commit a839688362
@@ -822,7 +822,7 @@ static int corkscrew_open(struct net_device *dev)
 break; /* Bad news! */
 skb->dev = dev; /* Mark as being used by this device. */
 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
-vp->rx_ring[i].addr = isa_virt_to_bus(skb->tail);
+vp->rx_ring[i].addr = isa_virt_to_bus(skb->data);
 }
 vp->rx_ring[i - 1].next = isa_virt_to_bus(&vp->rx_ring[0]); /* Wrap the ring. */
 outl(isa_virt_to_bus(&vp->rx_ring[0]), ioaddr + UpListPtr);
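Note: nearly every driver hunk in this merge makes the same one-token substitution, skb->tail becoming skb->data at the point where a freshly allocated receive buffer is handed to the hardware. For a buffer that has only been through dev_alloc_skb() and skb_reserve(), the two pointers are equal: skb_reserve() moves data and tail together, and nothing has called skb_put() yet. skb->data is the correct name for "start of packet data", which is what these drivers mean. A minimal sketch of that invariant (illustrative only, not part of the patch; pdev, PKT_BUF_SZ and pkt_len stand in for each driver's equivalents):

    struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ); /* data == tail */
    skb_reserve(skb, 2);        /* both pointers advance: data still == tail */
    /* map the empty payload area for DMA: use skb->data, not skb->tail */
    dma_addr_t mapping = pci_map_single(pdev, skb->data,
                                        PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
    /* only after the hardware fills the buffer does skb_put() move tail */
    skb_put(skb, pkt_len);      /* now tail == data + pkt_len */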
@@ -1406,7 +1406,7 @@ static int boomerang_rx(struct net_device *dev)
 break; /* Bad news! */
 skb->dev = dev; /* Mark as being used by this device. */
 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
-vp->rx_ring[entry].addr = isa_virt_to_bus(skb->tail);
+vp->rx_ring[entry].addr = isa_virt_to_bus(skb->data);
 vp->rx_skbuff[entry] = skb;
 }
 vp->rx_ring[entry].status = 0; /* Clear complete bit. */

@@ -1802,7 +1802,7 @@ vortex_open(struct net_device *dev)
 break; /* Bad news! */
 skb->dev = dev; /* Mark as being used by this device. */
 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
-vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->tail, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
+vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
 }
 if (i != RX_RING_SIZE) {
 int j;

@@ -2632,7 +2632,7 @@ boomerang_rx(struct net_device *dev)
 pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
 /* 'skb_put()' points to the start of sk_buff data area. */
 memcpy(skb_put(skb, pkt_len),
-vp->rx_skbuff[entry]->tail,
+vp->rx_skbuff[entry]->data,
 pkt_len);
 pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
 vp->rx_copy++;

@@ -2678,7 +2678,7 @@ boomerang_rx(struct net_device *dev)
 }
 skb->dev = dev; /* Mark as being used by this device. */
 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
-vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->tail, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
+vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
 vp->rx_skbuff[entry] = skb;
 }
 vp->rx_ring[entry].status = 0; /* Clear complete bit. */

@@ -596,7 +596,7 @@ rx_status_loop:
 
 mapping =
 cp->rx_skb[rx_tail].mapping =
-pci_map_single(cp->pdev, new_skb->tail,
+pci_map_single(cp->pdev, new_skb->data,
 buflen, PCI_DMA_FROMDEVICE);
 cp->rx_skb[rx_tail].skb = new_skb;
 

@@ -1101,7 +1101,7 @@ static int cp_refill_rx (struct cp_private *cp)
 skb_reserve(skb, RX_OFFSET);
 
 cp->rx_skb[i].mapping = pci_map_single(cp->pdev,
-skb->tail, cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+skb->data, cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
 cp->rx_skb[i].skb = skb;
 
 cp->rx_ring[i].opts2 = 0;

@@ -546,11 +546,11 @@ static inline void init_rx_bufs(struct net_device *dev)
 rbd->b_next = WSWAPrbd(virt_to_bus(rbd+1));
 rbd->b_addr = WSWAPrbd(virt_to_bus(rbd));
 rbd->skb = skb;
-rbd->v_data = skb->tail;
-rbd->b_data = WSWAPchar(virt_to_bus(skb->tail));
+rbd->v_data = skb->data;
+rbd->b_data = WSWAPchar(virt_to_bus(skb->data));
 rbd->size = PKT_BUF_SZ;
 #ifdef __mc68000__
-cache_clear(virt_to_phys(skb->tail), PKT_BUF_SZ);
+cache_clear(virt_to_phys(skb->data), PKT_BUF_SZ);
 #endif
 }
 lp->rbd_head = lp->rbds;

@@ -816,10 +816,10 @@ static inline int i596_rx(struct net_device *dev)
 rx_in_place = 1;
 rbd->skb = newskb;
 newskb->dev = dev;
-rbd->v_data = newskb->tail;
-rbd->b_data = WSWAPchar(virt_to_bus(newskb->tail));
+rbd->v_data = newskb->data;
+rbd->b_data = WSWAPchar(virt_to_bus(newskb->data));
 #ifdef __mc68000__
-cache_clear(virt_to_phys(newskb->tail), PKT_BUF_SZ);
+cache_clear(virt_to_phys(newskb->data), PKT_BUF_SZ);
 #endif
 }
 else

@@ -840,7 +840,7 @@ memory_squeeze:
 skb->protocol=eth_type_trans(skb,dev);
 skb->len = pkt_len;
 #ifdef __mc68000__
-cache_clear(virt_to_phys(rbd->skb->tail),
+cache_clear(virt_to_phys(rbd->skb->data),
 pkt_len);
 #endif
 netif_rx(skb);

@@ -547,7 +547,7 @@ rio_timer (unsigned long data)
 skb_reserve (skb, 2);
 np->rx_ring[entry].fraginfo =
 cpu_to_le64 (pci_map_single
-(np->pdev, skb->tail, np->rx_buf_sz,
+(np->pdev, skb->data, np->rx_buf_sz,
 PCI_DMA_FROMDEVICE));
 }
 np->rx_ring[entry].fraginfo |=

@@ -618,7 +618,7 @@ alloc_list (struct net_device *dev)
 /* Rubicon now supports 40 bits of addressing space. */
 np->rx_ring[i].fraginfo =
 cpu_to_le64 ( pci_map_single (
-np->pdev, skb->tail, np->rx_buf_sz,
+np->pdev, skb->data, np->rx_buf_sz,
 PCI_DMA_FROMDEVICE));
 np->rx_ring[i].fraginfo |= cpu_to_le64 (np->rx_buf_sz) << 48;
 }

@@ -906,7 +906,7 @@ receive_packet (struct net_device *dev)
 /* 16 byte align the IP header */
 skb_reserve (skb, 2);
 eth_copy_and_sum (skb,
-np->rx_skbuff[entry]->tail,
+np->rx_skbuff[entry]->data,
 pkt_len, 0);
 skb_put (skb, pkt_len);
 pci_dma_sync_single_for_device(np->pdev,

@@ -950,7 +950,7 @@ receive_packet (struct net_device *dev)
 skb_reserve (skb, 2);
 np->rx_ring[entry].fraginfo =
 cpu_to_le64 (pci_map_single
-(np->pdev, skb->tail, np->rx_buf_sz,
+(np->pdev, skb->data, np->rx_buf_sz,
 PCI_DMA_FROMDEVICE));
 }
 np->rx_ring[entry].fraginfo |=
@@ -1269,7 +1269,7 @@ speedo_init_rx_ring(struct net_device *dev)
 if (skb == NULL)
 break; /* OK. Just initially short of Rx bufs. */
 skb->dev = dev; /* Mark as being used by this device. */
-rxf = (struct RxFD *)skb->tail;
+rxf = (struct RxFD *)skb->data;
 sp->rx_ringp[i] = rxf;
 sp->rx_ring_dma[i] =
 pci_map_single(sp->pdev, rxf,

@@ -1661,7 +1661,7 @@ static inline struct RxFD *speedo_rx_alloc(struct net_device *dev, int entry)
 sp->rx_ringp[entry] = NULL;
 return NULL;
 }
-rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->tail;
+rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->data;
 sp->rx_ring_dma[entry] =
 pci_map_single(sp->pdev, rxf,
 PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);

@@ -1808,10 +1808,10 @@ speedo_rx(struct net_device *dev)
 
 #if 1 || USE_IP_CSUM
 /* Packet is in one chunk -- we can copy + cksum. */
-eth_copy_and_sum(skb, sp->rx_skbuff[entry]->tail, pkt_len, 0);
+eth_copy_and_sum(skb, sp->rx_skbuff[entry]->data, pkt_len, 0);
 skb_put(skb, pkt_len);
 #else
-memcpy(skb_put(skb, pkt_len), sp->rx_skbuff[entry]->tail,
+memcpy(skb_put(skb, pkt_len), sp->rx_skbuff[entry]->data,
 pkt_len);
 #endif
 pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry],

@@ -1003,7 +1003,7 @@ static void epic_init_ring(struct net_device *dev)
 skb->dev = dev; /* Mark as being used by this device. */
 skb_reserve(skb, 2); /* 16 byte align the IP header. */
 ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev,
-skb->tail, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
+skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
 ep->rx_ring[i].rxstatus = cpu_to_le32(DescOwn);
 }
 ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

@@ -1274,7 +1274,7 @@ static int epic_rx(struct net_device *dev, int budget)
 ep->rx_ring[entry].bufaddr,
 ep->rx_buf_sz,
 PCI_DMA_FROMDEVICE);
-eth_copy_and_sum(skb, ep->rx_skbuff[entry]->tail, pkt_len, 0);
+eth_copy_and_sum(skb, ep->rx_skbuff[entry]->data, pkt_len, 0);
 skb_put(skb, pkt_len);
 pci_dma_sync_single_for_device(ep->pci_dev,
 ep->rx_ring[entry].bufaddr,

@@ -1308,7 +1308,7 @@ static int epic_rx(struct net_device *dev, int budget)
 skb->dev = dev; /* Mark as being used by this device. */
 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
 ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev,
-skb->tail, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
+skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
 work_done++;
 }
 ep->rx_ring[entry].rxstatus = cpu_to_le32(DescOwn);

@@ -1107,7 +1107,7 @@ static void allocate_rx_buffers(struct net_device *dev)
 
 skb->dev = dev; /* Mark as being used by this device. */
 np->lack_rxbuf->skbuff = skb;
-np->lack_rxbuf->buffer = pci_map_single(np->pci_dev, skb->tail,
+np->lack_rxbuf->buffer = pci_map_single(np->pci_dev, skb->data,
 np->rx_buf_sz, PCI_DMA_FROMDEVICE);
 np->lack_rxbuf->status = RXOWN;
 ++np->really_rx_count;

@@ -1300,7 +1300,7 @@ static void init_ring(struct net_device *dev)
 ++np->really_rx_count;
 np->rx_ring[i].skbuff = skb;
 skb->dev = dev; /* Mark as being used by this device. */
-np->rx_ring[i].buffer = pci_map_single(np->pci_dev, skb->tail,
+np->rx_ring[i].buffer = pci_map_single(np->pci_dev, skb->data,
 np->rx_buf_sz, PCI_DMA_FROMDEVICE);
 np->rx_ring[i].status = RXOWN;
 np->rx_ring[i].control |= RXIC;

@@ -1737,11 +1737,11 @@ static int netdev_rx(struct net_device *dev)
 
 #if ! defined(__alpha__)
 eth_copy_and_sum(skb,
-np->cur_rx->skbuff->tail, pkt_len, 0);
+np->cur_rx->skbuff->data, pkt_len, 0);
 skb_put(skb, pkt_len);
 #else
 memcpy(skb_put(skb, pkt_len),
-np->cur_rx->skbuff->tail, pkt_len);
+np->cur_rx->skbuff->data, pkt_len);
 #endif
 pci_dma_sync_single_for_device(np->pci_dev,
 np->cur_rx->buffer,

@@ -1149,7 +1149,7 @@ static void hamachi_tx_timeout(struct net_device *dev)
 skb->dev = dev; /* Mark as being used by this device. */
 skb_reserve(skb, 2); /* 16 byte align the IP header. */
 hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
-skb->tail, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
+skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
 hmp->rx_ring[i].status_n_length = cpu_to_le32(DescOwn |
 DescEndPacket | DescIntr | (hmp->rx_buf_sz - 2));
 }

@@ -1210,7 +1210,7 @@ static void hamachi_init_ring(struct net_device *dev)
 skb->dev = dev; /* Mark as being used by this device. */
 skb_reserve(skb, 2); /* 16 byte align the IP header. */
 hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
-skb->tail, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
+skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
 /* -2 because it doesn't REALLY have that first 2 bytes -KDU */
 hmp->rx_ring[i].status_n_length = cpu_to_le32(DescOwn |
 DescEndPacket | DescIntr | (hmp->rx_buf_sz -2));

@@ -1509,7 +1509,7 @@ static int hamachi_rx(struct net_device *dev)
 desc->addr,
 hmp->rx_buf_sz,
 PCI_DMA_FROMDEVICE);
-buf_addr = (u8 *) hmp->rx_skbuff[entry]->tail;
+buf_addr = (u8 *) hmp->rx_skbuff[entry]->data;
 frame_status = le32_to_cpu(get_unaligned((s32*)&(buf_addr[data_size - 12])));
 if (hamachi_debug > 4)
 printk(KERN_DEBUG " hamachi_rx() status was %8.8x.\n",

@@ -1678,7 +1678,7 @@ static int hamachi_rx(struct net_device *dev)
 skb->dev = dev; /* Mark as being used by this device. */
 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
 desc->addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
-skb->tail, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
+skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
 }
 desc->status_n_length = cpu_to_le32(hmp->rx_buf_sz);
 if (entry >= RX_RING_SIZE-1)

@@ -1772,9 +1772,9 @@ static int hamachi_close(struct net_device *dev)
 readl(ioaddr + RxCurPtr) == (long)&hmp->rx_ring[i] ? '>' : ' ',
 i, hmp->rx_ring[i].status_n_length, hmp->rx_ring[i].addr);
 if (hamachi_debug > 6) {
-if (*(u8*)hmp->rx_skbuff[i]->tail != 0x69) {
+if (*(u8*)hmp->rx_skbuff[i]->data != 0x69) {
 u16 *addr = (u16 *)
-hmp->rx_skbuff[i]->tail;
+hmp->rx_skbuff[i]->data;
 int j;
 
 for (j = 0; j < 0x50; j++)

@@ -862,7 +862,7 @@ lance_init_ring(struct net_device *dev, int gfp)
 lp->rx_skbuff[i] = skb;
 if (skb) {
 skb->dev = dev;
-rx_buff = skb->tail;
+rx_buff = skb->data;
 } else
 rx_buff = kmalloc(PKT_BUF_SZ, GFP_DMA | gfp);
 if (rx_buff == NULL)
@@ -553,14 +553,14 @@ static inline void init_rx_bufs(struct net_device *dev)
 if (skb == NULL)
 panic("%s: alloc_skb() failed", __FILE__);
 skb_reserve(skb, 2);
-dma_addr = dma_map_single(lp->dev, skb->tail,PKT_BUF_SZ,
+dma_addr = dma_map_single(lp->dev, skb->data,PKT_BUF_SZ,
 DMA_FROM_DEVICE);
 skb->dev = dev;
 rbd->v_next = rbd+1;
 rbd->b_next = WSWAPrbd(virt_to_dma(lp,rbd+1));
 rbd->b_addr = WSWAPrbd(virt_to_dma(lp,rbd));
 rbd->skb = skb;
-rbd->v_data = skb->tail;
+rbd->v_data = skb->data;
 rbd->b_data = WSWAPchar(dma_addr);
 rbd->size = PKT_BUF_SZ;
 }

@@ -783,8 +783,8 @@ static inline int i596_rx(struct net_device *dev)
 rx_in_place = 1;
 rbd->skb = newskb;
 newskb->dev = dev;
-dma_addr = dma_map_single(lp->dev, newskb->tail, PKT_BUF_SZ, DMA_FROM_DEVICE);
-rbd->v_data = newskb->tail;
+dma_addr = dma_map_single(lp->dev, newskb->data, PKT_BUF_SZ, DMA_FROM_DEVICE);
+rbd->v_data = newskb->data;
 rbd->b_data = WSWAPchar(dma_addr);
 CHECK_WBACK_INV(rbd, sizeof(struct i596_rbd));
 }

@@ -1926,7 +1926,7 @@ static void refill_rx(struct net_device *dev)
 break; /* Better luck next round. */
 skb->dev = dev; /* Mark as being used by this device. */
 np->rx_dma[entry] = pci_map_single(np->pci_dev,
-skb->tail, buflen, PCI_DMA_FROMDEVICE);
+skb->data, buflen, PCI_DMA_FROMDEVICE);
 np->rx_ring[entry].addr = cpu_to_le32(np->rx_dma[entry]);
 }
 np->rx_ring[entry].cmd_status = cpu_to_le32(np->rx_buf_sz);

@@ -2280,7 +2280,7 @@ static void netdev_rx(struct net_device *dev)
 buflen,
 PCI_DMA_FROMDEVICE);
 eth_copy_and_sum(skb,
-np->rx_skbuff[entry]->tail, pkt_len, 0);
+np->rx_skbuff[entry]->data, pkt_len, 0);
 skb_put(skb, pkt_len);
 pci_dma_sync_single_for_device(np->pci_dev,
 np->rx_dma[entry],

@@ -574,7 +574,7 @@ static inline int ns83820_add_rx_skb(struct ns83820 *dev, struct sk_buff *skb)
 
 dev->rx_info.next_empty = (next_empty + 1) % NR_RX_DESC;
 cmdsts = REAL_RX_BUF_SIZE | CMDSTS_INTR;
-buf = pci_map_single(dev->pci_dev, skb->tail,
+buf = pci_map_single(dev->pci_dev, skb->data,
 REAL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
 build_rx_desc(dev, sg, 0, buf, cmdsts, 0);
 /* update link of previous rx */

@@ -604,7 +604,7 @@ static inline int rx_refill(struct net_device *ndev, int gfp)
 if (unlikely(!skb))
 break;
 
-res = (long)skb->tail & 0xf;
+res = (long)skb->data & 0xf;
 res = 0x10 - res;
 res &= 0xf;
 skb_reserve(skb, res);

@@ -1602,7 +1602,7 @@ pcnet32_init_ring(struct net_device *dev)
 
 rmb();
 if (lp->rx_dma_addr[i] == 0)
-lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev, rx_skbuff->tail,
+lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev, rx_skbuff->data,
 PKT_BUF_SZ-2, PCI_DMA_FROMDEVICE);
 lp->rx_ring[i].base = (u32)le32_to_cpu(lp->rx_dma_addr[i]);
 lp->rx_ring[i].buf_length = le16_to_cpu(2-PKT_BUF_SZ);

@@ -1983,7 +1983,7 @@ pcnet32_rx(struct net_device *dev)
 lp->rx_skbuff[entry] = newskb;
 newskb->dev = dev;
 lp->rx_dma_addr[entry] =
-pci_map_single(lp->pci_dev, newskb->tail,
+pci_map_single(lp->pci_dev, newskb->data,
 PKT_BUF_SZ-2, PCI_DMA_FROMDEVICE);
 lp->rx_ring[entry].base = le32_to_cpu(lp->rx_dma_addr[entry]);
 rx_in_place = 1;

@@ -2020,7 +2020,7 @@ pcnet32_rx(struct net_device *dev)
 PKT_BUF_SZ-2,
 PCI_DMA_FROMDEVICE);
 eth_copy_and_sum(skb,
-(unsigned char *)(lp->rx_skbuff[entry]->tail),
+(unsigned char *)(lp->rx_skbuff[entry]->data),
 pkt_len,0);
 pci_dma_sync_single_for_device(lp->pci_dev,
 lp->rx_dma_addr[entry],

@@ -1876,7 +1876,7 @@ static int rtl8169_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
 skb_reserve(skb, NET_IP_ALIGN);
 *sk_buff = skb;
 
-mapping = pci_map_single(pdev, skb->tail, rx_buf_sz,
+mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
 PCI_DMA_FROMDEVICE);
 
 rtl8169_map_to_asic(desc, mapping, rx_buf_sz);

@@ -2336,7 +2336,7 @@ static inline int rtl8169_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
 skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
 if (skb) {
 skb_reserve(skb, NET_IP_ALIGN);
-eth_copy_and_sum(skb, sk_buff[0]->tail, pkt_size, 0);
+eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0);
 *sk_buff = skb;
 rtl8169_mark_to_asic(desc, rx_buf_sz);
 ret = 0;
@@ -1699,11 +1699,9 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
 #else
 ba = &nic->ba[ring_no][block_no][off];
 skb_reserve(skb, BUF0_LEN);
-tmp = (unsigned long) skb->data;
-tmp += ALIGN_SIZE;
-tmp &= ~ALIGN_SIZE;
-skb->data = (void *) tmp;
-skb->tail = (void *) tmp;
+tmp = ((unsigned long) skb->data & ALIGN_SIZE);
+if (tmp)
+skb_reserve(skb, (ALIGN_SIZE + 1) - tmp);
 
 memset(rxdp, 0, sizeof(RxD_t));
 rxdp->Buffer2_ptr = pci_map_single
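Note: the s2io hunk is the one place in this merge where the conversion is more than a rename. The old code rounded skb->data up to an alignment boundary and wrote the rounded address back into skb->data and skb->tail by hand, bypassing the sk_buff accessors; the new code computes the misalignment and lets skb_reserve() shift both pointers consistently. A worked sketch, assuming ALIGN_SIZE is a power-of-two-minus-one mask (e.g. 127 for 128-byte alignment; the actual value lives in the driver's header):

    unsigned long misalign = (unsigned long)skb->data & ALIGN_SIZE;
    if (misalign)                    /* e.g. data % 128 == 40 */
        skb_reserve(skb, (ALIGN_SIZE + 1) - misalign); /* skip 88 bytes */
    /* skb->data now sits on the boundary, and skb->tail and the
       headroom bookkeeping stay in sync automatically */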
@@ -963,11 +963,11 @@ static int sbdma_add_rcvbuffer(sbmacdma_t *d,struct sk_buff *sb)
 /*
 * Do not interrupt per DMA transfer.
 */
-dsc->dscr_a = virt_to_phys(sb_new->tail) |
+dsc->dscr_a = virt_to_phys(sb_new->data) |
 V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) |
 0;
 #else
-dsc->dscr_a = virt_to_phys(sb_new->tail) |
+dsc->dscr_a = virt_to_phys(sb_new->data) |
 V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) |
 M_DMA_DSCRA_INTERRUPT;
 #endif

@@ -1154,7 +1154,7 @@ sis900_init_rx_ring(struct net_device *net_dev)
 sis_priv->rx_skbuff[i] = skb;
 sis_priv->rx_ring[i].cmdsts = RX_BUF_SIZE;
 sis_priv->rx_ring[i].bufptr = pci_map_single(sis_priv->pci_dev,
-skb->tail, RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+skb->data, RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
 }
 sis_priv->dirty_rx = (unsigned int) (i - NUM_RX_DESC);
 

@@ -1776,7 +1776,7 @@ static int sis900_rx(struct net_device *net_dev)
 sis_priv->rx_skbuff[entry] = skb;
 sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
 sis_priv->rx_ring[entry].bufptr =
-pci_map_single(sis_priv->pci_dev, skb->tail,
+pci_map_single(sis_priv->pci_dev, skb->data,
 RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
 sis_priv->dirty_rx++;
 }

@@ -1809,7 +1809,7 @@ static int sis900_rx(struct net_device *net_dev)
 sis_priv->rx_skbuff[entry] = skb;
 sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
 sis_priv->rx_ring[entry].bufptr =
-pci_map_single(sis_priv->pci_dev, skb->tail,
+pci_map_single(sis_priv->pci_dev, skb->data,
 RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
 }
 }

@@ -74,6 +74,7 @@
 #include <linux/rtnetlink.h>
 #include <linux/if_arp.h>
 #include <linux/if_slip.h>
+#include <linux/delay.h>
 #include <linux/init.h>
 #include "slip.h"
 #ifdef CONFIG_INET

@@ -1286,7 +1286,7 @@ static void init_ring(struct net_device *dev)
 np->rx_info[i].skb = skb;
 if (skb == NULL)
 break;
-np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->tail, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
 skb->dev = dev; /* Mark as being used by this device. */
 /* Grrr, we cannot offset to correctly align the IP header. */
 np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);

@@ -1572,7 +1572,7 @@ static int __netdev_rx(struct net_device *dev, int *quota)
 pci_dma_sync_single_for_cpu(np->pci_dev,
 np->rx_info[entry].mapping,
 pkt_len, PCI_DMA_FROMDEVICE);
-eth_copy_and_sum(skb, np->rx_info[entry].skb->tail, pkt_len, 0);
+eth_copy_and_sum(skb, np->rx_info[entry].skb->data, pkt_len, 0);
 pci_dma_sync_single_for_device(np->pci_dev,
 np->rx_info[entry].mapping,
 pkt_len, PCI_DMA_FROMDEVICE);

@@ -1696,7 +1696,7 @@ static void refill_rx_ring(struct net_device *dev)
 if (skb == NULL)
 break; /* Better luck next round. */
 np->rx_info[entry].mapping =
-pci_map_single(np->pci_dev, skb->tail, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
 skb->dev = dev; /* Mark as being used by this device. */
 np->rx_ring[entry].rxaddr =
 cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);

@@ -1028,7 +1028,7 @@ static void init_ring(struct net_device *dev)
 skb->dev = dev; /* Mark as being used by this device. */
 skb_reserve(skb, 2); /* 16 byte align the IP header. */
 np->rx_ring[i].frag[0].addr = cpu_to_le32(
-pci_map_single(np->pci_dev, skb->tail, np->rx_buf_sz,
+pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz,
 PCI_DMA_FROMDEVICE));
 np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
 }

@@ -1341,7 +1341,7 @@ static void rx_poll(unsigned long data)
 np->rx_buf_sz,
 PCI_DMA_FROMDEVICE);
 
-eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
+eth_copy_and_sum(skb, np->rx_skbuff[entry]->data, pkt_len, 0);
 pci_dma_sync_single_for_device(np->pci_dev,
 desc->frag[0].addr,
 np->rx_buf_sz,

@@ -1400,7 +1400,7 @@ static void refill_rx (struct net_device *dev)
 skb->dev = dev; /* Mark as being used by this device. */
 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
 np->rx_ring[entry].frag[0].addr = cpu_to_le32(
-pci_map_single(np->pci_dev, skb->tail,
+pci_map_single(np->pci_dev, skb->data,
 np->rx_buf_sz, PCI_DMA_FROMDEVICE));
 }
 /* Perhaps we need not reset this field. */

@@ -446,13 +446,13 @@ static void de_rx (struct de_private *de)
 
 mapping =
 de->rx_skb[rx_tail].mapping =
-pci_map_single(de->pdev, copy_skb->tail,
+pci_map_single(de->pdev, copy_skb->data,
 buflen, PCI_DMA_FROMDEVICE);
 de->rx_skb[rx_tail].skb = copy_skb;
 } else {
 pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
 skb_reserve(copy_skb, RX_OFFSET);
-memcpy(skb_put(copy_skb, len), skb->tail, len);
+memcpy(skb_put(copy_skb, len), skb->data, len);
 
 pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
 

@@ -1269,7 +1269,7 @@ static int de_refill_rx (struct de_private *de)
 skb->dev = de->dev;
 
 de->rx_skb[i].mapping = pci_map_single(de->pdev,
-skb->tail, de->rx_buf_sz, PCI_DMA_FROMDEVICE);
+skb->data, de->rx_buf_sz, PCI_DMA_FROMDEVICE);
 de->rx_skb[i].skb = skb;
 
 de->rx_ring[i].opts1 = cpu_to_le32(DescOwn);

@@ -945,8 +945,8 @@ static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
 
 /* Received Packet CRC check need or not */
 if ( (db->dm910x_chk_mode & 1) &&
-(cal_CRC(skb->tail, rxlen, 1) !=
-(*(u32 *) (skb->tail+rxlen) ))) { /* FIXME (?) */
+(cal_CRC(skb->data, rxlen, 1) !=
+(*(u32 *) (skb->data+rxlen) ))) { /* FIXME (?) */
 /* Found a error received packet */
 dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
 db->dm910x_chk_mode = 3;

@@ -959,7 +959,7 @@ static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
 /* size less than COPY_SIZE, allocate a rxlen SKB */
 skb->dev = dev;
 skb_reserve(skb, 2); /* 16byte align */
-memcpy(skb_put(skb, rxlen), rxptr->rx_skb_ptr->tail, rxlen);
+memcpy(skb_put(skb, rxlen), rxptr->rx_skb_ptr->data, rxlen);
 dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
 } else {
 skb->dev = dev;

@@ -1252,7 +1252,7 @@ static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff * skb)
 
 if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
 rxptr->rx_skb_ptr = skb;
-rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->tail, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
+rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
 wmb();
 rxptr->rdes0 = cpu_to_le32(0x80000000);
 db->rx_avail_cnt++;

@@ -1463,7 +1463,7 @@ static void allocate_rx_buffer(struct dmfe_board_info *db)
 if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL )
 break;
 rxptr->rx_skb_ptr = skb; /* FIXME (?) */
-rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->tail, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
+rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
 wmb();
 rxptr->rdes0 = cpu_to_le32(0x80000000);
 rxptr = rxptr->next_rx_desc;
@@ -78,7 +78,7 @@ int tulip_refill_rx(struct net_device *dev)
 if (skb == NULL)
 break;
 
-mapping = pci_map_single(tp->pdev, skb->tail, PKT_BUF_SZ,
+mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
 PCI_DMA_FROMDEVICE);
 tp->rx_buffers[entry].mapping = mapping;
 

@@ -199,12 +199,12 @@ int tulip_poll(struct net_device *dev, int *budget)
 tp->rx_buffers[entry].mapping,
 pkt_len, PCI_DMA_FROMDEVICE);
 #if ! defined(__alpha__)
-eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->tail,
+eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->data,
 pkt_len, 0);
 skb_put(skb, pkt_len);
 #else
 memcpy(skb_put(skb, pkt_len),
-tp->rx_buffers[entry].skb->tail,
+tp->rx_buffers[entry].skb->data,
 pkt_len);
 #endif
 pci_dma_sync_single_for_device(tp->pdev,

@@ -423,12 +423,12 @@ static int tulip_rx(struct net_device *dev)
 tp->rx_buffers[entry].mapping,
 pkt_len, PCI_DMA_FROMDEVICE);
 #if ! defined(__alpha__)
-eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->tail,
+eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->data,
 pkt_len, 0);
 skb_put(skb, pkt_len);
 #else
 memcpy(skb_put(skb, pkt_len),
-tp->rx_buffers[entry].skb->tail,
+tp->rx_buffers[entry].skb->data,
 pkt_len);
 #endif
 pci_dma_sync_single_for_device(tp->pdev,

@@ -625,7 +625,7 @@ static void tulip_init_ring(struct net_device *dev)
 tp->rx_buffers[i].skb = skb;
 if (skb == NULL)
 break;
-mapping = pci_map_single(tp->pdev, skb->tail,
+mapping = pci_map_single(tp->pdev, skb->data,
 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
 tp->rx_buffers[i].mapping = mapping;
 skb->dev = dev; /* Mark as being used by this device. */

@@ -849,7 +849,7 @@ static void init_rxtx_rings(struct net_device *dev)
 if (skb == NULL)
 break;
 skb->dev = dev; /* Mark as being used by this device. */
-np->rx_addr[i] = pci_map_single(np->pci_dev,skb->tail,
+np->rx_addr[i] = pci_map_single(np->pci_dev,skb->data,
 skb->len,PCI_DMA_FROMDEVICE);
 
 np->rx_ring[i].buffer1 = np->rx_addr[i];

@@ -1269,7 +1269,7 @@ static int netdev_rx(struct net_device *dev)
 pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry],
 np->rx_skbuff[entry]->len,
 PCI_DMA_FROMDEVICE);
-eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
+eth_copy_and_sum(skb, np->rx_skbuff[entry]->data, pkt_len, 0);
 skb_put(skb, pkt_len);
 pci_dma_sync_single_for_device(np->pci_dev,np->rx_addr[entry],
 np->rx_skbuff[entry]->len,

@@ -1315,7 +1315,7 @@ static int netdev_rx(struct net_device *dev)
 break; /* Better luck next round. */
 skb->dev = dev; /* Mark as being used by this device. */
 np->rx_addr[entry] = pci_map_single(np->pci_dev,
-skb->tail,
+skb->data,
 skb->len, PCI_DMA_FROMDEVICE);
 np->rx_ring[entry].buffer1 = np->rx_addr[entry];
 }

@@ -899,7 +899,7 @@ static void xircom_init_ring(struct net_device *dev)
 break;
 skb->dev = dev; /* Mark as being used by this device. */
 tp->rx_ring[i].status = Rx0DescOwned; /* Owned by Xircom chip */
-tp->rx_ring[i].buffer1 = virt_to_bus(skb->tail);
+tp->rx_ring[i].buffer1 = virt_to_bus(skb->data);
 }
 tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
 

@@ -1291,7 +1291,7 @@ xircom_rx(struct net_device *dev)
 if (skb == NULL)
 break;
 skb->dev = dev; /* Mark as being used by this device. */
-tp->rx_ring[entry].buffer1 = virt_to_bus(skb->tail);
+tp->rx_ring[entry].buffer1 = virt_to_bus(skb->data);
 work_done++;
 }
 tp->rx_ring[entry].status = Rx0DescOwned;

@@ -1661,7 +1661,7 @@ typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
 #endif
 
 skb->dev = tp->dev;
-dma_addr = pci_map_single(tp->pdev, skb->tail,
+dma_addr = pci_map_single(tp->pdev, skb->data,
 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
 
 /* Since no card does 64 bit DAC, the high bits will never

@@ -1721,7 +1721,7 @@ typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile u32 * ready,
 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
 PKT_BUF_SZ,
 PCI_DMA_FROMDEVICE);
-eth_copy_and_sum(new_skb, skb->tail, pkt_len, 0);
+eth_copy_and_sum(new_skb, skb->data, pkt_len, 0);
 pci_dma_sync_single_for_device(tp->pdev, dma_addr,
 PKT_BUF_SZ,
 PCI_DMA_FROMDEVICE);

@@ -990,7 +990,7 @@ static void alloc_rbufs(struct net_device *dev)
 skb->dev = dev; /* Mark as being used by this device. */
 
 rp->rx_skbuff_dma[i] =
-pci_map_single(rp->pdev, skb->tail, rp->rx_buf_sz,
+pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
 PCI_DMA_FROMDEVICE);
 
 rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);

@@ -1518,7 +1518,7 @@ static void rhine_rx(struct net_device *dev)
 PCI_DMA_FROMDEVICE);
 
 eth_copy_and_sum(skb,
-rp->rx_skbuff[entry]->tail,
+rp->rx_skbuff[entry]->data,
 pkt_len, 0);
 skb_put(skb, pkt_len);
 pci_dma_sync_single_for_device(rp->pdev,

@@ -1561,7 +1561,7 @@ static void rhine_rx(struct net_device *dev)
 break; /* Better luck next round. */
 skb->dev = dev; /* Mark as being used by this device. */
 rp->rx_skbuff_dma[entry] =
-pci_map_single(rp->pdev, skb->tail,
+pci_map_single(rp->pdev, skb->data,
 rp->rx_buf_sz,
 PCI_DMA_FROMDEVICE);
 rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);

@@ -1335,7 +1335,7 @@ static inline int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
 if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN)
 skb_reserve(new_skb, 2);
 
-memcpy(new_skb->data, rx_skb[0]->tail, pkt_size);
+memcpy(new_skb->data, rx_skb[0]->data, pkt_size);
 *rx_skb = new_skb;
 ret = 0;
 }

@@ -1456,9 +1456,9 @@ static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
 * Do the gymnastics to get the buffer head for data at
 * 64byte alignment.
 */
-skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->tail & 63);
+skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->data & 63);
 rd_info->skb->dev = vptr->dev;
-rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->tail, vptr->rx_buf_sz, PCI_DMA_FROMDEVICE);
+rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data, vptr->rx_buf_sz, PCI_DMA_FROMDEVICE);
 
 /*
 * Fill in the descriptor to match

@@ -72,7 +72,7 @@ static void cisco_keepalive_send(struct net_device *dev, u32 type,
 }
 skb_reserve(skb, 4);
 cisco_hard_header(skb, dev, CISCO_KEEPALIVE, NULL, NULL, 0);
-data = (cisco_packet*)skb->tail;
+data = (cisco_packet*)skb->data;
 
 data->type = htonl(type);
 data->par1 = htonl(par1);

@@ -786,7 +786,7 @@ static void yellowfin_init_ring(struct net_device *dev)
 skb->dev = dev; /* Mark as being used by this device. */
 skb_reserve(skb, 2); /* 16 byte align the IP header. */
 yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
-skb->tail, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
+skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
 }
 yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
 yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

@@ -1111,7 +1111,7 @@ static int yellowfin_rx(struct net_device *dev)
 pci_dma_sync_single_for_cpu(yp->pci_dev, desc->addr,
 yp->rx_buf_sz, PCI_DMA_FROMDEVICE);
 desc_status = le32_to_cpu(desc->result_status) >> 16;
-buf_addr = rx_skb->tail;
+buf_addr = rx_skb->data;
 data_size = (le32_to_cpu(desc->dbdma_cmd) -
 le32_to_cpu(desc->result_status)) & 0xffff;
 frame_status = le16_to_cpu(get_unaligned((s16*)&(buf_addr[data_size - 2])));

@@ -1185,7 +1185,7 @@ static int yellowfin_rx(struct net_device *dev)
 break;
 skb->dev = dev;
 skb_reserve(skb, 2); /* 16 byte align the IP header */
-eth_copy_and_sum(skb, rx_skb->tail, pkt_len, 0);
+eth_copy_and_sum(skb, rx_skb->data, pkt_len, 0);
 skb_put(skb, pkt_len);
 pci_dma_sync_single_for_device(yp->pci_dev, desc->addr,
 yp->rx_buf_sz,

@@ -1211,7 +1211,7 @@ static int yellowfin_rx(struct net_device *dev)
 skb->dev = dev; /* Mark as being used by this device. */
 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
 yp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
-skb->tail, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
+skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
 }
 yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP);
 yp->rx_ring[entry].result_status = 0; /* Clear complete bit. */
@@ -25,6 +25,7 @@
 #define _LINUX_ETHERDEVICE_H
 
 #include <linux/if_ether.h>
+#include <linux/netdevice.h>
 #include <linux/random.h>
 
 #ifdef __KERNEL__

@@ -156,7 +156,7 @@ struct in6_flowlabel_req
 #define IPV6_CHECKSUM 7
 #define IPV6_HOPLIMIT 8
 #define IPV6_NEXTHOP 9
-#define IPV6_AUTHHDR 10
+#define IPV6_AUTHHDR 10 /* obsolete */
 #define IPV6_FLOWINFO 11
 
 #define IPV6_UNICAST_HOPS 16

@@ -168,6 +168,7 @@ __nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags)
 nlh->nlmsg_flags = flags;
 nlh->nlmsg_pid = pid;
 nlh->nlmsg_seq = seq;
+memset(NLMSG_DATA(nlh) + len, 0, NLMSG_ALIGN(size) - size);
 return nlh;
 }
 

@@ -276,6 +276,7 @@ struct tc_rsvp_pinfo
 __u8 protocol;
 __u8 tunnelid;
 __u8 tunnelhdr;
+__u8 pad;
 };
 
 /* ROUTE filter */

@@ -221,9 +221,11 @@ struct tc_gred_qopt
 /* gred setup */
 struct tc_gred_sopt
 {
 __u32 DPs;
 __u32 def_DP;
 __u8 grio;
+__u8 pad1;
+__u16 pad2;
 };
 
 /* HTB section */

@@ -351,6 +353,7 @@ struct tc_cbq_ovl
 #define TC_CBQ_OVL_DROP 3
 #define TC_CBQ_OVL_RCLASSIC 4
 unsigned char priority2;
+__u16 pad;
 __u32 penalty;
 };
 

@@ -363,6 +363,8 @@ enum
 struct rta_session
 {
 __u8 proto;
+__u8 pad1;
+__u16 pad2;
 
 union {
 struct {

@@ -635,10 +637,13 @@ struct ifinfomsg
 struct prefixmsg
 {
 unsigned char prefix_family;
+unsigned char prefix_pad1;
+unsigned short prefix_pad2;
 int prefix_ifindex;
 unsigned char prefix_type;
 unsigned char prefix_len;
 unsigned char prefix_flags;
+unsigned char prefix_pad3;
 };
 
 enum
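Note: the pkt_cls/pkt_sched/rtnetlink hunks above all add explicit pad fields where the compiler would otherwise insert invisible padding holes (a __u8 followed by a __u32 leaves three unnamed bytes, for instance). Naming the padding makes the layout identical on every architecture and gives the kernel a field it can zero before the struct is copied to userspace. A minimal illustration (not kernel code, field names invented):

    struct example {
        __u8  proto;   /* 1 byte */
        __u8  pad1;    /* was an invisible compiler-inserted hole */
        __u16 pad2;    /* now named, so it can be zeroed explicitly */
        __u32 value;   /* stays 4-byte aligned either way */
    };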
@@ -898,7 +903,9 @@ extern void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const voi
 memcpy(skb_put(skb, attrlen), data, attrlen); })
 
 #define RTA_PUT_NOHDR(skb, attrlen, data) \
-RTA_APPEND(skb, RTA_ALIGN(attrlen), data)
+({ RTA_APPEND(skb, RTA_ALIGN(attrlen), data); \
+memset(skb->tail - (RTA_ALIGN(attrlen) - attrlen), 0, \
+RTA_ALIGN(attrlen) - attrlen); })
 
 #define RTA_PUT_U8(skb, attrtype, value) \
 ({ u8 _tmp = (value); \

@@ -978,6 +985,7 @@ __rta_reserve(struct sk_buff *skb, int attrtype, int attrlen)
 rta = (struct rtattr*)skb_put(skb, RTA_ALIGN(size));
 rta->rta_type = attrtype;
 rta->rta_len = size;
+memset(RTA_DATA(rta) + attrlen, 0, RTA_ALIGN(size) - size);
 return rta;
 }
 
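Note: these netlink helpers round every message payload and attribute up to a 4-byte boundary (NLMSG_ALIGN/RTA_ALIGN), and the rounded-up slack used to be left as whatever the freshly grown skb happened to contain. The added memset() calls zero exactly that slack - the bytes between the real payload end and the aligned end - so the padding cannot carry stale kernel memory out to userspace when the buffer is delivered. The arithmetic, as a sketch (the alignment unit is 4):

    /* e.g. attrlen = 5: RTA_ALIGN(5) = 8, so 3 trailing pad bytes */
    int pad = RTA_ALIGN(attrlen) - attrlen;  /* 8 - 5 = 3 */
    memset(skb->tail - pad, 0, pad);         /* zero just the slack */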
@@ -641,6 +641,7 @@ enum {
 NET_SCTP_ADDIP_ENABLE = 13,
 NET_SCTP_PRSCTP_ENABLE = 14,
 NET_SCTP_SNDBUF_POLICY = 15,
+NET_SCTP_SACK_TIMEOUT = 16,
 };
 
 /* /proc/sys/net/bridge */

@@ -183,7 +183,6 @@ struct ipv6_txoptions
 struct ipv6_opt_hdr *hopopt;
 struct ipv6_opt_hdr *dst0opt;
 struct ipv6_rt_hdr *srcrt; /* Routing Header */
-struct ipv6_opt_hdr *auth;
 struct ipv6_opt_hdr *dst1opt;
 
 /* Option buffer, as read by IPV6_PKTOPTIONS, starts here. */
@ -263,23 +263,11 @@ enum { SCTP_MIN_PMTU = 576 };
|
||||||
enum { SCTP_MAX_DUP_TSNS = 16 };
|
enum { SCTP_MAX_DUP_TSNS = 16 };
|
||||||
enum { SCTP_MAX_GABS = 16 };
|
enum { SCTP_MAX_GABS = 16 };
|
||||||
|
|
||||||
/* Here we define the default timers. */
|
/* Heartbeat interval - 30 secs */
|
||||||
|
#define SCTP_DEFAULT_TIMEOUT_HEARTBEAT (30 * HZ)
|
||||||
|
|
||||||
/* cookie timer def = ? seconds */
|
/* Delayed sack timer - 200ms */
|
||||||
#define SCTP_DEFAULT_TIMEOUT_T1_COOKIE (3 * HZ)
|
|
||||||
|
|
||||||
/* init timer def = 3 seconds */
|
|
||||||
#define SCTP_DEFAULT_TIMEOUT_T1_INIT (3 * HZ)
|
|
||||||
|
|
||||||
/* shutdown timer def = 300 ms */
|
|
||||||
#define SCTP_DEFAULT_TIMEOUT_T2_SHUTDOWN ((300 * HZ) / 1000)
|
|
||||||
|
|
||||||
/* 0 seconds + RTO */
|
|
||||||
#define SCTP_DEFAULT_TIMEOUT_HEARTBEAT (10 * HZ)
|
|
||||||
|
|
||||||
/* recv timer def = 200ms (in usec) */
|
|
||||||
#define SCTP_DEFAULT_TIMEOUT_SACK ((200 * HZ) / 1000)
|
#define SCTP_DEFAULT_TIMEOUT_SACK ((200 * HZ) / 1000)
|
||||||
#define SCTP_DEFAULT_TIMEOUT_SACK_MAX ((500 * HZ) / 1000) /* 500 ms */
|
|
||||||
|
|
||||||
/* RTO.Initial - 3 seconds
|
/* RTO.Initial - 3 seconds
|
||||||
* RTO.Min - 1 second
|
* RTO.Min - 1 second
|
||||||
|
|
|
@ -161,6 +161,9 @@ extern struct sctp_globals {
|
||||||
*/
|
*/
|
||||||
int sndbuf_policy;
|
int sndbuf_policy;
|
||||||
|
|
||||||
|
/* Delayed SACK timeout 200ms default*/
|
||||||
|
int sack_timeout;
|
||||||
|
|
||||||
/* HB.interval - 30 seconds */
|
/* HB.interval - 30 seconds */
|
||||||
int hb_interval;
|
int hb_interval;
|
||||||
|
|
||||||
|
@ -217,6 +220,7 @@ extern struct sctp_globals {
|
||||||
#define sctp_sndbuf_policy (sctp_globals.sndbuf_policy)
|
#define sctp_sndbuf_policy (sctp_globals.sndbuf_policy)
|
||||||
#define sctp_max_retrans_path (sctp_globals.max_retrans_path)
|
#define sctp_max_retrans_path (sctp_globals.max_retrans_path)
|
||||||
#define sctp_max_retrans_init (sctp_globals.max_retrans_init)
|
#define sctp_max_retrans_init (sctp_globals.max_retrans_init)
|
||||||
|
#define sctp_sack_timeout (sctp_globals.sack_timeout)
|
||||||
#define sctp_hb_interval (sctp_globals.hb_interval)
|
#define sctp_hb_interval (sctp_globals.hb_interval)
|
||||||
#define sctp_max_instreams (sctp_globals.max_instreams)
|
#define sctp_max_instreams (sctp_globals.max_instreams)
|
||||||
#define sctp_max_outstreams (sctp_globals.max_outstreams)
|
#define sctp_max_outstreams (sctp_globals.max_outstreams)
|
||||||
|
|
|
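The SCTP constants use the usual millisecond-to-jiffies idiom, (msec * HZ) / 1000. With the defaults above the delayed-SACK timer is 200 ms and the heartbeat interval 30 s, and both now flow through globals (sctp_sack_timeout, sctp_hb_interval) rather than being baked in at every use site. A sketch of the conversion (HZ is the kernel tick rate; 250 here is just an example value):

#include <stdio.h>

#define HZ 250  /* example tick rate; real kernels use 100, 250, 1000, ... */

#define SCTP_DEFAULT_TIMEOUT_HEARTBEAT  (30 * HZ)            /* 30 s   */
#define SCTP_DEFAULT_TIMEOUT_SACK       ((200 * HZ) / 1000)  /* 200 ms */

int main(void)
{
    printf("heartbeat = %d jiffies, sack delay = %d jiffies\n",
           SCTP_DEFAULT_TIMEOUT_HEARTBEAT, SCTP_DEFAULT_TIMEOUT_SACK);
    return 0;
}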
@@ -844,7 +844,7 @@ static unsigned int ip_sabotage_out(unsigned int hook, struct sk_buff **pskb,
 		 * doesn't use the bridge parent of the indev by using
 		 * the BRNF_DONT_TAKE_PARENT mask. */
 		if (hook == NF_IP_FORWARD && nf_bridge->physindev == NULL) {
-			nf_bridge->mask &= BRNF_DONT_TAKE_PARENT;
+			nf_bridge->mask |= BRNF_DONT_TAKE_PARENT;
 			nf_bridge->physindev = (struct net_device *)in;
 		}
 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
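The one-character change above is the whole fix: mask &= BRNF_DONT_TAKE_PARENT intersects the mask with the flag, wiping every other bit and keeping the flag only if it was already set, while |= actually sets it. A self-contained demonstration (the flag values are made up for the example):

#include <stdio.h>

#define BRNF_DONT_TAKE_PARENT  0x04
#define BRNF_SOME_OTHER_STATE  0x01

int main(void)
{
    unsigned int buggy = BRNF_SOME_OTHER_STATE;
    unsigned int fixed = BRNF_SOME_OTHER_STATE;

    buggy &= BRNF_DONT_TAKE_PARENT;  /* 0x00: flag not set, old state lost */
    fixed |= BRNF_DONT_TAKE_PARENT;  /* 0x05: flag set, old state kept */

    printf("&= gives 0x%02x, |= gives 0x%02x\n", buggy, fixed);
    return 0;
}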
@@ -61,8 +61,6 @@ static void ebt_log(const struct sk_buff *skb, unsigned int hooknr,
 {
 	struct ebt_log_info *info = (struct ebt_log_info *)data;
 	char level_string[4] = "< >";
-	union {struct iphdr iph; struct tcpudphdr ports;
-	       struct arphdr arph; struct arppayload arpp;} u;
 
 	level_string[1] = '0' + info->loglevel;
 	spin_lock_bh(&ebt_log_lock);
@@ -88,7 +86,7 @@ static void ebt_log(const struct sk_buff *skb, unsigned int hooknr,
 		}
 		printk(" IP SRC=%u.%u.%u.%u IP DST=%u.%u.%u.%u,",
 		   NIPQUAD(ih->saddr), NIPQUAD(ih->daddr));
-		printk(" IP tos=0x%02X, IP proto=%d", u.iph.tos,
+		printk(" IP tos=0x%02X, IP proto=%d", ih->tos,
 		       ih->protocol);
 		if (ih->protocol == IPPROTO_TCP ||
 		    ih->protocol == IPPROTO_UDP) {
@@ -127,7 +125,7 @@ static void ebt_log(const struct sk_buff *skb, unsigned int hooknr,
 		    ah->ar_pln == sizeof(uint32_t)) {
 			struct arppayload _arpp, *ap;
 
-			ap = skb_header_pointer(skb, sizeof(u.arph),
+			ap = skb_header_pointer(skb, sizeof(_arph),
 						sizeof(_arpp), &_arpp);
 			if (ap == NULL) {
 				printk(" INCOMPLETE ARP payload");
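The ebt_log hunks drop the large on-stack union and read every header through skb_header_pointer(), which returns a pointer to the requested bytes, copying them into a caller-supplied buffer when the packet data is not linear, or NULL when the packet is too short. A user-space stand-in with the same contract:

#include <stdio.h>
#include <string.h>

/* Simplified analogue of the kernel helper: `buf` must hold `len` bytes. */
static const void *header_pointer(const unsigned char *pkt, size_t pktlen,
                                  size_t offset, size_t len, void *buf)
{
    if (offset + len > pktlen)
        return NULL;                 /* truncated packet: caller must bail out */
    memcpy(buf, pkt + offset, len);  /* the kernel copies only for nonlinear skbs */
    return buf;
}

struct arppayload { unsigned char mac_src[6], ip_src[4], mac_dst[6], ip_dst[4]; };

int main(void)
{
    unsigned char pkt[24] = {0};     /* deliberately too short for header + payload */
    struct arppayload _arpp;
    const struct arppayload *ap;

    ap = header_pointer(pkt, sizeof(pkt), 8 /* header size, illustrative */,
                        sizeof(_arpp), &_arpp);
    printf(ap ? " ARP payload ok\n" : " INCOMPLETE ARP payload\n");
    return 0;
}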
@@ -1598,6 +1598,8 @@ static int neightbl_fill_info(struct neigh_table *tbl, struct sk_buff *skb,
 
 	read_lock_bh(&tbl->lock);
 	ndtmsg->ndtm_family = tbl->family;
+	ndtmsg->ndtm_pad1   = 0;
+	ndtmsg->ndtm_pad2   = 0;
 
 	RTA_PUT_STRING(skb, NDTA_NAME, tbl->id);
 	RTA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
@@ -1683,6 +1685,8 @@ static int neightbl_fill_param_info(struct neigh_table *tbl,
 
 	read_lock_bh(&tbl->lock);
 	ndtmsg->ndtm_family = tbl->family;
+	ndtmsg->ndtm_pad1   = 0;
+	ndtmsg->ndtm_pad2   = 0;
 	RTA_PUT_STRING(skb, NDTA_NAME, tbl->id);
 
 	if (neightbl_fill_parms(skb, parms) < 0)
@@ -1872,6 +1876,8 @@ static int neigh_fill_info(struct sk_buff *skb, struct neighbour *n,
 	struct ndmsg *ndm = NLMSG_DATA(nlh);
 
 	ndm->ndm_family	 = n->ops->family;
+	ndm->ndm_pad1    = 0;
+	ndm->ndm_pad2    = 0;
 	ndm->ndm_flags	 = n->flags;
 	ndm->ndm_type	 = n->type;
 	ndm->ndm_ifindex = n->dev->ifindex;
@@ -126,6 +126,7 @@ void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const void *data)
 	rta->rta_type = attrtype;
 	rta->rta_len = size;
 	memcpy(RTA_DATA(rta), data, attrlen);
+	memset(RTA_DATA(rta) + attrlen, 0, RTA_ALIGN(size) - size);
 }
 
 size_t rtattr_strlcpy(char *dest, const struct rtattr *rta, size_t size)
@@ -188,6 +189,7 @@ static int rtnetlink_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
 	nlh = NLMSG_NEW(skb, pid, seq, type, sizeof(*r), flags);
 	r = NLMSG_DATA(nlh);
 	r->ifi_family = AF_UNSPEC;
+	r->__ifi_pad = 0;
 	r->ifi_type = dev->type;
 	r->ifi_index = dev->ifindex;
 	r->ifi_flags = dev_get_flags(dev);
@@ -1102,6 +1102,7 @@ static inline int rtnetlink_fill_iwinfo(struct sk_buff * skb,
 	nlh = NLMSG_PUT(skb, 0, 0, type, sizeof(*r));
 	r = NLMSG_DATA(nlh);
 	r->ifi_family = AF_UNSPEC;
+	r->__ifi_pad = 0;
 	r->ifi_type = dev->type;
 	r->ifi_index = dev->ifindex;
 	r->ifi_flags = dev->flags;
@@ -92,10 +92,9 @@ int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type,
 	 *	Set the source hardware address.
 	 */
 
-	if(saddr)
-		memcpy(eth->h_source,saddr,dev->addr_len);
-	else
-		memcpy(eth->h_source,dev->dev_addr,dev->addr_len);
+	if(!saddr)
+		saddr = dev->dev_addr;
+	memcpy(eth->h_source,saddr,dev->addr_len);
 
 	/*
 	 *	Anyway, the loopback-device should never use this function...
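The eth.c change folds the if/else pair into a single copy by normalising the argument first: when no source address is supplied, fall back to the device's own address, then do one memcpy(). The same shape, extracted into a standalone function for illustration:

#include <stdio.h>
#include <string.h>

#define ADDR_LEN 6

static void set_source(unsigned char *h_source, const unsigned char *saddr,
                       const unsigned char *dev_addr)
{
    if (!saddr)
        saddr = dev_addr;  /* normalise, then copy exactly once */
    memcpy(h_source, saddr, ADDR_LEN);
}

int main(void)
{
    unsigned char dev_addr[ADDR_LEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
    unsigned char out[ADDR_LEN];

    set_source(out, NULL, dev_addr);  /* falls back to the device address */
    printf("%02x:%02x:...:%02x\n", out[0], out[1], out[5]);
    return 0;
}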
@@ -43,7 +43,7 @@
 * 2 of the License, or (at your option) any later version.
 */
 
-#define VERSION "0.323"
+#define VERSION "0.324"
 
 #include <linux/config.h>
 #include <asm/uaccess.h>
@@ -341,8 +341,10 @@ static struct leaf *leaf_new(void)
 static struct leaf_info *leaf_info_new(int plen)
 {
 	struct leaf_info *li = kmalloc(sizeof(struct leaf_info), GFP_KERNEL);
-	li->plen = plen;
-	INIT_LIST_HEAD(&li->falh);
+	if(li) {
+		li->plen = plen;
+		INIT_LIST_HEAD(&li->falh);
+	}
 	return li;
 }
 
@@ -879,8 +881,8 @@ static struct node *trie_rebalance(struct trie *t, struct tnode *tn)
 	return (struct node*) tn;
 }
 
 static struct list_head *
-fib_insert_node(struct trie *t, u32 key, int plen)
+fib_insert_node(struct trie *t, int *err, u32 key, int plen)
 {
 	int pos, newpos;
 	struct tnode *tp = NULL, *tn = NULL;
@@ -940,7 +942,6 @@ fib_insert_node(struct trie *t, u32 key, int plen)
 	if(tp && IS_LEAF(tp))
 		BUG();
 
-	t->revision++;
 
 	/* Case 1: n is a leaf. Compare prefixes */
 
@@ -949,8 +950,10 @@ fib_insert_node(struct trie *t, u32 key, int plen)
 
 		li = leaf_info_new(plen);
 
-		if(! li)
-			BUG();
+		if(! li) {
+			*err = -ENOMEM;
+			goto err;
+		}
 
 		fa_head = &li->falh;
 		insert_leaf_info(&l->list, li);
@@ -959,14 +962,19 @@ fib_insert_node(struct trie *t, u32 key, int plen)
 		t->size++;
 		l = leaf_new();
 
-		if(! l)
-			BUG();
+		if(! l) {
+			*err = -ENOMEM;
+			goto err;
+		}
 
 		l->key = key;
 		li = leaf_info_new(plen);
 
-		if(! li)
-			BUG();
+		if(! li) {
+			tnode_free((struct tnode *) l);
+			*err = -ENOMEM;
+			goto err;
+		}
 
 		fa_head = &li->falh;
 		insert_leaf_info(&l->list, li);
@@ -1003,9 +1011,14 @@ fib_insert_node(struct trie *t, u32 key, int plen)
 			newpos = 0;
 			tn = tnode_new(key, newpos, 1); /* First tnode */
 		}
-		if(!tn)
-			trie_bug("tnode_pfx_new failed");
+
+		if(!tn) {
+			free_leaf_info(li);
+			tnode_free((struct tnode *) l);
+			*err = -ENOMEM;
+			goto err;
+		}
 
 		NODE_SET_PARENT(tn, tp);
 
 		missbit=tkey_extract_bits(key, newpos, 1);
@@ -1027,7 +1040,9 @@ fib_insert_node(struct trie *t, u32 key, int plen)
 	}
 	/* Rebalance the trie */
 	t->trie = trie_rebalance(t, tp);
-done:;
+done:
+	t->revision++;
+err:;
 	return fa_head;
 }
 
@@ -1156,8 +1171,12 @@ fn_trie_insert(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta,
 	 * Insert new entry to the list.
 	 */
 
-	if(!fa_head)
-		fa_head = fib_insert_node(t, key, plen);
+	if(!fa_head) {
+		fa_head = fib_insert_node(t, &err, key, plen);
+		err = 0;
+		if(err)
+			goto out_free_new_fa;
+	}
 
 	write_lock_bh(&fib_lock);
 
@@ -1170,6 +1189,9 @@ fn_trie_insert(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta,
 	rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id, nlhdr, req);
 succeeded:
 	return 0;
+
+out_free_new_fa:
+	kmem_cache_free(fn_alias_kmem, new_fa);
 out:
 	fib_release_info(fi);
 err:;
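The fib_trie hunks replace BUG() on allocation failure with ordinary error propagation: fib_insert_node() now takes an int *err out-parameter, stores -ENOMEM, frees whatever it had partially built, and jumps to a common exit, while the caller in fn_trie_insert() frees its own new_fa allocation on failure. (As committed, err is cleared immediately after the call, so the if(err) test shown above looks unable to fire as written; follow-up patches reworked this path.) A condensed sketch of the shape, with illustrative names rather than the kernel's:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

struct leaf { int key; };

/* Returns the new leaf, or NULL with *err set -- mirroring the new
 * fib_insert_node(t, &err, key, plen) convention. */
static struct leaf *insert_node(int *err, int key)
{
    struct leaf *l = malloc(sizeof(*l));

    if (!l) {
        *err = -ENOMEM;  /* was: BUG() */
        goto err;
    }
    l->key = key;
    *err = 0;
    return l;
err:
    return NULL;
}

int main(void)
{
    int err;
    struct leaf *l = insert_node(&err, 42);

    if (!l) {
        printf("insert failed: %d\n", err);
        return 1;
    }
    printf("inserted %d\n", l->key);
    free(l);
    return 0;
}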
@@ -283,14 +283,18 @@ static inline int ip_rcv_finish(struct sk_buff *skb)
 {
 	struct net_device *dev = skb->dev;
 	struct iphdr *iph = skb->nh.iph;
+	int err;
 
 	/*
 	 *	Initialise the virtual path cache for the packet. It describes
 	 *	how the packet travels inside Linux networking.
 	 */
 	if (skb->dst == NULL) {
-		if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))
+		if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
+			if (err == -EHOSTUNREACH)
+				IP_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS);
 			goto drop;
+		}
 	}
 
 #ifdef CONFIG_NET_CLS_ROUTE
@@ -188,7 +188,13 @@ static inline int ip_finish_output2(struct sk_buff *skb)
 		skb = skb2;
 	}
 
-	nf_reset(skb);
+#ifdef CONFIG_BRIDGE_NETFILTER
+	/* bridge-netfilter defers calling some IP hooks to the bridge layer
+	 * and still needs the conntrack reference.
+	 */
+	if (skb->nf_bridge == NULL)
+#endif
+		nf_reset(skb);
 
 	if (hh) {
 		int hh_alen;
@@ -1149,8 +1149,10 @@ static int __init ic_dynamic(void)
 	ic_rarp_cleanup();
 #endif
 
-	if (!ic_got_reply)
+	if (!ic_got_reply) {
+		ic_myaddr = INADDR_NONE;
 		return -1;
+	}
 
 	printk("IP-Config: Got %s answer from %u.%u.%u.%u, ",
 		((ic_got_reply & IC_RARP) ? "RARP"
@@ -297,6 +297,7 @@ static int vif_delete(int vifi)
 static void ipmr_destroy_unres(struct mfc_cache *c)
 {
 	struct sk_buff *skb;
+	struct nlmsgerr *e;
 
 	atomic_dec(&cache_resolve_queue_len);
 
@@ -306,7 +307,9 @@ static void ipmr_destroy_unres(struct mfc_cache *c)
 			nlh->nlmsg_type = NLMSG_ERROR;
 			nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
 			skb_trim(skb, nlh->nlmsg_len);
-			((struct nlmsgerr*)NLMSG_DATA(nlh))->error = -ETIMEDOUT;
+			e = NLMSG_DATA(nlh);
+			e->error = -ETIMEDOUT;
+			memset(&e->msg, 0, sizeof(e->msg));
 			netlink_unicast(rtnl, skb, NETLINK_CB(skb).dst_pid, MSG_DONTWAIT);
 		} else
 			kfree_skb(skb);
@@ -499,6 +502,7 @@ static struct mfc_cache *ipmr_cache_alloc_unres(void)
 static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
 {
 	struct sk_buff *skb;
+	struct nlmsgerr *e;
 
 	/*
 	 *	Play the pending entries through our router
@@ -515,7 +519,9 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
 			nlh->nlmsg_type = NLMSG_ERROR;
 			nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
 			skb_trim(skb, nlh->nlmsg_len);
-			((struct nlmsgerr*)NLMSG_DATA(nlh))->error = -EMSGSIZE;
+			e = NLMSG_DATA(nlh);
+			e->error = -EMSGSIZE;
+			memset(&e->msg, 0, sizeof(e->msg));
 		}
 		err = netlink_unicast(rtnl, skb, NETLINK_CB(skb).dst_pid, MSG_DONTWAIT);
 	} else
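Both ipmr hunks fix the same pattern: the error skb built for an unresolved multicast route reserves room for a full struct nlmsgerr but previously only wrote its error field through a cast, so the embedded echo of the request header (e->msg) went out uninitialized. Taking a proper pointer and memset()ing the tail closes the leak. A compact sketch of the record being zeroed (struct shapes simplified from the netlink headers):

#include <stdio.h>
#include <string.h>

struct nlmsghdr { unsigned int nlmsg_len; unsigned short nlmsg_type; };
struct nlmsgerr { int error; struct nlmsghdr msg; };  /* simplified */

int main(void)
{
    struct nlmsgerr e;

    memset(&e, 0xAA, sizeof(e));       /* stale bytes standing in for old skb data */
    e.error = -110;                    /* -ETIMEDOUT, as in ipmr_destroy_unres() */
    memset(&e.msg, 0, sizeof(e.msg));  /* the added line: nothing leaks */

    printf("error=%d echoed len=%u type=%u\n",
           e.error, e.msg.nlmsg_len, e.msg.nlmsg_type);
    return 0;
}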
@@ -548,7 +548,6 @@ void ip_vs_conn_expire_now(struct ip_vs_conn *cp)
 {
 	if (del_timer(&cp->timer))
 		mod_timer(&cp->timer, jiffies);
-	__ip_vs_conn_put(cp);
 }
 
 
@@ -764,7 +763,6 @@ void ip_vs_random_dropentry(void)
 {
 	int idx;
 	struct ip_vs_conn *cp;
-	struct ip_vs_conn *ct;
 
 	/*
 	 * Randomly scan 1/32 of the whole table every second
@@ -801,21 +799,12 @@ void ip_vs_random_dropentry(void)
 				continue;
 			}
 
-			/*
-			 * Drop the entry, and drop its ct if not referenced
-			 */
-			atomic_inc(&cp->refcnt);
-			ct_write_unlock(hash);
-
-			if ((ct = cp->control))
-				atomic_inc(&ct->refcnt);
 			IP_VS_DBG(4, "del connection\n");
 			ip_vs_conn_expire_now(cp);
-			if (ct) {
+			if (cp->control) {
 				IP_VS_DBG(4, "del conn template\n");
-				ip_vs_conn_expire_now(ct);
+				ip_vs_conn_expire_now(cp->control);
 			}
-			ct_write_lock(hash);
 		}
 		ct_write_unlock(hash);
 	}
@@ -829,7 +818,6 @@ static void ip_vs_conn_flush(void)
 {
 	int idx;
 	struct ip_vs_conn *cp;
-	struct ip_vs_conn *ct;
 
 flush_again:
 	for (idx=0; idx<IP_VS_CONN_TAB_SIZE; idx++) {
@@ -839,18 +827,13 @@ static void ip_vs_conn_flush(void)
 		ct_write_lock_bh(idx);
 
 		list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
-			atomic_inc(&cp->refcnt);
-			ct_write_unlock(idx);
-
-			if ((ct = cp->control))
-				atomic_inc(&ct->refcnt);
 			IP_VS_DBG(4, "del connection\n");
 			ip_vs_conn_expire_now(cp);
-			if (ct) {
+			if (cp->control) {
 				IP_VS_DBG(4, "del conn template\n");
-				ip_vs_conn_expire_now(ct);
+				ip_vs_conn_expire_now(cp->control);
 			}
-			ct_write_lock(idx);
 		}
 		ct_write_unlock_bh(idx);
 	}
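The IPVS hunks hang together: once ip_vs_conn_expire_now() stops dropping a reference itself (the removed __ip_vs_conn_put()), callers no longer need the take-a-reference/drop-the-lock dance before expiring an entry and its controlling template, so the refcount juggling disappears and cp->control is used directly under the hash lock. A toy model of the expire-now idiom itself, with the kernel timer API mimicked in miniature:

#include <stdio.h>

static unsigned long jiffies = 1000;  /* stand-in for the kernel tick counter */

struct timer { int pending; unsigned long expires; };

static int del_timer(struct timer *t)  /* returns nonzero if it was pending */
{
    int was_pending = t->pending;
    t->pending = 0;
    return was_pending;
}

static void mod_timer(struct timer *t, unsigned long when)
{
    t->pending = 1;
    t->expires = when;
}

static void expire_now(struct timer *t)
{
    /* If the timer is still armed, re-arm it to fire immediately.
     * After this patch the helper touches no reference counts. */
    if (del_timer(t))
        mod_timer(t, jiffies);
}

int main(void)
{
    struct timer t = { .pending = 1, .expires = 1600 };

    expire_now(&t);
    printf("pending=%d expires=%lu (now=%lu)\n", t.pending, t.expires, jiffies);
    return 0;
}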
@@ -30,7 +30,7 @@
 #include <linux/netfilter_ipv4/ipt_CLUSTERIP.h>
 #include <linux/netfilter_ipv4/ip_conntrack.h>
 
-#define CLUSTERIP_VERSION "0.6"
+#define CLUSTERIP_VERSION "0.7"
 
 #define DEBUG_CLUSTERIP
 
@@ -524,8 +524,9 @@ arp_mangle(unsigned int hook,
 	    || arp->ar_pln != 4 || arp->ar_hln != ETH_ALEN)
 		return NF_ACCEPT;
 
-	/* we only want to mangle arp replies */
-	if (arp->ar_op != htons(ARPOP_REPLY))
+	/* we only want to mangle arp requests and replies */
+	if (arp->ar_op != htons(ARPOP_REPLY)
+	    && arp->ar_op != htons(ARPOP_REQUEST))
 		return NF_ACCEPT;
 
 	payload = (void *)(arp+1);
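With requests now mangled too, the early return in arp_mangle() accepts both opcodes; note that the comparisons stay in network byte order via htons(). A standalone check with the standard opcode values:

#include <stdio.h>
#include <arpa/inet.h>

#define ARPOP_REQUEST 1
#define ARPOP_REPLY   2

static int should_mangle(unsigned short ar_op)  /* ar_op in network byte order */
{
    /* the patched condition from arp_mangle() */
    return !(ar_op != htons(ARPOP_REPLY) && ar_op != htons(ARPOP_REQUEST));
}

int main(void)
{
    printf("request=%d reply=%d other=%d\n",
           should_mangle(htons(ARPOP_REQUEST)),
           should_mangle(htons(ARPOP_REPLY)),
           should_mangle(htons(42)));  /* anything else stays untouched */
    return 0;
}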
@@ -1909,7 +1909,7 @@ static int ip_route_input_slow(struct sk_buff *skb, u32 daddr, u32 saddr,
 	 */
 	if ((err = fib_lookup(&fl, &res)) != 0) {
 		if (!IN_DEV_FORWARD(in_dev))
-			goto e_inval;
+			goto e_hostunreach;
 		goto no_route;
 	}
 	free_res = 1;
@@ -1933,7 +1933,7 @@ static int ip_route_input_slow(struct sk_buff *skb, u32 daddr, u32 saddr,
 	}
 
 	if (!IN_DEV_FORWARD(in_dev))
-		goto e_inval;
+		goto e_hostunreach;
 	if (res.type != RTN_UNICAST)
 		goto martian_destination;
 
@@ -2025,6 +2025,11 @@ martian_destination:
 			"%u.%u.%u.%u, dev %s\n",
 			NIPQUAD(daddr), NIPQUAD(saddr), dev->name);
 #endif
+
+e_hostunreach:
+	err = -EHOSTUNREACH;
+	goto done;
+
 e_inval:
 	err = -EINVAL;
 	goto done;
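These route.c hunks give a failed forwarding lookup its own exit label so the caller sees -EHOSTUNREACH instead of -EINVAL; the ip_rcv_finish() hunk earlier in this commit consumes that value and bumps the InAddrErrors counter before dropping. A sketch of the errno-to-counter plumbing (the counter and functions are stand-ins for the SNMP machinery):

#include <stdio.h>
#include <errno.h>

static long in_addr_errors;  /* stands in for IPSTATS_MIB_INADDRERRORS */

static int route_input(int forwarding_enabled)
{
    if (!forwarding_enabled)
        return -EHOSTUNREACH;  /* was -EINVAL before the patch */
    return 0;
}

static const char *rcv_finish(int forwarding_enabled)
{
    int err = route_input(forwarding_enabled);

    if (err) {
        if (err == -EHOSTUNREACH)
            in_addr_errors++;  /* the accounting added in ip_rcv_finish() */
        return "drop";
    }
    return "deliver";
}

int main(void)
{
    printf("%s, InAddrErrors=%ld\n", rcv_finish(0), in_addr_errors);
    printf("%s, InAddrErrors=%ld\n", rcv_finish(1), in_addr_errors);
    return 0;
}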
@@ -2777,7 +2777,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
 	read_lock_bh(&idev->lock);
 	switch (type) {
 	case UNICAST_ADDR:
-		/* unicast address */
+		/* unicast address incl. temp addr */
 		for (ifa = idev->addr_list; ifa;
 		     ifa = ifa->if_next, ip_idx++) {
 			if (ip_idx < s_ip_idx)
@@ -2788,19 +2788,6 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
 				NLM_F_MULTI)) <= 0)
 				goto done;
 		}
-		/* temp addr */
-#ifdef CONFIG_IPV6_PRIVACY
-		for (ifa = idev->tempaddr_list; ifa;
-		     ifa = ifa->tmp_next, ip_idx++) {
-			if (ip_idx < s_ip_idx)
-				continue;
-			if ((err = inet6_fill_ifaddr(skb, ifa,
-			    NETLINK_CB(cb->skb).pid,
-			    cb->nlh->nlmsg_seq, RTM_NEWADDR,
-			    NLM_F_MULTI)) <= 0)
-				goto done;
-		}
-#endif
 		break;
 	case MULTICAST_ADDR:
 		/* multicast address */
@@ -2923,6 +2910,7 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
 	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*r), flags);
 	r = NLMSG_DATA(nlh);
 	r->ifi_family = AF_INET6;
+	r->__ifi_pad = 0;
 	r->ifi_type = dev->type;
 	r->ifi_index = dev->ifindex;
 	r->ifi_flags = dev_get_flags(dev);
@@ -3030,9 +3018,12 @@ static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev,
 	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*pmsg), flags);
 	pmsg = NLMSG_DATA(nlh);
 	pmsg->prefix_family = AF_INET6;
+	pmsg->prefix_pad1 = 0;
+	pmsg->prefix_pad2 = 0;
 	pmsg->prefix_ifindex = idev->dev->ifindex;
 	pmsg->prefix_len = pinfo->prefix_len;
 	pmsg->prefix_type = pinfo->type;
+	pmsg->prefix_pad3 = 0;
 
 	pmsg->prefix_flags = 0;
 	if (pinfo->onlink)
@@ -244,7 +244,6 @@ struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions * opt_space,
 		opt_space->opt_nflen = 0;
 	}
 	opt_space->dst1opt = fopt->dst1opt;
-	opt_space->auth = fopt->auth;
 	opt_space->opt_flen = fopt->opt_flen;
 	return opt_space;
 }
@@ -439,6 +439,8 @@ tca_get_fill(struct sk_buff *skb, struct tc_action *a, u32 pid, u32 seq,
 
 	t = NLMSG_DATA(nlh);
 	t->tca_family = AF_UNSPEC;
+	t->tca__pad1 = 0;
+	t->tca__pad2 = 0;
 
 	x = (struct rtattr*) skb->tail;
 	RTA_PUT(skb, TCA_ACT_TAB, 0, NULL);
@@ -580,6 +582,8 @@ static int tca_action_flush(struct rtattr *rta, struct nlmsghdr *n, u32 pid)
 	nlh = NLMSG_PUT(skb, pid, n->nlmsg_seq, RTM_DELACTION, sizeof(*t));
 	t = NLMSG_DATA(nlh);
 	t->tca_family = AF_UNSPEC;
+	t->tca__pad1 = 0;
+	t->tca__pad2 = 0;
 
 	x = (struct rtattr *) skb->tail;
 	RTA_PUT(skb, TCA_ACT_TAB, 0, NULL);
@@ -687,7 +691,9 @@ static int tcf_add_notify(struct tc_action *a, u32 pid, u32 seq, int event,
 	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*t), flags);
 	t = NLMSG_DATA(nlh);
 	t->tca_family = AF_UNSPEC;
+	t->tca__pad1 = 0;
+	t->tca__pad2 = 0;
 
 	x = (struct rtattr*) skb->tail;
 	RTA_PUT(skb, TCA_ACT_TAB, 0, NULL);
 
@@ -842,6 +848,8 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
 			cb->nlh->nlmsg_type, sizeof(*t));
 	t = NLMSG_DATA(nlh);
 	t->tca_family = AF_UNSPEC;
+	t->tca__pad1 = 0;
+	t->tca__pad2 = 0;
 
 	x = (struct rtattr *) skb->tail;
 	RTA_PUT(skb, TCA_ACT_TAB, 0, NULL);
@@ -331,6 +331,8 @@ tcf_fill_node(struct sk_buff *skb, struct tcf_proto *tp, unsigned long fh,
 	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
 	tcm = NLMSG_DATA(nlh);
 	tcm->tcm_family = AF_UNSPEC;
+	tcm->tcm__pad1 = 0;
+	tcm->tcm__pad2 = 0;
 	tcm->tcm_ifindex = tp->q->dev->ifindex;
 	tcm->tcm_parent = tp->classid;
 	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
@@ -618,6 +618,7 @@ static int rsvp_dump(struct tcf_proto *tp, unsigned long fh,
 	pinfo.protocol = s->protocol;
 	pinfo.tunnelid = s->tunnelid;
 	pinfo.tunnelhdr = f->tunnelhdr;
+	pinfo.pad = 0;
 	RTA_PUT(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo);
 	if (f->res.classid)
 		RTA_PUT(skb, TCA_RSVP_CLASSID, 4, &f->res.classid);
@@ -770,6 +770,8 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
 	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
 	tcm = NLMSG_DATA(nlh);
 	tcm->tcm_family = AF_UNSPEC;
+	tcm->tcm__pad1 = 0;
+	tcm->tcm__pad2 = 0;
 	tcm->tcm_ifindex = q->dev->ifindex;
 	tcm->tcm_parent = clid;
 	tcm->tcm_handle = q->handle;
@@ -1528,6 +1528,7 @@ static __inline__ int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl)
 
 	opt.strategy = cl->ovl_strategy;
 	opt.priority2 = cl->priority2+1;
+	opt.pad = 0;
 	opt.penalty = (cl->penalty*1000)/HZ;
 	RTA_PUT(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt);
 	return skb->len;
@@ -1563,6 +1564,8 @@ static __inline__ int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
 
 	if (cl->police) {
 		opt.police = cl->police;
+		opt.__res1 = 0;
+		opt.__res2 = 0;
 		RTA_PUT(skb, TCA_CBQ_POLICE, sizeof(opt), &opt);
 	}
 	return skb->len;
@@ -102,9 +102,9 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
 	/* Set up the base timeout information. */
 	ep->timeouts[SCTP_EVENT_TIMEOUT_NONE] = 0;
 	ep->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] =
-		SCTP_DEFAULT_TIMEOUT_T1_COOKIE;
+		msecs_to_jiffies(sp->rtoinfo.srto_initial);
 	ep->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] =
-		SCTP_DEFAULT_TIMEOUT_T1_INIT;
+		msecs_to_jiffies(sp->rtoinfo.srto_initial);
 	ep->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] =
 		msecs_to_jiffies(sp->rtoinfo.srto_initial);
 	ep->timeouts[SCTP_EVENT_TIMEOUT_T3_RTX] = 0;
@@ -117,12 +117,9 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
 	ep->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]
 		= 5 * msecs_to_jiffies(sp->rtoinfo.srto_max);
 
-	ep->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] =
-		SCTP_DEFAULT_TIMEOUT_HEARTBEAT;
-	ep->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
-		SCTP_DEFAULT_TIMEOUT_SACK;
-	ep->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
-		sp->autoclose * HZ;
+	ep->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
+	ep->timeouts[SCTP_EVENT_TIMEOUT_SACK] = sctp_sack_timeout;
+	ep->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ;
 
 	/* Use SCTP specific send buffer space queues. */
 	ep->sndbuf_policy = sctp_sndbuf_policy;
@@ -1050,7 +1050,10 @@ SCTP_STATIC __init int sctp_init(void)
 	sctp_sndbuf_policy = 0;
 
 	/* HB.interval - 30 seconds */
-	sctp_hb_interval = 30 * HZ;
+	sctp_hb_interval = SCTP_DEFAULT_TIMEOUT_HEARTBEAT;
+
+	/* delayed SACK timeout */
+	sctp_sack_timeout = SCTP_DEFAULT_TIMEOUT_SACK;
 
 	/* Implementation specific variables. */
 
@@ -47,6 +47,8 @@
 static ctl_handler sctp_sysctl_jiffies_ms;
 static long rto_timer_min = 1;
 static long rto_timer_max = 86400000; /* One day */
+static long sack_timer_min = 1;
+static long sack_timer_max = 500;
 
 static ctl_table sctp_table[] = {
 	{
@@ -187,6 +189,17 @@ static ctl_table sctp_table[] = {
 		.mode		= 0644,
 		.proc_handler	= &proc_dointvec
 	},
+	{
+		.ctl_name	= NET_SCTP_SACK_TIMEOUT,
+		.procname	= "sack_timeout",
+		.data		= &sctp_sack_timeout,
+		.maxlen		= sizeof(long),
+		.mode		= 0644,
+		.proc_handler	= &proc_doulongvec_ms_jiffies_minmax,
+		.strategy	= &sctp_sysctl_jiffies_ms,
+		.extra1		= &sack_timer_min,
+		.extra2		= &sack_timer_max,
+	},
 	{ .ctl_name = 0 }
 };
 
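Taken together, the SCTP hunks turn the delayed-SACK interval into a first-class tunable: a sack_timeout global, a sysctl entry clamped to 1..500 ms and converted through the ms-to-jiffies handler, and endpoint setup that reads the global instead of a compile-time constant. Assuming the table is registered under /proc/sys/net/sctp/ as the entry suggests, it can be inspected from userspace like this:

#include <stdio.h>

int main(void)
{
    /* Path assumed from the ctl_table entry; absent on kernels
     * without this patch. */
    FILE *f = fopen("/proc/sys/net/sctp/sack_timeout", "r");
    long ms;

    if (f && fscanf(f, "%ld", &ms) == 1)
        printf("delayed SACK timeout: %ld ms\n", ms);
    else
        printf("sack_timeout sysctl not available\n");
    if (f)
        fclose(f);
    return 0;
}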
@@ -103,7 +103,6 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
 
 	/* Set up the heartbeat timer. */
 	init_timer(&peer->hb_timer);
-	peer->hb_interval = SCTP_DEFAULT_TIMEOUT_HEARTBEAT;
 	peer->hb_timer.function = sctp_generate_heartbeat_event;
 	peer->hb_timer.data = (unsigned long)peer;
 